Pull drm fixes from Dave Airlie:
 "Weekly fixes, amdgpu and vmwgfx making up the most of it, along with
  panthor and i915/xe.

  Seems about right for this time of development, nothing major
  outstanding.

  client:
   - Fix description of module parameter

  panthor:
   - Flush writes before mapping buffers

  vmwgfx:
   - Improve command validation
   - Improve ref counting
   - Fix cursor-plane support

  amdgpu:
   - Disallow P2P DMA for GC 12 DCC surfaces
   - ctx error handling fix
   - UserQ fixes
   - VRR fix
   - ISP fix
   - JPEG 5.0.1 fix

  amdkfd:
   - Save area check fix
   - Fix GPU mappings for APU after prefetch

  i915:
   - Fix PSR's pipe to vblank conversion
   - Disable Panel Replay on MST links

  xe:
   - New HW workarounds affecting PTL and WCL platforms

* tag 'drm-fixes-2025-11-15' of https://gitlab.freedesktop.org/drm/kernel:
  drm/client: fix MODULE_PARM_DESC string for "active"
  drm/i915/dp_mst: Disable Panel Replay
  drm/amdkfd: Fix GPU mappings for APU after prefetch
  drm/amdkfd: relax checks for over allocation of save area
  drm/amdgpu/jpeg: Add parse_cs for JPEG5_0_1
  drm/amd/amdgpu: Ensure isp_kernel_buffer_alloc() creates a new BO
  drm/amd/display: Allow VRR params change if unsynced with the stream
  drm/amdgpu: fix lock warning in amdgpu_userq_fence_driver_process
  drm/amdgpu: jump to the correct label on failure
  drm/amdgpu: disable peer-to-peer access for DCC-enabled GC12 VRAM surfaces
  drm/xe/xe3lpg: Extend Wa_15016589081 for xe3lpg
  drm/xe/xe3: Extend wa_14023061436
  drm/xe/xe3: Add WA_14024681466 for Xe3_LPG
  drm/i915/psr: fix pipe to vblank conversion
  drm/panthor: Flush shmem writes before mapping buffers CPU-uncached
  drm/vmwgfx: Restore Guest-Backed only cursor plane support
  drm/vmwgfx: Use kref in vmw_bo_dirty
  drm/vmwgfx: Validate command header size against SVGA_CMD_MAX_DATASIZE
Committed by Linus Torvalds, 2025-11-14 13:39:15 -08:00
17 changed files with 102 additions and 20 deletions


@@ -236,7 +236,7 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
 		r = amdgpu_xcp_select_scheds(adev, hw_ip, hw_prio, fpriv,
 					     &num_scheds, &scheds);
 		if (r)
-			goto cleanup_entity;
+			goto error_free_entity;
 	}
 
 	/* disable load balance if the hw engine retains context among dependent jobs */


@@ -82,6 +82,18 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 
+	/*
+	 * Disable peer-to-peer access for DCC-enabled VRAM surfaces on GFX12+.
+	 * Such buffers cannot be safely accessed over P2P due to device-local
+	 * compression metadata. Fall back to the system-memory path instead
+	 * when both conditions hold:
+	 *  - the device supports GFX12 (GC 12.x or newer)
+	 *  - the BO was created with the AMDGPU_GEM_CREATE_GFX12_DCC flag
+	 */
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0) &&
+	    bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
+		attach->peer2peer = false;
+
 	if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) &&
 	    pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
 		attach->peer2peer = false;


@@ -280,6 +280,8 @@ int isp_kernel_buffer_alloc(struct device *dev, u64 size,
 	if (ret)
 		return ret;
 
+	/* Ensure *bo is NULL so a new BO will be created */
+	*bo = NULL;
 	ret = amdgpu_bo_create_kernel(adev,
 				      size,
 				      ISP_MC_ADDR_ALIGN,


@@ -151,15 +151,16 @@ void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv)
 {
 	struct amdgpu_userq_fence *userq_fence, *tmp;
 	struct dma_fence *fence;
+	unsigned long flags;
 	u64 rptr;
 	int i;
 
 	if (!fence_drv)
 		return;
 
+	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
 	rptr = amdgpu_userq_fence_read(fence_drv);
 
-	spin_lock(&fence_drv->fence_list_lock);
 	list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
 		fence = &userq_fence->base;
@@ -174,7 +175,7 @@ void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv)
 		list_del(&userq_fence->link);
 		dma_fence_put(fence);
 	}
-	spin_unlock(&fence_drv->fence_list_lock);
+	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
 }
 
 void amdgpu_userq_fence_driver_destroy(struct kref *ref)

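The irqsave conversion matters because the fence list lock is evidently also taken in interrupt context (that is what the lock warning named in the commit points at); taking it with plain spin_lock() in process context lets an IRQ on the same CPU contend for the held lock and deadlock. A minimal sketch of the pattern, with hypothetical names rather than the driver's own:

	static DEFINE_SPINLOCK(demo_lock);	/* shared with an IRQ handler */

	static void demo_process_context(void)
	{
		unsigned long flags;

		/* IRQs stay off on this CPU while the lock is held, so the
		 * handler below cannot preempt us and spin forever. */
		spin_lock_irqsave(&demo_lock, flags);
		/* ... walk the shared fence list ... */
		spin_unlock_irqrestore(&demo_lock, flags);
	}

	static void demo_irq_handler(void)
	{
		/* Already in IRQ context; the plain variant is fine here. */
		spin_lock(&demo_lock);
		/* ... append to the shared fence list ... */
		spin_unlock(&demo_lock);
	}

Note the fix also moves the rptr sample under the lock, so the read pointer and the fence list are observed consistently.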

@@ -878,6 +878,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_1_dec_ring_vm_funcs = {
 	.get_rptr = jpeg_v5_0_1_dec_ring_get_rptr,
 	.get_wptr = jpeg_v5_0_1_dec_ring_get_wptr,
 	.set_wptr = jpeg_v5_0_1_dec_ring_set_wptr,
+	.parse_cs = amdgpu_jpeg_dec_parse_cs,
 	.emit_frame_size =
 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +


@@ -297,16 +297,16 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_properties *properties)
 		goto out_err_unreserve;
 	}
 
-	if (properties->ctx_save_restore_area_size != topo_dev->node_props.cwsr_size) {
-		pr_debug("queue cwsr size 0x%x not equal to node cwsr size 0x%x\n",
+	if (properties->ctx_save_restore_area_size < topo_dev->node_props.cwsr_size) {
+		pr_debug("queue cwsr size 0x%x not sufficient for node cwsr size 0x%x\n",
 			 properties->ctx_save_restore_area_size,
 			 topo_dev->node_props.cwsr_size);
 		err = -EINVAL;
 		goto out_err_unreserve;
 	}
 
-	total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size)
-			  * NUM_XCC(pdd->dev->xcc_mask);
+	total_cwsr_size = (properties->ctx_save_restore_area_size +
+			   topo_dev->node_props.debug_memory_size) * NUM_XCC(pdd->dev->xcc_mask);
 	total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE);
 
 	err = kfd_queue_buffer_get(vm, (void *)properties->ctx_save_restore_area_address,
@@ -352,8 +352,8 @@ int kfd_queue_release_buffers(struct kfd_process_device *pdd, struct queue_properties *properties)
 	topo_dev = kfd_topology_device_by_id(pdd->dev->id);
 	if (!topo_dev)
 		return -EINVAL;
 
-	total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size)
-			  * NUM_XCC(pdd->dev->xcc_mask);
+	total_cwsr_size = (properties->ctx_save_restore_area_size +
+			   topo_dev->node_props.debug_memory_size) * NUM_XCC(pdd->dev->xcc_mask);
 	total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE);
 
 	kfd_queue_buffer_svm_put(pdd, properties->ctx_save_restore_area_address, total_cwsr_size);

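Both the acquire and release paths now size the CWSR region from the queue's own ctx_save_restore_area_size, so a save area larger than the node's nominal requirement is accepted, and the same number of pages is mapped and later unmapped. A worked example with made-up numbers: for a user save area of 0x6000, debug_memory_size of 0x1000 and 2 XCCs,

	total_cwsr_size = (0x6000 + 0x1000) * 2 = 0xE000	/* then ALIGN(..., PAGE_SIZE) */

where the previous strict equality check would have rejected the queue outright if the node's cwsr_size were, say, 0x5000.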

@@ -3687,6 +3687,8 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
 		svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
 		/* TODO: unmap ranges from GPU that lost access */
 	}
+	update_mapping |= !p->xnack_enabled && !list_empty(&remap_list);
 
 	list_for_each_entry_safe(prange, next, &remove_list, update_list) {
 		pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
 			 prange->svms, prange, prange->start,


@@ -1260,6 +1260,17 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
 		update_v_total_for_static_ramp(
 				core_freesync, stream, in_out_vrr);
 	}
+
+	/*
+	 * If VRR is inactive, set vtotal min and max to nominal vtotal
+	 */
+	if (in_out_vrr->state == VRR_STATE_INACTIVE) {
+		in_out_vrr->adjust.v_total_min =
+			mod_freesync_calc_v_total_from_refresh(stream,
+				in_out_vrr->max_refresh_in_uhz);
+		in_out_vrr->adjust.v_total_max = in_out_vrr->adjust.v_total_min;
+		return;
+	}
 }
 
 unsigned long long mod_freesync_calc_nominal_field_rate(

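The clamp leans on the usual inverse relationship between refresh rate and vertical total. A hypothetical sketch of what mod_freesync_calc_v_total_from_refresh() conceptually computes (the real DC helper also handles fixed-point scaling and rounding):

	#include <linux/math64.h>

	/* refresh = pix_clk / (h_total * v_total), refresh given in uHz,
	 * hence v_total = pix_clk * 10^6 / (h_total * refresh_uhz). */
	static u32 v_total_from_refresh_uhz(u64 pix_clk_hz, u32 h_total,
					    u32 refresh_uhz)
	{
		return div64_u64(pix_clk_hz * 1000000ULL,
				 (u64)h_total * refresh_uhz);
	}

For a 148.5 MHz pixel clock with h_total 2200 at 60 Hz (60000000 uHz) this gives v_total = 1125; pinning v_total_min == v_total_max to that value holds an inactive stream at its nominal rate instead of leaving stale VRR limits in place.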

@@ -13,8 +13,8 @@
 static char drm_client_default[16] = CONFIG_DRM_CLIENT_DEFAULT;
 module_param_string(active, drm_client_default, sizeof(drm_client_default), 0444);
 MODULE_PARM_DESC(active,
-		 "Choose which drm client to start, default is"
-		 CONFIG_DRM_CLIENT_DEFAULT "]");
+		 "Choose which drm client to start, default is "
+		 CONFIG_DRM_CLIENT_DEFAULT);
 
 /**
  * drm_client_setup() - Setup in-kernel DRM clients

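The underlying bug is plain C adjacent-string-literal concatenation: the old description was missing a trailing space and kept a stray "]" from an earlier bracketed wording. Assuming CONFIG_DRM_CLIENT_DEFAULT expands to "fbdev" (an illustrative value only), the compile-time effect is:

	/* before: "Choose which drm client to start, default is" "fbdev" "]"
	 *    =>   "Choose which drm client to start, default isfbdev]"
	 * after:  "Choose which drm client to start, default is " "fbdev"
	 *    =>   "Choose which drm client to start, default is fbdev"
	 */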

@@ -585,6 +585,10 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
 	struct intel_display *display = to_intel_display(intel_dp);
 	int ret;
 
+	/* TODO: Enable Panel Replay on MST once it's properly implemented. */
+	if (intel_dp->mst_detect == DRM_DP_MST)
+		return;
+
 	ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
 				    &intel_dp->pr_dpcd, sizeof(intel_dp->pr_dpcd));
 	if (ret < 0)
@@ -888,7 +892,8 @@ static bool is_dc5_dc6_blocked(struct intel_dp *intel_dp)
 {
 	struct intel_display *display = to_intel_display(intel_dp);
 	u32 current_dc_state = intel_display_power_get_current_dc_state(display);
-	struct drm_vblank_crtc *vblank = &display->drm->vblank[intel_dp->psr.pipe];
+	struct intel_crtc *crtc = intel_crtc_for_pipe(display, intel_dp->psr.pipe);
+	struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(&crtc->base);
 
 	return (current_dc_state != DC_STATE_EN_UPTO_DC5 &&
 		current_dc_state != DC_STATE_EN_UPTO_DC6) ||

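The vblank part of the fix corrects an indexing mismatch: drm->vblank[] is indexed by DRM CRTC index, not by hardware pipe, and the two can diverge, e.g. when a lower-numbered pipe is fused off. Resolving through the CRTC makes the lookup robust; roughly (illustrative comment, same calls as the diff):

	/* If pipe A is fused off, pipe B's CRTC has DRM index 0, so indexing
	 * vblank[] by pipe (== 1) would read the wrong CRTC's state. */
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, intel_dp->psr.pipe);
	struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(&crtc->base);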

@@ -288,6 +288,23 @@ panthor_gem_create_with_handle(struct drm_file *file,
 	panthor_gem_debugfs_set_usage_flags(bo, 0);
 
+	/* If this is a write-combine mapping, we query the sgt to force a CPU
+	 * cache flush (dma_map_sgtable() is called when the sgt is created).
+	 * This ensures the zeroing is visible to any uncached mapping created
+	 * by vmap/mmap.
+	 * FIXME: Ideally this should be done when pages are allocated, not at
+	 * BO creation time.
+	 */
+	if (shmem->map_wc) {
+		struct sg_table *sgt;
+
+		sgt = drm_gem_shmem_get_pages_sgt(shmem);
+		if (IS_ERR(sgt)) {
+			ret = PTR_ERR(sgt);
+			goto out_put_gem;
+		}
+	}
+
 	/*
 	 * Allocate an id of idr table where the obj is registered
 	 * and handle has the id what user can see.
@@ -296,6 +313,7 @@ panthor_gem_create_with_handle(struct drm_file *file,
 	if (!ret)
 		*size = bo->base.base.size;
 
+out_put_gem:
 	/* drop reference from allocate - handle holds it now. */
 	drm_gem_object_put(&shmem->base);


@@ -100,8 +100,10 @@ vmw_cursor_update_type(struct vmw_private *vmw, struct vmw_plane_state *vps)
 	if (vmw->has_mob) {
 		if ((vmw->capabilities2 & SVGA_CAP2_CURSOR_MOB) != 0)
 			return VMW_CURSOR_UPDATE_MOB;
+		else
+			return VMW_CURSOR_UPDATE_GB_ONLY;
 	}
 
 	drm_warn_once(&vmw->drm, "Unknown Cursor Type!\n");
 	return VMW_CURSOR_UPDATE_NONE;
 }
@@ -139,6 +141,7 @@ static u32 vmw_cursor_mob_size(enum vmw_cursor_update_type update_type,
 {
 	switch (update_type) {
 	case VMW_CURSOR_UPDATE_LEGACY:
+	case VMW_CURSOR_UPDATE_GB_ONLY:
 	case VMW_CURSOR_UPDATE_NONE:
 		return 0;
 	case VMW_CURSOR_UPDATE_MOB:
@@ -623,6 +626,7 @@ int vmw_cursor_plane_prepare_fb(struct drm_plane *plane,
 		if (!surface || vps->cursor.legacy.id == surface->snooper.id)
 			vps->cursor.update_type = VMW_CURSOR_UPDATE_NONE;
 		break;
+	case VMW_CURSOR_UPDATE_GB_ONLY:
 	case VMW_CURSOR_UPDATE_MOB: {
 		bo = vmw_user_object_buffer(&vps->uo);
 		if (bo) {
@@ -737,6 +741,7 @@ void
 vmw_cursor_plane_atomic_update(struct drm_plane *plane,
 			       struct drm_atomic_state *state)
 {
+	struct vmw_bo *bo;
 	struct drm_plane_state *new_state =
 		drm_atomic_get_new_plane_state(state, plane);
 	struct drm_plane_state *old_state =
@@ -762,6 +767,15 @@ vmw_cursor_plane_atomic_update(struct drm_plane *plane,
 	case VMW_CURSOR_UPDATE_MOB:
 		vmw_cursor_update_mob(dev_priv, vps);
 		break;
+	case VMW_CURSOR_UPDATE_GB_ONLY:
+		bo = vmw_user_object_buffer(&vps->uo);
+		if (bo)
+			vmw_send_define_cursor_cmd(dev_priv, bo->map.virtual,
+						   vps->base.crtc_w,
+						   vps->base.crtc_h,
+						   vps->base.hotspot_x,
+						   vps->base.hotspot_y);
+		break;
 	case VMW_CURSOR_UPDATE_NONE:
 		/* do nothing */
 		break;


@@ -33,6 +33,7 @@ static const u32 __maybe_unused vmw_cursor_plane_formats[] = {
 enum vmw_cursor_update_type {
 	VMW_CURSOR_UPDATE_NONE = 0,
 	VMW_CURSOR_UPDATE_LEGACY,
+	VMW_CURSOR_UPDATE_GB_ONLY,
 	VMW_CURSOR_UPDATE_MOB,
 };


@@ -3668,6 +3668,11 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
 	cmd_id = header->id;
+	if (header->size > SVGA_CMD_MAX_DATASIZE) {
+		VMW_DEBUG_USER("SVGA3D command: %d is too big.\n",
+			       cmd_id + SVGA_3D_CMD_BASE);
+		return -E2BIG;
+	}
 	*size = header->size + sizeof(SVGA3dCmdHeader);
 
 	cmd_id -= SVGA_3D_CMD_BASE;

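The new check closes a window in which the guest-supplied header->size was trusted before any bound was applied: *size feeds the loop that steps through the command buffer, so an oversized value could push the parser past the end of the submission (or overflow the size arithmetic). A self-contained sketch of the pattern, with a hypothetical walker rather than the vmwgfx one:

	static int demo_validate_stream(u8 *buf, u8 *buf_end)
	{
		while (buf < buf_end) {
			SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)buf;
			u32 size;

			if (header->size > SVGA_CMD_MAX_DATASIZE)
				return -E2BIG;	/* reject before trusting it */

			size = header->size + sizeof(*header);
			if (buf + size > buf_end)
				return -EINVAL;	/* command must fit the buffer */

			buf += size;	/* safe to advance: size is bounded */
		}
		return 0;
	}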

@@ -32,22 +32,22 @@ enum vmw_bo_dirty_method {
 /**
  * struct vmw_bo_dirty - Dirty information for buffer objects
+ * @ref_count: Reference count for this structure. Must be first member!
  * @start: First currently dirty bit
  * @end: Last currently dirty bit + 1
  * @method: The currently used dirty method
  * @change_count: Number of consecutive method change triggers
- * @ref_count: Reference count for this structure
  * @bitmap_size: The size of the bitmap in bits. Typically equal to the
  * number of pages in the bo.
  * @bitmap: A bitmap where each bit represents a page. A set bit means a
  * dirty page.
  */
 struct vmw_bo_dirty {
+	struct kref ref_count;
 	unsigned long start;
 	unsigned long end;
 	enum vmw_bo_dirty_method method;
 	unsigned int change_count;
-	unsigned int ref_count;
 	unsigned long bitmap_size;
 	unsigned long bitmap[];
 };
@@ -221,7 +221,7 @@ int vmw_bo_dirty_add(struct vmw_bo *vbo)
 	int ret;
 
 	if (dirty) {
-		dirty->ref_count++;
+		kref_get(&dirty->ref_count);
 		return 0;
 	}
@@ -235,7 +235,7 @@ int vmw_bo_dirty_add(struct vmw_bo *vbo)
 	dirty->bitmap_size = num_pages;
 	dirty->start = dirty->bitmap_size;
 	dirty->end = 0;
-	dirty->ref_count = 1;
+	kref_init(&dirty->ref_count);
 	if (num_pages < PAGE_SIZE / sizeof(pte_t)) {
 		dirty->method = VMW_BO_DIRTY_PAGETABLE;
 	} else {
@@ -274,10 +274,8 @@ void vmw_bo_dirty_release(struct vmw_bo *vbo)
 {
 	struct vmw_bo_dirty *dirty = vbo->dirty;
 
-	if (dirty && --dirty->ref_count == 0) {
-		kvfree(dirty);
+	if (dirty && kref_put(&dirty->ref_count, (void *)kvfree))
 		vbo->dirty = NULL;
-	}
 }
 
 /**

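Passing kvfree() straight in as the kref release callback works only because ref_count is the first member: the callback receives a pointer to the embedded kref, which then coincides with the address of the allocation (hence the "Must be first member!" note in the kernel-doc). The layout-independent alternative is a small container_of() wrapper, sketched here as a hypothetical variant rather than the in-tree code:

	static void vmw_bo_dirty_free(struct kref *kref)
	{
		struct vmw_bo_dirty *dirty =
			container_of(kref, struct vmw_bo_dirty, ref_count);

		kvfree(dirty);
	}

	/* ... if (dirty && kref_put(&dirty->ref_count, vmw_bo_dirty_free)) ... */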

@@ -168,6 +168,7 @@
 #define XEHP_SLICE_COMMON_ECO_CHICKEN1		XE_REG_MCR(0x731c, XE_REG_OPTION_MASKED)
 #define   MSC_MSAA_REODER_BUF_BYPASS_DISABLE	REG_BIT(14)
+#define   FAST_CLEAR_VALIGN_FIX			REG_BIT(13)
 
 #define XE2LPM_CCCHKNREG1			XE_REG(0x82a8)


@@ -679,6 +679,8 @@ static const struct xe_rtp_entry_sr engine_was[] = {
 	},
 	{ XE_RTP_NAME("14023061436"),
 	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001),
-		       FUNC(xe_rtp_match_first_render_or_compute)),
+		       FUNC(xe_rtp_match_first_render_or_compute), OR,
+		       GRAPHICS_VERSION_RANGE(3003, 3005),
+		       FUNC(xe_rtp_match_first_render_or_compute)),
 	  XE_RTP_ACTIONS(SET(TDL_CHICKEN, QID_WAIT_FOR_THREAD_NOT_RUN_DISABLE))
 	},
@@ -916,6 +918,15 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
 	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3003), ENGINE_CLASS(RENDER)),
 	  XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN4, SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE))
 	},
+	{ XE_RTP_NAME("14024681466"),
+	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3005), ENGINE_CLASS(RENDER)),
+	  XE_RTP_ACTIONS(SET(XEHP_SLICE_COMMON_ECO_CHICKEN1, FAST_CLEAR_VALIGN_FIX))
+	},
+	{ XE_RTP_NAME("15016589081"),
+	  XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0),
+		       ENGINE_CLASS(RENDER)),
+	  XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX))
+	},
 };
 
 static __maybe_unused const struct xe_rtp_entry oob_was[] = {