mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-13 16:29:25 -04:00
Merge tag 'drm-xe-fixes-2024-03-26' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-fixes
- Fix build on MIPS
- Fix wrong bound checks
- Fix use of msec rather than jiffies
- Remove dead code

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/a47jbz45nry4gjmtyresaraakwnasgngncltmrshbfkx25mhzu@bvay7j3ed7ir
This commit is contained in:
@@ -144,9 +144,6 @@ static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
|
||||
.mem_type = XE_PL_TT,
|
||||
};
|
||||
*c += 1;
|
||||
|
||||
if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)
|
||||
bo->props.preferred_mem_type = XE_PL_TT;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -181,25 +178,15 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
|
||||
}
|
||||
places[*c] = place;
|
||||
*c += 1;
|
||||
|
||||
if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)
|
||||
bo->props.preferred_mem_type = mem_type;
|
||||
}
|
||||
|
||||
static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
|
||||
u32 bo_flags, u32 *c)
|
||||
{
|
||||
if (bo->props.preferred_gt == XE_GT1) {
|
||||
if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
|
||||
add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
|
||||
if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
|
||||
add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
|
||||
} else {
|
||||
if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
|
||||
add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
|
||||
if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
|
||||
add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
|
||||
}
|
||||
if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
|
||||
add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
|
||||
if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
|
||||
add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
|
||||
}
|
||||
|
||||
static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
|
||||
@@ -223,17 +210,8 @@ static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
|
||||
{
|
||||
u32 c = 0;
|
||||
|
||||
bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;
|
||||
|
||||
/* The order of placements should indicate preferred location */
|
||||
|
||||
if (bo->props.preferred_mem_class == DRM_XE_MEM_REGION_CLASS_SYSMEM) {
|
||||
try_add_system(xe, bo, bo_flags, &c);
|
||||
try_add_vram(xe, bo, bo_flags, &c);
|
||||
} else {
|
||||
try_add_vram(xe, bo, bo_flags, &c);
|
||||
try_add_system(xe, bo, bo_flags, &c);
|
||||
}
|
||||
try_add_vram(xe, bo, bo_flags, &c);
|
||||
try_add_system(xe, bo, bo_flags, &c);
|
||||
try_add_stolen(xe, bo, bo_flags, &c);
|
||||
|
||||
if (!c)
|
||||
@@ -1126,13 +1104,6 @@ static void xe_gem_object_close(struct drm_gem_object *obj,
|
||||
}
|
||||
}
|
||||
|
||||
static bool should_migrate_to_system(struct xe_bo *bo)
|
||||
{
|
||||
struct xe_device *xe = xe_bo_device(bo);
|
||||
|
||||
return xe_device_in_fault_mode(xe) && bo->props.cpu_atomic;
|
||||
}
|
||||
|
||||
static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
|
||||
{
|
||||
struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
|
||||
@@ -1141,7 +1112,7 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
|
||||
struct xe_bo *bo = ttm_to_xe_bo(tbo);
|
||||
bool needs_rpm = bo->flags & XE_BO_CREATE_VRAM_MASK;
|
||||
vm_fault_t ret;
|
||||
int idx, r = 0;
|
||||
int idx;
|
||||
|
||||
if (needs_rpm)
|
||||
xe_device_mem_access_get(xe);
|
||||
@@ -1153,17 +1124,8 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
|
||||
if (drm_dev_enter(ddev, &idx)) {
|
||||
trace_xe_bo_cpu_fault(bo);
|
||||
|
||||
if (should_migrate_to_system(bo)) {
|
||||
r = xe_bo_migrate(bo, XE_PL_TT);
|
||||
if (r == -EBUSY || r == -ERESTARTSYS || r == -EINTR)
|
||||
ret = VM_FAULT_NOPAGE;
|
||||
else if (r)
|
||||
ret = VM_FAULT_SIGBUS;
|
||||
}
|
||||
if (!ret)
|
||||
ret = ttm_bo_vm_fault_reserved(vmf,
|
||||
vmf->vma->vm_page_prot,
|
||||
TTM_BO_VM_NUM_PREFAULT);
|
||||
ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
|
||||
TTM_BO_VM_NUM_PREFAULT);
|
||||
drm_dev_exit(idx);
|
||||
} else {
|
||||
ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
|
||||
@@ -1291,9 +1253,6 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
|
||||
bo->flags = flags;
|
||||
bo->cpu_caching = cpu_caching;
|
||||
bo->ttm.base.funcs = &xe_gem_object_funcs;
|
||||
bo->props.preferred_mem_class = XE_BO_PROPS_INVALID;
|
||||
bo->props.preferred_gt = XE_BO_PROPS_INVALID;
|
||||
bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;
|
||||
bo->ttm.priority = XE_BO_PRIORITY_NORMAL;
|
||||
INIT_LIST_HEAD(&bo->pinned_link);
|
||||
#ifdef CONFIG_PROC_FS
|
||||
|
||||
@@ -56,25 +56,6 @@ struct xe_bo {
|
||||
*/
|
||||
struct list_head client_link;
|
||||
#endif
|
||||
/** @props: BO user controlled properties */
|
||||
struct {
|
||||
/** @preferred_mem: preferred memory class for this BO */
|
||||
s16 preferred_mem_class;
|
||||
		/** @preferred_gt: preferred GT for this BO */
|
||||
s16 preferred_gt;
|
||||
/** @preferred_mem_type: preferred memory type */
|
||||
s32 preferred_mem_type;
|
||||
/**
|
||||
* @cpu_atomic: the CPU expects to do atomics operations to
|
||||
* this BO
|
||||
*/
|
||||
bool cpu_atomic;
|
||||
/**
|
||||
* @device_atomic: the device expects to do atomics operations
|
||||
* to this BO
|
||||
*/
|
||||
bool device_atomic;
|
||||
} props;
|
||||
/** @freed: List node for delayed put. */
|
||||
struct llist_node freed;
|
||||
/** @created: Whether the bo has passed initial creation */
|
||||
|
||||
@@ -58,7 +58,7 @@ static inline struct xe_tile *xe_device_get_root_tile(struct xe_device *xe)
|
||||
|
||||
static inline struct xe_gt *xe_tile_get_gt(struct xe_tile *tile, u8 gt_id)
|
||||
{
|
||||
if (drm_WARN_ON(&tile_to_xe(tile)->drm, gt_id > XE_MAX_GT_PER_TILE))
|
||||
if (drm_WARN_ON(&tile_to_xe(tile)->drm, gt_id >= XE_MAX_GT_PER_TILE))
|
||||
gt_id = 0;
|
||||
|
||||
return gt_id ? tile->media_gt : tile->primary_gt;
|
||||
@@ -79,7 +79,7 @@ static inline struct xe_gt *xe_device_get_gt(struct xe_device *xe, u8 gt_id)
|
||||
if (MEDIA_VER(xe) >= 13) {
|
||||
gt = xe_tile_get_gt(root_tile, gt_id);
|
||||
} else {
|
||||
if (drm_WARN_ON(&xe->drm, gt_id > XE_MAX_TILES_PER_DEVICE))
|
||||
if (drm_WARN_ON(&xe->drm, gt_id >= XE_MAX_TILES_PER_DEVICE))
|
||||
gt_id = 0;
|
||||
|
||||
gt = xe->tiles[gt_id].primary_gt;
|
||||
|
||||
@@ -448,7 +448,7 @@ find_hw_engine(struct xe_device *xe,
|
||||
{
|
||||
u32 idx;
|
||||
|
||||
if (eci.engine_class > ARRAY_SIZE(user_to_xe_engine_class))
|
||||
if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
|
||||
return NULL;
|
||||
|
||||
if (eci.gt_id >= xe->info.gt_count)
|
||||
|
||||
@@ -1220,7 +1220,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
|
||||
init_waitqueue_head(&ge->suspend_wait);
|
||||
|
||||
timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
|
||||
q->sched_props.job_timeout_ms;
|
||||
msecs_to_jiffies(q->sched_props.job_timeout_ms);
|
||||
err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
|
||||
get_submit_wq(guc),
|
||||
q->lrc[0].ring.size / MAX_JOB_SIZE_BYTES, 64,
|
||||
|
||||
@@ -97,7 +97,6 @@ static void set_offsets(u32 *regs,
|
||||
#define REG16(x) \
|
||||
(((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
|
||||
(((x) >> 2) & 0x7f)
|
||||
#define END 0
|
||||
{
|
||||
const u32 base = hwe->mmio_base;
|
||||
|
||||
@@ -168,7 +167,7 @@ static const u8 gen12_xcs_offsets[] = {
|
||||
REG16(0x274),
|
||||
REG16(0x270),
|
||||
|
||||
END
|
||||
0
|
||||
};
|
||||
|
||||
static const u8 dg2_xcs_offsets[] = {
|
||||
@@ -202,7 +201,7 @@ static const u8 dg2_xcs_offsets[] = {
|
||||
REG16(0x274),
|
||||
REG16(0x270),
|
||||
|
||||
END
|
||||
0
|
||||
};
|
||||
|
||||
static const u8 gen12_rcs_offsets[] = {
|
||||
@@ -298,7 +297,7 @@ static const u8 gen12_rcs_offsets[] = {
|
||||
REG(0x084),
|
||||
NOP(1),
|
||||
|
||||
END
|
||||
0
|
||||
};
|
||||
|
||||
static const u8 xehp_rcs_offsets[] = {
|
||||
@@ -339,7 +338,7 @@ static const u8 xehp_rcs_offsets[] = {
|
||||
LRI(1, 0),
|
||||
REG(0x0c8),
|
||||
|
||||
END
|
||||
0
|
||||
};
|
||||
|
||||
static const u8 dg2_rcs_offsets[] = {
|
||||
@@ -382,7 +381,7 @@ static const u8 dg2_rcs_offsets[] = {
|
||||
LRI(1, 0),
|
||||
REG(0x0c8),
|
||||
|
||||
END
|
||||
0
|
||||
};
|
||||
|
||||
static const u8 mtl_rcs_offsets[] = {
|
||||
@@ -425,7 +424,7 @@ static const u8 mtl_rcs_offsets[] = {
|
||||
LRI(1, 0),
|
||||
REG(0x0c8),
|
||||
|
||||
END
|
||||
0
|
||||
};
|
||||
|
||||
#define XE2_CTX_COMMON \
|
||||
@@ -471,7 +470,7 @@ static const u8 xe2_rcs_offsets[] = {
|
||||
LRI(1, 0), /* [0x47] */
|
||||
REG(0x0c8), /* [0x48] R_PWR_CLK_STATE */
|
||||
|
||||
END
|
||||
0
|
||||
};
|
||||
|
||||
static const u8 xe2_bcs_offsets[] = {
|
||||
@@ -482,16 +481,15 @@ static const u8 xe2_bcs_offsets[] = {
|
||||
REG16(0x200), /* [0x42] BCS_SWCTRL */
|
||||
REG16(0x204), /* [0x44] BLIT_CCTL */
|
||||
|
||||
END
|
||||
0
|
||||
};
|
||||
|
||||
static const u8 xe2_xcs_offsets[] = {
|
||||
XE2_CTX_COMMON,
|
||||
|
||||
END
|
||||
0
|
||||
};
|
||||
|
||||
#undef END
|
||||
#undef REG16
|
||||
#undef REG
|
||||
#undef LRI
|
||||
|
||||
@@ -132,7 +132,7 @@ query_engine_cycles(struct xe_device *xe,
|
||||
return -EINVAL;
|
||||
|
||||
eci = &resp.eci;
|
||||
if (eci->gt_id > XE_MAX_GT_PER_TILE)
|
||||
if (eci->gt_id >= XE_MAX_GT_PER_TILE)
|
||||
return -EINVAL;
|
||||
|
||||
gt = xe_device_get_gt(xe, eci->gt_id);
|
||||
|
||||
Reference in New Issue
Block a user