mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-04-15 13:02:46 -04:00
drm/xe: Replace XE_WARN_ON with drm_warn when just printing a string
Use the generic drm_warn instead of the driver-specific XE_WARN_ON in cases where XE_WARN_ON is used to unconditionally print a debug message.

v2: Rebase

Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
This commit is contained in:
committed by
Rodrigo Vivi
parent
30278e2996
commit
5c0553cdc8
@@ -151,6 +151,7 @@ static const struct drm_info_list debugfs_list[] = {
|
||||
|
||||
void xe_gt_debugfs_register(struct xe_gt *gt)
|
||||
{
|
||||
struct xe_device *xe = gt_to_xe(gt);
|
||||
struct drm_minor *minor = gt_to_xe(gt)->drm.primary;
|
||||
struct dentry *root;
|
||||
struct drm_info_list *local;
|
||||
@@ -162,7 +163,7 @@ void xe_gt_debugfs_register(struct xe_gt *gt)
|
||||
sprintf(name, "gt%d", gt->info.id);
|
||||
root = debugfs_create_dir(name, minor->debugfs_root);
|
||||
if (IS_ERR(root)) {
|
||||
XE_WARN_ON("Create GT directory failed");
|
||||
drm_warn(&xe->drm, "Create GT directory failed");
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -172,7 +173,7 @@ void xe_gt_debugfs_register(struct xe_gt *gt)
|
||||
* passed in (e.g. can't define this on the stack).
|
||||
*/
|
||||
#define DEBUGFS_SIZE (ARRAY_SIZE(debugfs_list) * sizeof(struct drm_info_list))
|
||||
local = drmm_kmalloc(&gt_to_xe(gt)->drm, DEBUGFS_SIZE, GFP_KERNEL);
|
||||
local = drmm_kmalloc(&xe->drm, DEBUGFS_SIZE, GFP_KERNEL);
|
||||
if (!local)
|
||||
return;
|
||||
|
||||
|
||||
@@ -322,6 +322,7 @@ static bool pf_queue_full(struct pf_queue *pf_queue)
|
||||
int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
|
||||
{
|
||||
struct xe_gt *gt = guc_to_gt(guc);
|
||||
struct xe_device *xe = gt_to_xe(gt);
|
||||
struct pf_queue *pf_queue;
|
||||
unsigned long flags;
|
||||
u32 asid;
|
||||
@@ -340,7 +341,7 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
|
||||
pf_queue->tail = (pf_queue->tail + len) % PF_QUEUE_NUM_DW;
|
||||
queue_work(gt->usm.pf_wq, &pf_queue->worker);
|
||||
} else {
|
||||
XE_WARN_ON("PF Queue full, shouldn't be possible");
|
||||
drm_warn(&xe->drm, "PF Queue full, shouldn't be possible");
|
||||
}
|
||||
spin_unlock_irqrestore(&pf_queue->lock, flags);
|
||||
|
||||
|
||||
@@ -1022,7 +1022,7 @@ static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
|
||||
adj_len);
|
||||
break;
|
||||
default:
|
||||
XE_WARN_ON("NOT_POSSIBLE");
|
||||
drm_warn(&xe->drm, "NOT_POSSIBLE");
|
||||
}
|
||||
|
||||
if (ret)
|
||||
|
||||
@@ -703,6 +703,7 @@ static void disable_scheduling_deregister(struct xe_guc *guc,
|
||||
struct xe_exec_queue *q)
|
||||
{
|
||||
MAKE_SCHED_CONTEXT_ACTION(q, DISABLE);
|
||||
struct xe_device *xe = guc_to_xe(guc);
|
||||
int ret;
|
||||
|
||||
set_min_preemption_timeout(guc, q);
|
||||
@@ -712,7 +713,7 @@ static void disable_scheduling_deregister(struct xe_guc *guc,
|
||||
if (!ret) {
|
||||
struct xe_gpu_scheduler *sched = &q->guc->sched;
|
||||
|
||||
XE_WARN_ON("Pending enable failed to respond");
|
||||
drm_warn(&xe->drm, "Pending enable failed to respond");
|
||||
xe_sched_submission_start(sched);
|
||||
xe_gt_reset_async(q->gt);
|
||||
xe_sched_tdr_queue_imm(sched);
|
||||
@@ -794,6 +795,8 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
|
||||
struct xe_guc_exec_queue *ge =
|
||||
container_of(w, struct xe_guc_exec_queue, lr_tdr);
|
||||
struct xe_exec_queue *q = ge->q;
|
||||
struct xe_guc *guc = exec_queue_to_guc(q);
|
||||
struct xe_device *xe = guc_to_xe(guc);
|
||||
struct xe_gpu_scheduler *sched = &ge->sched;
|
||||
|
||||
XE_WARN_ON(!xe_exec_queue_is_lr(q));
|
||||
@@ -828,7 +831,7 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
|
||||
!exec_queue_pending_disable(q) ||
|
||||
guc_read_stopped(guc), HZ * 5);
|
||||
if (!ret) {
|
||||
XE_WARN_ON("Schedule disable failed to respond");
|
||||
drm_warn(&xe->drm, "Schedule disable failed to respond");
|
||||
xe_sched_submission_start(sched);
|
||||
xe_gt_reset_async(q->gt);
|
||||
return;
|
||||
@@ -906,7 +909,7 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
|
||||
!exec_queue_pending_disable(q) ||
|
||||
guc_read_stopped(guc), HZ * 5);
|
||||
if (!ret || guc_read_stopped(guc)) {
|
||||
XE_WARN_ON("Schedule disable failed to respond");
|
||||
drm_warn(&xe->drm, "Schedule disable failed to respond");
|
||||
xe_sched_add_pending_job(sched, job);
|
||||
xe_sched_submission_start(sched);
|
||||
xe_gt_reset_async(q->gt);
|
||||
|
||||
@@ -1401,7 +1401,7 @@ static void vm_error_capture(struct xe_vm *vm, int err,
|
||||
}
|
||||
|
||||
if (copy_to_user(address, &capture, sizeof(capture)))
|
||||
XE_WARN_ON("Copy to user failed");
|
||||
drm_warn(&vm->xe->drm, "Copy to user failed");
|
||||
|
||||
if (in_kthread) {
|
||||
kthread_unuse_mm(vm->async_ops.error_capture.mm);
|
||||
@@ -2176,7 +2176,7 @@ static int vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
|
||||
return -ENODATA;
|
||||
break;
|
||||
default:
|
||||
XE_WARN_ON("NOT POSSIBLE");
|
||||
drm_warn(&xe->drm, "NOT POSSIBLE");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@@ -2234,7 +2234,7 @@ static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
|
||||
(ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
|
||||
break;
|
||||
default:
|
||||
XE_WARN_ON("NOT POSSIBLE");
|
||||
drm_warn(&xe->drm, "NOT POSSIBLE");
|
||||
}
|
||||
}
|
||||
#else
|
||||
@@ -2332,7 +2332,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
|
||||
}
|
||||
break;
|
||||
default:
|
||||
XE_WARN_ON("NOT POSSIBLE");
|
||||
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
|
||||
ops = ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
@@ -2463,7 +2463,7 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
|
||||
op->flags |= XE_VMA_OP_COMMITTED;
|
||||
break;
|
||||
default:
|
||||
XE_WARN_ON("NOT POSSIBLE");
|
||||
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
|
||||
}
|
||||
|
||||
return err;
|
||||
@@ -2619,7 +2619,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
|
||||
/* Nothing to do */
|
||||
break;
|
||||
default:
|
||||
XE_WARN_ON("NOT POSSIBLE");
|
||||
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
|
||||
}
|
||||
|
||||
last_op = op;
|
||||
@@ -2734,7 +2734,7 @@ static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
|
||||
op->flags & XE_VMA_OP_LAST);
|
||||
break;
|
||||
default:
|
||||
XE_WARN_ON("NOT POSSIBLE");
|
||||
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
|
||||
}
|
||||
|
||||
if (err)
|
||||
@@ -2812,7 +2812,7 @@ static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
|
||||
op);
|
||||
break;
|
||||
default:
|
||||
XE_WARN_ON("NOT POSSIBLE");
|
||||
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
|
||||
}
|
||||
|
||||
return ret;
|
||||
@@ -2893,7 +2893,7 @@ static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
|
||||
/* Nothing to do */
|
||||
break;
|
||||
default:
|
||||
XE_WARN_ON("NOT POSSIBLE");
|
||||
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -186,7 +186,7 @@ static int madvise_priority(struct xe_device *xe, struct xe_vm *vm,
|
||||
static int madvise_pin(struct xe_device *xe, struct xe_vm *vm,
|
||||
struct xe_vma **vmas, int num_vmas, u64 value)
|
||||
{
|
||||
XE_WARN_ON("NIY");
|
||||
drm_warn(&xe->drm, "NIY");
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user