Merge tag 'drm-xe-fixes-2025-12-19' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-fixes
UAPI Changes:
- Limit num_syncs to prevent oversized kernel allocations (Shuicheng)
- Disallow 0 OA property values (Ashutosh)
- Disallow 0 EU stall property values (Ashutosh)

Driver Changes:
- Fix kobject leak (Shuicheng)
- Workaround (Vinay)
- Loop variable reference fix (Matt Brost)
- Fix a CONFIG corner-case incorrect number of arguments (Arnd Bergmann)
- Skip reason prefix while emitting array (Raag)
- VF migration fix (Tomasz)
- Fix context in mei interrupt top half (Junxiao)
- Don't include the CCS metadata in the dma-buf sg-table (Thomas)
- VF queueing recovery work fix (Satyanarayana)
- Increase TDF timeout (Jagmeet)
- GT reset registers vs scheduler ordering fix (Jan)
- Adjust long-running workload timeslices (Matt Brost)
- Always set OA_OAGLBCTXCTRL_COUNTER_RESUME (Ashutosh)
- Fix a return value (Dan Carpenter)
- Drop preempt-fences when destroying imported dma-bufs (Thomas)
- Use usleep_range for accurate long-running workload timeslicing (Matthew)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Hellstrom <thomas.hellstrom@linux.intel.com>
Link: https://patch.msgid.link/aUSMlQ4iruzm0NQR@fedora
@@ -1527,7 +1527,7 @@ static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo)
 	 * always succeed here, as long as we hold the lru lock.
 	 */
 	spin_lock(&ttm_bo->bdev->lru_lock);
-	locked = dma_resv_trylock(ttm_bo->base.resv);
+	locked = dma_resv_trylock(&ttm_bo->base._resv);
 	spin_unlock(&ttm_bo->bdev->lru_lock);
 	xe_assert(xe, locked);
@@ -1547,13 +1547,6 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
 	bo = ttm_to_xe_bo(ttm_bo);
 	xe_assert(xe_bo_device(bo), !(bo->created && kref_read(&ttm_bo->base.refcount)));
 
-	/*
-	 * Corner case where TTM fails to allocate memory and this BOs resv
-	 * still points the VMs resv
-	 */
-	if (ttm_bo->base.resv != &ttm_bo->base._resv)
-		return;
-
 	if (!xe_ttm_bo_lock_in_destructor(ttm_bo))
 		return;
 
@@ -1563,14 +1556,14 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
 	 * TODO: Don't do this for external bos once we scrub them after
 	 * unbind.
 	 */
-	dma_resv_for_each_fence(&cursor, ttm_bo->base.resv,
+	dma_resv_for_each_fence(&cursor, &ttm_bo->base._resv,
 				DMA_RESV_USAGE_BOOKKEEP, fence) {
 		if (xe_fence_is_xe_preempt(fence) &&
 		    !dma_fence_is_signaled(fence)) {
 			if (!replacement)
 				replacement = dma_fence_get_stub();
 
-			dma_resv_replace_fences(ttm_bo->base.resv,
+			dma_resv_replace_fences(&ttm_bo->base._resv,
 						fence->context,
 						replacement,
 						DMA_RESV_USAGE_BOOKKEEP);
@@ -1578,7 +1571,7 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
 	}
 	dma_fence_put(replacement);
 
-	dma_resv_unlock(ttm_bo->base.resv);
+	dma_resv_unlock(&ttm_bo->base._resv);
 }
 
 static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
@@ -1056,7 +1056,7 @@ static void tdf_request_sync(struct xe_device *xe)
 		 * transient and need to be flushed..
 		 */
 		if (xe_mmio_wait32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
-				   150, NULL, false))
+				   300, NULL, false))
 			xe_gt_err_once(gt, "TD flush timeout\n");
 
 		xe_force_wake_put(gt_to_fw(gt), fw_ref);
@@ -124,7 +124,7 @@ static struct sg_table *xe_dma_buf_map(struct dma_buf_attachment *attach,
 	case XE_PL_TT:
 		sgt = drm_prime_pages_to_sg(obj->dev,
 					    bo->ttm.ttm->pages,
-					    bo->ttm.ttm->num_pages);
+					    obj->size >> PAGE_SHIFT);
 		if (IS_ERR(sgt))
 			return sgt;
 
@@ -315,7 +315,7 @@ static int xe_eu_stall_user_ext_set_property(struct xe_device *xe, u64 extension
 		return -EFAULT;
 
 	if (XE_IOCTL_DBG(xe, ext.property >= ARRAY_SIZE(xe_set_eu_stall_property_funcs)) ||
-	    XE_IOCTL_DBG(xe, ext.pad))
+	    XE_IOCTL_DBG(xe, !ext.property) || XE_IOCTL_DBG(xe, ext.pad))
		return -EINVAL;
 
 	idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_set_eu_stall_property_funcs));
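The hunk above pairs the existing bounds test with a rejection of the reserved index 0, then clamps the index with array_index_nospec() before the table lookup. A minimal sketch of this validate-then-clamp pattern, with a hypothetical handler table standing in for xe_set_eu_stall_property_funcs:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/nospec.h>

/* Hypothetical property-handler table; index 0 is reserved/invalid. */
typedef int (*prop_fn)(u64 value);
static const prop_fn prop_funcs[8];

static int set_property(u64 property, u64 value)
{
	/* Reject out-of-range indices and the reserved index 0. */
	if (!property || property >= ARRAY_SIZE(prop_funcs))
		return -EINVAL;

	/*
	 * Clamp the index so a mispredicted bounds check cannot be used
	 * to speculatively load an out-of-bounds table entry (Spectre v1).
	 */
	property = array_index_nospec(property, ARRAY_SIZE(prop_funcs));

	return prop_funcs[property] ? prop_funcs[property](value) : -EINVAL;
}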
@@ -132,7 +132,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 
 	if (XE_IOCTL_DBG(xe, args->extensions) ||
 	    XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
-	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
+	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]) ||
+	    XE_IOCTL_DBG(xe, args->num_syncs > DRM_XE_MAX_SYNCS))
 		return -EINVAL;
 
 	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
@@ -797,9 +797,6 @@ static int do_gt_restart(struct xe_gt *gt)
 		xe_gt_sriov_pf_init_hw(gt);
 
 	xe_mocs_init(gt);
-	err = xe_uc_start(&gt->uc);
-	if (err)
-		return err;
 
 	for_each_hw_engine(hwe, gt, id)
 		xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
@@ -807,6 +804,10 @@ static int do_gt_restart(struct xe_gt *gt)
 	/* Get CCS mode in sync between sw/hw */
 	xe_gt_apply_ccs_mode(gt);
 
+	err = xe_uc_start(&gt->uc);
+	if (err)
+		return err;
+
 	/* Restore GT freq to expected values */
 	xe_gt_sanitize_freq(gt);
@@ -293,8 +293,10 @@ int xe_gt_freq_init(struct xe_gt *gt)
 		return -ENOMEM;
 
 	err = sysfs_create_files(gt->freq, freq_attrs);
-	if (err)
+	if (err) {
+		kobject_put(gt->freq);
 		return err;
+	}
 
 	err = devm_add_action_or_reset(xe->drm.dev, freq_fini, gt->freq);
 	if (err)
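For context on the leak fix above: once a kobject has been created and added, its memory may only be released through kobject_put(), which drops the reference and invokes the release callback. Returning on a failed sysfs_create_files() without the put leaks the object. A condensed sketch of the corrected error path, with hypothetical names:

#include <linux/kobject.h>
#include <linux/sysfs.h>

/* Hypothetical init: create a child kobject and populate its attributes. */
static int init_freq_sysfs(struct kobject *parent,
			   const struct attribute * const *attrs)
{
	struct kobject *freq = kobject_create_and_add("freq", parent);
	int err;

	if (!freq)
		return -ENOMEM;

	err = sysfs_create_files(freq, attrs);
	if (err) {
		kobject_put(freq);	/* drop the ref so ->release frees it */
		return err;
	}

	return 0;
}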
@@ -5,6 +5,7 @@
 
 #include <drm/drm_managed.h>
 
+#include <generated/xe_wa_oob.h>
 #include "xe_force_wake.h"
 #include "xe_device.h"
 #include "xe_gt.h"
@@ -16,6 +17,7 @@
 #include "xe_mmio.h"
 #include "xe_pm.h"
 #include "xe_sriov.h"
+#include "xe_wa.h"
 
 /**
  * DOC: Xe GT Idle
@@ -145,6 +147,12 @@ void xe_gt_idle_enable_pg(struct xe_gt *gt)
 		xe_mmio_write32(mmio, RENDER_POWERGATE_IDLE_HYSTERESIS, 25);
 	}
 
+	if (XE_GT_WA(gt, 14020316580))
+		gtidle->powergate_enable &= ~(VDN_HCP_POWERGATE_ENABLE(0) |
+					      VDN_MFXVDENC_POWERGATE_ENABLE(0) |
+					      VDN_HCP_POWERGATE_ENABLE(2) |
+					      VDN_MFXVDENC_POWERGATE_ENABLE(2));
+
 	xe_mmio_write32(mmio, POWERGATE_ENABLE, gtidle->powergate_enable);
 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
 }
@@ -733,7 +733,7 @@ static void vf_start_migration_recovery(struct xe_gt *gt)
 
 	spin_lock(&gt->sriov.vf.migration.lock);
 
-	if (!gt->sriov.vf.migration.recovery_queued ||
+	if (!gt->sriov.vf.migration.recovery_queued &&
 	    !gt->sriov.vf.migration.recovery_teardown) {
 		gt->sriov.vf.migration.recovery_queued = true;
 		WRITE_ONCE(gt->sriov.vf.migration.recovery_inprogress, true);
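The one-character fix above changes the guard's meaning entirely: with `||` the branch was taken whenever recovery was merely not queued yet, including during teardown; with `&&` recovery is queued only when it is neither already queued nor being torn down. By De Morgan's law the corrected guard rejects both conditions, as this small standalone illustration shows:

#include <stdbool.h>

/* Mirrors the corrected guard: queue recovery work only if none is
 * queued yet AND the VF is not being torn down. */
static bool should_queue(bool queued, bool teardown)
{
	return !queued && !teardown;	/* == !(queued || teardown) */
}
/* The buggy '!queued || !teardown' is true in three of four cases,
 * e.g. it would queue recovery work while teardown is in progress. */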
@@ -140,7 +140,7 @@ static ssize_t reasons_show(struct kobject *kobj,
 		struct throttle_attribute *other_ta = kobj_attribute_to_throttle(kattr);
 
 		if (other_ta->mask != U32_MAX && reasons & other_ta->mask)
-			ret += sysfs_emit_at(buff, ret, "%s ", (*pother)->name);
+			ret += sysfs_emit_at(buff, ret, "%s ", (*pother)->name + strlen("reason_"));
 	}
 
 	if (drm_WARN_ONCE(&xe->drm, !ret, "Unknown reason: %#x\n", reasons))
@@ -717,25 +717,45 @@ static bool vf_recovery(struct xe_guc *guc)
 	return xe_gt_recovery_pending(guc_to_gt(guc));
 }
 
+static inline void relaxed_ms_sleep(unsigned int delay_ms)
+{
+	unsigned long min_us, max_us;
+
+	if (!delay_ms)
+		return;
+
+	if (delay_ms > 20) {
+		msleep(delay_ms);
+		return;
+	}
+
+	min_us = mul_u32_u32(delay_ms, 1000);
+	max_us = min_us + 500;
+
+	usleep_range(min_us, max_us);
+}
+
 static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size)
 {
 	struct xe_guc *guc = exec_queue_to_guc(q);
 	struct xe_device *xe = guc_to_xe(guc);
 	struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
-	unsigned int sleep_period_ms = 1;
+	unsigned int sleep_period_ms = 1, sleep_total_ms = 0;
 
 #define AVAILABLE_SPACE \
 	CIRC_SPACE(q->guc->wqi_tail, q->guc->wqi_head, WQ_SIZE)
 	if (wqi_size > AVAILABLE_SPACE && !vf_recovery(guc)) {
 try_again:
 		q->guc->wqi_head = parallel_read(xe, map, wq_desc.head);
-		if (wqi_size > AVAILABLE_SPACE) {
-			if (sleep_period_ms == 1024) {
+		if (wqi_size > AVAILABLE_SPACE && !vf_recovery(guc)) {
+			if (sleep_total_ms > 2000) {
 				xe_gt_reset_async(q->gt);
 				return -ENODEV;
 			}
 
 			msleep(sleep_period_ms);
+			sleep_total_ms += sleep_period_ms;
 			if (sleep_period_ms < 64)
 				sleep_period_ms <<= 1;
 			goto try_again;
 		}
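Two things change in wq_wait_for_space() above: the give-up condition moves from "backoff period reached 1024 ms" to "total time slept exceeds 2 s", and each retry re-checks for VF recovery. The new relaxed_ms_sleep() helper follows the usual kernel guidance: msleep() rounds up to jiffies, so sub-20 ms sleeps can overshoot badly, while usleep_range() is hrtimer-backed and stays close to the request. A condensed sketch of the resulting retry shape, with a hypothetical has_space() predicate standing in for the CIRC_SPACE test:

#include <linux/delay.h>
#include <linux/errno.h>

/* Exponential backoff capped by total elapsed sleep, not period size. */
static int wait_for_space(bool (*has_space)(void))
{
	unsigned int period_ms = 1, total_ms = 0;

	while (!has_space()) {
		if (total_ms > 2000)
			return -ENODEV;	/* caller escalates to a GT reset */

		msleep(period_ms);
		total_ms += period_ms;
		if (period_ms < 64)
			period_ms <<= 1;	/* back off: 1, 2, 4, ... 64 ms */
	}

	return 0;
}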
@@ -1585,7 +1605,7 @@ static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
 			since_resume_ms;
 
 		if (wait_ms > 0 && q->guc->resume_time)
-			msleep(wait_ms);
+			relaxed_ms_sleep(wait_ms);
 
 		set_exec_queue_suspended(q);
 		disable_scheduling(q, false);
@@ -2253,10 +2273,11 @@ static void guc_exec_queue_unpause_prepare(struct xe_guc *guc,
 					   struct xe_exec_queue *q)
 {
 	struct xe_gpu_scheduler *sched = &q->guc->sched;
-	struct xe_sched_job *job = NULL;
+	struct xe_sched_job *job = NULL, *__job;
 	bool restore_replay = false;
 
-	list_for_each_entry(job, &sched->base.pending_list, drm.list) {
+	list_for_each_entry(__job, &sched->base.pending_list, drm.list) {
+		job = __job;
 		restore_replay |= job->restore_replay;
 		if (restore_replay) {
 			xe_gt_dbg(guc_to_gt(guc), "Replay JOB - guc_id=%d, seqno=%d",
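The loop-variable fix above addresses a classic list_for_each_entry() pitfall: once the loop runs to completion, the cursor does not point at a valid entry but at the container of the list head. Iterating with a scratch cursor and copying it into job keeps a usable pointer to the last real element. A minimal illustration with hypothetical types:

#include <linux/list.h>

struct item {
	struct list_head link;
	int seqno;
};

static struct item *last_item(struct list_head *head)
{
	struct item *cur, *last = NULL;

	/*
	 * After full iteration 'cur' aliases the list head, not an item,
	 * so a second variable must remember the last element visited.
	 */
	list_for_each_entry(cur, head, link)
		last = cur;

	return last;	/* NULL if the list was empty */
}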
@@ -223,7 +223,7 @@ void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir)
 	if (xe->heci_gsc.irq < 0)
 		return;
 
-	ret = generic_handle_irq(xe->heci_gsc.irq);
+	ret = generic_handle_irq_safe(xe->heci_gsc.irq);
 	if (ret)
 		drm_err_ratelimited(&xe->drm, "error handling GSC irq: %d\n", ret);
 }
@@ -243,7 +243,7 @@ void xe_heci_csc_irq_handler(struct xe_device *xe, u32 iir)
 	if (xe->heci_gsc.irq < 0)
 		return;
 
-	ret = generic_handle_irq(xe->heci_gsc.irq);
+	ret = generic_handle_irq_safe(xe->heci_gsc.irq);
 	if (ret)
 		drm_err_ratelimited(&xe->drm, "error handling GSC irq: %d\n", ret);
 }
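Background on the context fix above: generic_handle_irq() must be called from hardirq context with interrupts disabled, which no longer holds where the GSC/CSC interrupts are forwarded from; generic_handle_irq_safe() disables local interrupts itself and may be called from any context. A sketch of forwarding a demultiplexed child interrupt this way, with hypothetical names:

#include <linux/irq.h>
#include <linux/printk.h>

/* Hypothetical demux: forward a child IRQ found while decoding a
 * parent's status register, from whatever context we were invoked in. */
static void forward_child_irq(int child_irq)
{
	int ret;

	if (child_irq < 0)
		return;

	ret = generic_handle_irq_safe(child_irq);	/* irq-safe variant */
	if (ret)
		pr_err_ratelimited("child irq %d failed: %d\n", child_irq, ret);
}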
@@ -1105,11 +1105,12 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
 			oag_buf_size_select(stream) |
 			oag_configure_mmio_trigger(stream, true));
 
-	xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctx_ctrl, stream->periodic ?
-			(OAG_OAGLBCTXCTRL_COUNTER_RESUME |
+	xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctx_ctrl,
+			OAG_OAGLBCTXCTRL_COUNTER_RESUME |
+			(stream->periodic ?
 			 OAG_OAGLBCTXCTRL_TIMER_ENABLE |
 			 REG_FIELD_PREP(OAG_OAGLBCTXCTRL_TIMER_PERIOD_MASK,
-					stream->period_exponent)) : 0);
+					stream->period_exponent) : 0));
 
 	/*
 	 * Initialize Super Queue Internal Cnt Register
@@ -1254,6 +1255,9 @@ static int xe_oa_set_no_preempt(struct xe_oa *oa, u64 value,
 static int xe_oa_set_prop_num_syncs(struct xe_oa *oa, u64 value,
 				    struct xe_oa_open_param *param)
 {
+	if (XE_IOCTL_DBG(oa->xe, value > DRM_XE_MAX_SYNCS))
+		return -EINVAL;
+
 	param->num_syncs = value;
 	return 0;
 }
@@ -1343,7 +1347,7 @@ static int xe_oa_user_ext_set_property(struct xe_oa *oa, enum xe_oa_user_extn_fr
 					       ARRAY_SIZE(xe_oa_set_property_funcs_config));
 
 	if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs_open)) ||
-	    XE_IOCTL_DBG(oa->xe, ext.pad))
+	    XE_IOCTL_DBG(oa->xe, !ext.property) || XE_IOCTL_DBG(oa->xe, ext.pad))
 		return -EINVAL;
 
 	idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs_open));
@@ -21,7 +21,7 @@ EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_get_pf, "xe-vfio-pci");
 bool xe_sriov_vfio_migration_supported(struct xe_device *xe)
 {
 	if (!IS_SRIOV_PF(xe))
-		return -EPERM;
+		return false;
 
 	return xe_sriov_pf_migration_supported(xe);
 }
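The one-liner above fixes an inverted result: the function is declared bool, and in C any nonzero value, including -EPERM, converts to true, so the "not a PF" path claimed migration was supported. A standalone illustration of the coercion:

#include <stdbool.h>
#include <stdio.h>

static bool broken(void) { return -1; }	/* -EPERM-style: coerces to true */
static bool fixed(void)  { return false; }

int main(void)
{
	printf("%d %d\n", broken(), fixed());	/* prints "1 0" */
	return 0;
}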
@@ -214,7 +214,7 @@ int xe_svm_init(struct xe_vm *vm)
 {
 #if IS_ENABLED(CONFIG_DRM_GPUSVM)
 	return drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)", &vm->xe->drm,
-			       NULL, NULL, 0, 0, 0, NULL, NULL, 0);
+			       NULL, 0, 0, 0, NULL, NULL, 0);
 #else
 	return 0;
 #endif
@@ -1508,7 +1508,10 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
 	INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
 
 	INIT_LIST_HEAD(&vm->preempt.exec_queues);
-	vm->preempt.min_run_period_ms = 10;	/* FIXME: Wire up to uAPI */
+	if (flags & XE_VM_FLAG_FAULT_MODE)
+		vm->preempt.min_run_period_ms = 0;
+	else
+		vm->preempt.min_run_period_ms = 5;
 
 	for_each_tile(tile, xe, id)
 		xe_range_fence_tree_init(&vm->rftree[id]);
@@ -3324,6 +3327,9 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
 	if (XE_IOCTL_DBG(xe, args->extensions))
 		return -EINVAL;
 
+	if (XE_IOCTL_DBG(xe, args->num_syncs > DRM_XE_MAX_SYNCS))
+		return -EINVAL;
+
 	if (args->num_binds > 1) {
 		u64 __user *bind_user =
 			u64_to_user_ptr(args->vector_of_binds);
@@ -263,7 +263,7 @@ struct xe_vm {
 		 * @min_run_period_ms: The minimum run period before preempting
 		 * an engine again
 		 */
-		s64 min_run_period_ms;
+		unsigned int min_run_period_ms;
 		/** @exec_queues: list of exec queues attached to this VM */
 		struct list_head exec_queues;
 		/** @num_exec_queues: number exec queues attached to this VM */
@@ -270,14 +270,6 @@ static const struct xe_rtp_entry_sr gt_was[] = {
 	  XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F1C(0), MFXPIPE_CLKGATE_DIS)),
 	  XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
 	},
-	{ XE_RTP_NAME("14020316580"),
-	  XE_RTP_RULES(MEDIA_VERSION(1301)),
-	  XE_RTP_ACTIONS(CLR(POWERGATE_ENABLE,
-			     VDN_HCP_POWERGATE_ENABLE(0) |
-			     VDN_MFXVDENC_POWERGATE_ENABLE(0) |
-			     VDN_HCP_POWERGATE_ENABLE(2) |
-			     VDN_MFXVDENC_POWERGATE_ENABLE(2))),
-	},
 	{ XE_RTP_NAME("14019449301"),
 	  XE_RTP_RULES(MEDIA_VERSION(1301), ENGINE_CLASS(VIDEO_DECODE)),
 	  XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F08(0), CG3DDISHRS_CLKGATE_DIS)),
@@ -76,3 +76,4 @@
 
 15015404425_disable	PLATFORM(PANTHERLAKE), MEDIA_STEP(B0, FOREVER)
 16026007364	MEDIA_VERSION(3000)
+14020316580	MEDIA_VERSION(1301)
@@ -1463,6 +1463,7 @@ struct drm_xe_exec {
 	/** @exec_queue_id: Exec queue ID for the batch buffer */
 	__u32 exec_queue_id;
 
+#define DRM_XE_MAX_SYNCS	1024
 	/** @num_syncs: Amount of struct drm_xe_sync in array. */
 	__u32 num_syncs;
 
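With DRM_XE_MAX_SYNCS in the uAPI header, the kernel now rejects exec, VM-bind, and OA open requests carrying more than 1024 syncs, bounding the kernel-side sync-array allocation. Userspace can mirror the check before issuing the ioctl; a hedged sketch with a hypothetical helper:

#include <errno.h>
#include <drm/xe_drm.h>		/* DRM_XE_MAX_SYNCS, struct drm_xe_exec */

/* Hypothetical submit prep: refuse oversized sync arrays up front
 * rather than relying on the kernel's new -EINVAL. */
static int prep_exec(struct drm_xe_exec *exec, unsigned int num_syncs)
{
	if (num_syncs > DRM_XE_MAX_SYNCS)
		return -E2BIG;

	exec->num_syncs = num_syncs;
	return 0;	/* caller then issues DRM_IOCTL_XE_EXEC */
}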