// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_gt.h"

#include <linux/minmax.h>

#include <drm/drm_managed.h>
#include <uapi/drm/xe_drm.h>

#include <generated/xe_device_wa_oob.h>
#include <generated/xe_wa_oob.h>

#include "instructions/xe_alu_commands.h"
#include "instructions/xe_mi_commands.h"
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_bb.h"
#include "xe_device.h"
#include "xe_eu_stall.h"
#include "xe_exec_queue.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gsc.h"
#include "xe_gt_ccs_mode.h"
#include "xe_gt_clock.h"
#include "xe_gt_freq.h"
#include "xe_gt_idle.h"
#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_vf.h"
#include "xe_gt_stats.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_topology.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_pc.h"
#include "xe_guc_rc.h"
#include "xe_guc_submit.h"
#include "xe_hw_fence.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_irq.h"
#include "xe_lmtt.h"
#include "xe_lrc.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_mmio.h"
#include "xe_pagefault.h"
#include "xe_pat.h"
#include "xe_pm.h"
#include "xe_mocs.h"
#include "xe_reg_sr.h"
#include "xe_ring_ops.h"
#include "xe_sa.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
#include "xe_tlb_inval.h"
#include "xe_tuning.h"
#include "xe_uc.h"
#include "xe_uc_fw.h"
#include "xe_vm.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
{
        struct xe_device *xe = tile_to_xe(tile);
        struct drm_device *drm = &xe->drm;
        bool shared_wq = xe->info.needs_shared_vf_gt_wq && tile->primary_gt &&
                         IS_SRIOV_VF(xe);
        struct workqueue_struct *ordered_wq;
        struct xe_gt *gt;

        gt = drmm_kzalloc(drm, sizeof(*gt), GFP_KERNEL);
        if (!gt)
                return ERR_PTR(-ENOMEM);

        gt->tile = tile;
        if (shared_wq && tile->primary_gt->ordered_wq)
                ordered_wq = tile->primary_gt->ordered_wq;
        else
                ordered_wq = drmm_alloc_ordered_workqueue(drm, "gt-ordered-wq",
                                                          WQ_MEM_RECLAIM);
        if (IS_ERR(ordered_wq))
                return ERR_CAST(ordered_wq);

        gt->ordered_wq = ordered_wq;

        return gt;
}

void xe_gt_sanitize(struct xe_gt *gt)
{
        /*
         * FIXME: if xe_uc_sanitize is called here, on TGL driver will not
         * reload
         */
        xe_guc_submit_disable(&gt->uc.guc);
}

static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
{
        u32 reg;

        if (!XE_GT_WA(gt, 16023588340))
                return;

        CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
        if (!fw_ref.domains)
                return;

        if (xe_gt_is_main_type(gt)) {
                reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
                reg |= CG_DIS_CNTLBUS;
                xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
        }

        xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0xF);
}

static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
{
        u32 reg;

        if (!XE_GT_WA(gt, 16023588340))
                return;

        if (xe_gt_is_media_type(gt))
                return;

        CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
        if (!fw_ref.domains)
                return;

        reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
        reg &= ~CG_DIS_CNTLBUS;
        xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
}

static void xe_gt_enable_comp_1wcoh(struct xe_gt *gt)
{
        struct xe_device *xe = gt_to_xe(gt);
        u32 reg;

        if (IS_SRIOV_VF(xe))
                return;

        if (GRAPHICS_VER(xe) >= 30 && xe->info.has_flat_ccs) {
                CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
                if (!fw_ref.domains)
                        return;

                reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
                reg |= EN_CMP_1WCOH;
                xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);

                if (xe_gt_is_media_type(gt)) {
                        xe_mmio_rmw32(&gt->mmio, XE2_GAMWALK_CTRL_MEDIA, 0, EN_CMP_1WCOH_GW);
                } else {
                        reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMWALK_CTRL_3D);
                        reg |= EN_CMP_1WCOH_GW;
                        xe_gt_mcr_multicast_write(gt, XE2_GAMWALK_CTRL_3D, reg);
                }
        }
}

static void gt_reset_worker(struct work_struct *w);

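/*
 * Create a job for @bb on @q, submit it, and synchronously wait up to
 * @timeout_jiffies for its finished fence to signal. Returns 0 on success,
 * -ETIME on timeout or a negative error code on failure.
 */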
static int emit_job_sync(struct xe_exec_queue *q, struct xe_bb *bb,
                         long timeout_jiffies, bool force_reset)
{
        struct xe_sched_job *job;
        struct dma_fence *fence;
        long timeout;

        job = xe_bb_create_job(q, bb);
        if (IS_ERR(job))
                return PTR_ERR(job);

        job->ring_ops_force_reset = force_reset;

        xe_sched_job_arm(job);
        fence = dma_fence_get(&job->drm.s_fence->finished);
        xe_sched_job_push(job);

        timeout = dma_fence_wait_timeout(fence, false, timeout_jiffies);
        dma_fence_put(fence);
        if (timeout < 0)
                return timeout;
        else if (!timeout)
                return -ETIME;

        return 0;
}

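/*
 * Submit a trivial batch on @q; used below to switch the engine to a
 * different LRC while a default context image is being recorded.
 */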
static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
        struct xe_bb *bb;
        int ret;

        bb = xe_bb_new(gt, 4, false);
        if (IS_ERR(bb))
                return PTR_ERR(bb);

        ret = emit_job_sync(q, bb, HZ, false);
        xe_bb_free(bb, NULL);

        return ret;
}

/* Dwords required to emit a RMW of a register */
#define EMIT_RMW_DW 20
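
/*
 * The 20 dwords cover the per-register sequence emitted in emit_wa_job():
 * MI_LOAD_REGISTER_REG into a GPR (3) + MI_LOAD_REGISTER_IMM for the
 * clr/set GPRs (5) + MI_MATH(8) (9) + MI_LOAD_REGISTER_REG back (3).
 */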

static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
        struct xe_hw_engine *hwe = q->hwe;
        struct xe_reg_sr *sr = &hwe->reg_lrc;
        struct xe_reg_sr_entry *entry;
        int count_rmw = 0, count_rmw_mcr = 0, count = 0, ret;
        unsigned long idx;
        struct xe_bb *bb;
        size_t bb_len = 0;
        u32 *cs;

        /* count RMW registers as those will be handled separately */
        xa_for_each(&sr->xa, idx, entry) {
                if (entry->reg.masked || entry->clr_bits == ~0)
                        ++count;
                else if (entry->reg.mcr)
                        ++count_rmw_mcr;
                else
                        ++count_rmw;
        }

        if (count)
                bb_len += count * 2 + 1;

        /*
         * RMW of MCR registers is the same as a normal RMW, except an
         * additional LRI (3 dwords) is required per register to steer the read
         * to a non-terminated instance.
         *
         * We could probably shorten the batch slightly by eliding the
         * steering for consecutive MCR registers that have the same
         * group/instance target, but it's not worth the extra complexity to do
         * so.
         */
        bb_len += count_rmw * EMIT_RMW_DW;
        bb_len += count_rmw_mcr * (EMIT_RMW_DW + 3);

        /*
         * After doing all RMW, we need 7 trailing dwords to clean up,
         * plus an additional 3 dwords to reset steering if any of the
         * registers were MCR.
         */
        if (count_rmw || count_rmw_mcr)
                bb_len += 7 + (count_rmw_mcr ? 3 : 0);

        if (hwe->class == XE_ENGINE_CLASS_RENDER)
                /*
                 * Big enough to emit all of the context's 3DSTATE via
                 * xe_lrc_emit_hwe_state_instructions()
                 */
                bb_len += xe_gt_lrc_size(gt, hwe->class) / sizeof(u32);

        xe_gt_dbg(gt, "LRC %s WA job: %zu dwords\n", hwe->name, bb_len);

        bb = xe_bb_new(gt, bb_len, false);
        if (IS_ERR(bb))
                return PTR_ERR(bb);

        cs = bb->cs;

        if (count) {
                /*
                 * Emit single LRI with all non RMW regs: 1 leading dw + 2dw per
                 * reg + 1
                 */

                *cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);

                xa_for_each(&sr->xa, idx, entry) {
                        struct xe_reg reg = entry->reg;
                        u32 val;

                        if (reg.masked)
                                val = entry->clr_bits << 16;
                        else if (entry->clr_bits == ~0)
                                val = 0;
                        else
                                continue;

                        val |= entry->set_bits;

                        *cs++ = reg.addr;
                        *cs++ = val;
                        xe_gt_dbg(gt, "REG[0x%x] = 0x%08x\n", reg.addr, val);
                }
        }

        if (count_rmw || count_rmw_mcr) {
                xa_for_each(&sr->xa, idx, entry) {
                        if (entry->reg.masked || entry->clr_bits == ~0)
                                continue;

                        if (entry->reg.mcr) {
                                struct xe_reg_mcr reg = { .__reg.raw = entry->reg.raw };
                                u8 group, instance;

                                xe_gt_mcr_get_nonterminated_steering(gt, reg, &group, &instance);
                                *cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1);
                                *cs++ = CS_MMIO_GROUP_INSTANCE_SELECT(hwe->mmio_base).addr;
                                *cs++ = SELECTIVE_READ_ADDRESSING |
                                        REG_FIELD_PREP(SELECTIVE_READ_GROUP, group) |
                                        REG_FIELD_PREP(SELECTIVE_READ_INSTANCE, instance);
                        }

                        *cs++ = MI_LOAD_REGISTER_REG | MI_LRR_DST_CS_MMIO;
                        *cs++ = entry->reg.addr;
                        *cs++ = CS_GPR_REG(0, 0).addr;

                        *cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) |
                                MI_LRI_LRM_CS_MMIO;
                        *cs++ = CS_GPR_REG(0, 1).addr;
                        *cs++ = entry->clr_bits;
                        *cs++ = CS_GPR_REG(0, 2).addr;
                        *cs++ = entry->set_bits;

                        *cs++ = MI_MATH(8);
                        *cs++ = CS_ALU_INSTR_LOAD(SRCA, REG0);
                        *cs++ = CS_ALU_INSTR_LOADINV(SRCB, REG1);
                        *cs++ = CS_ALU_INSTR_AND;
                        *cs++ = CS_ALU_INSTR_STORE(REG0, ACCU);
                        *cs++ = CS_ALU_INSTR_LOAD(SRCA, REG0);
                        *cs++ = CS_ALU_INSTR_LOAD(SRCB, REG2);
                        *cs++ = CS_ALU_INSTR_OR;
                        *cs++ = CS_ALU_INSTR_STORE(REG0, ACCU);

                        *cs++ = MI_LOAD_REGISTER_REG | MI_LRR_SRC_CS_MMIO;
                        *cs++ = CS_GPR_REG(0, 0).addr;
                        *cs++ = entry->reg.addr;

                        xe_gt_dbg(gt, "REG[%#x] = ~%#x|%#x%s\n",
                                  entry->reg.addr, entry->clr_bits, entry->set_bits,
                                  entry->reg.mcr ? " (MCR)" : "");
                }

                /* reset used GPR */
                *cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(3) |
                        MI_LRI_LRM_CS_MMIO;
                *cs++ = CS_GPR_REG(0, 0).addr;
                *cs++ = 0;
                *cs++ = CS_GPR_REG(0, 1).addr;
                *cs++ = 0;
                *cs++ = CS_GPR_REG(0, 2).addr;
                *cs++ = 0;

                /* reset steering */
                if (count_rmw_mcr) {
                        *cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1);
                        *cs++ = CS_MMIO_GROUP_INSTANCE_SELECT(q->hwe->mmio_base).addr;
                        *cs++ = 0;
                }
        }

        cs = xe_lrc_emit_hwe_state_instructions(q, cs);

        bb->len = cs - bb->cs;

        /* only VFs need to trigger reset to get a clean NULL context */
        ret = emit_job_sync(q, bb, HZ, IS_SRIOV_VF(gt_to_xe(gt)));

        xe_bb_free(bb, NULL);

        return ret;
}

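/**
 * xe_gt_record_default_lrcs() - Record the default LRC state per engine class
 * @gt: the GT object
 *
 * Run a workaround batch on one engine of each class to prime a known good
 * context, switch to a different LRC via a nop job, then copy back the
 * resulting context image as the class's default ("golden") LRC.
 *
 * Return: 0 on success, negative error code otherwise.
 */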
int xe_gt_record_default_lrcs(struct xe_gt *gt)
{
        struct xe_device *xe = gt_to_xe(gt);
        struct xe_hw_engine *hwe;
        enum xe_hw_engine_id id;
        int err = 0;

        for_each_hw_engine(hwe, gt, id) {
                struct xe_exec_queue *q, *nop_q;
                void *default_lrc;

                if (gt->default_lrc[hwe->class])
                        continue;

                xe_reg_sr_init(&hwe->reg_lrc, hwe->name, xe);
                xe_wa_process_lrc(hwe);
                xe_hw_engine_setup_default_lrc_state(hwe);
                xe_tuning_process_lrc(hwe);

                default_lrc = drmm_kzalloc(&xe->drm,
                                           xe_gt_lrc_size(gt, hwe->class),
                                           GFP_KERNEL);
                if (!default_lrc)
                        return -ENOMEM;

                q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1,
                                         hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
                if (IS_ERR(q)) {
                        err = PTR_ERR(q);
                        xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
                                  hwe->name, q);
                        return err;
                }

                /* Prime golden LRC with known good state */
                err = emit_wa_job(gt, q);
                if (err) {
                        xe_gt_err(gt, "hwe %s: emit_wa_job failed (%pe) guc_id=%u\n",
                                  hwe->name, ERR_PTR(err), q->guc->id);
                        goto put_exec_queue;
                }

                nop_q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance),
                                             1, hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
                if (IS_ERR(nop_q)) {
                        err = PTR_ERR(nop_q);
                        xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
                                  hwe->name, nop_q);
                        goto put_exec_queue;
                }

                /* Switch to different LRC */
                err = emit_nop_job(gt, nop_q);
                if (err) {
                        xe_gt_err(gt, "hwe %s: nop emit_nop_job failed (%pe) guc_id=%u\n",
                                  hwe->name, ERR_PTR(err), nop_q->guc->id);
                        goto put_nop_q;
                }

                xe_map_memcpy_from(xe, default_lrc,
                                   &q->lrc[0]->bo->vmap,
                                   xe_lrc_pphwsp_offset(q->lrc[0]),
                                   xe_gt_lrc_size(gt, hwe->class));

                gt->default_lrc[hwe->class] = default_lrc;
put_nop_q:
                xe_exec_queue_put(nop_q);
put_exec_queue:
                xe_exec_queue_put(q);
                if (err)
                        break;
        }

        return err;
}

static void wa_14026539277(struct xe_gt *gt)
{
        struct xe_device *xe = gt_to_xe(gt);
        u32 val;

        /*
         * FIXME: We currently can't use FUNC(xe_rtp_match_not_sriov_vf) in the
         * rules for Wa_14026539277 due to xe_wa_process_device_oob() being
         * called before xe_sriov_probe_early(); and we can't move the call to
         * the former to happen after the latter because MMIO read functions
         * already depend on a device OOB workaround. This needs to be fixed by
         * allowing workaround checks to happen at different stages of driver
         * initialization.
         */
        if (IS_SRIOV_VF(xe))
                return;

        if (!XE_DEVICE_WA(xe, 14026539277))
                return;

        if (!xe_gt_is_main_type(gt))
                return;

        val = xe_gt_mcr_unicast_read_any(gt, L2COMPUTESIDECTRL);
        val &= ~CECTRL;
        val |= CECTRL_CENODATA_ALWAYS;
        xe_gt_mcr_multicast_write(gt, L2COMPUTESIDECTRL, val);
}

int xe_gt_init_early(struct xe_gt *gt)
{
        int err;

        if (IS_SRIOV_PF(gt_to_xe(gt))) {
                err = xe_gt_sriov_pf_init_early(gt);
                if (err)
                        return err;
        }

        if (IS_SRIOV_VF(gt_to_xe(gt))) {
                err = xe_gt_sriov_vf_init_early(gt);
                if (err)
                        return err;
        }

        xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt));

        err = xe_wa_gt_init(gt);
        if (err)
                return err;

        err = xe_tuning_init(gt);
        if (err)
                return err;

        xe_wa_process_gt_oob(gt);

        xe_force_wake_init_gt(gt, gt_to_fw(gt));
        spin_lock_init(&gt->global_invl_lock);

        err = xe_gt_tlb_inval_init_early(gt);
        if (err)
                return err;

        xe_mocs_init_early(gt);

        /*
         * Only after this point can GT-specific MMIO operations
         * (including things like communication with the GuC)
         * be performed.
         */
        xe_gt_mmio_init(gt);

        err = xe_uc_init_noalloc(&gt->uc);
        if (err)
                return err;

        err = xe_gt_stats_init(gt);
        if (err)
                return err;

        CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
        if (!fw_ref.domains)
                return -ETIMEDOUT;

        xe_gt_mcr_init_early(gt);
        xe_pat_init(gt);

        return 0;
}

static void dump_pat_on_error(struct xe_gt *gt)
{
        struct drm_printer p;
        char prefix[32];

        snprintf(prefix, sizeof(prefix), "[GT%u Error]", gt->info.id);
        p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, prefix);

        xe_pat_dump(gt, &p);
}

static int gt_init_with_gt_forcewake(struct xe_gt *gt)
{
        int err;

        CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
        if (!fw_ref.domains)
                return -ETIMEDOUT;

        err = xe_uc_init(&gt->uc);
        if (err)
                return err;

        xe_gt_topology_init(gt);
        xe_gt_mcr_init(gt);
        xe_gt_enable_host_l2_vram(gt);
        xe_gt_enable_comp_1wcoh(gt);

        if (xe_gt_is_main_type(gt)) {
                err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
                if (err)
                        return err;
                if (IS_SRIOV_PF(gt_to_xe(gt)))
                        xe_lmtt_init(&gt_to_tile(gt)->sriov.pf.lmtt);
        }

        /* Enable per hw engine IRQs */
        xe_irq_enable_hwe(gt);

        /* Rerun MCR init as we now have hw engine list */
        xe_gt_mcr_init(gt);

        err = xe_hw_engines_init_early(gt);
        if (err) {
                dump_pat_on_error(gt);
                return err;
        }

        err = xe_hw_engine_class_sysfs_init(gt);
        if (err)
                return err;

        /* Initialize CCS mode sysfs after early initialization of HW engines */
        err = xe_gt_ccs_mode_sysfs_init(gt);
        if (err)
                return err;

        /*
         * Stash hardware-reported version. Since this register does not exist
         * on pre-MTL platforms, reading it there will (correctly) return 0.
         */
        gt->info.gmdid = xe_mmio_read32(&gt->mmio, GMD_ID);

        /*
         * Wa_14026539277 can't be implemented as a regular GT workaround (i.e.
         * as an entry in gt_was[]) for two reasons: it is actually a device
         * workaround that happens to involve programming a GT register; and it
         * needs to be applied early to avoid getting the hardware in a bad
         * state before we have a chance to do the necessary programming.
         */
        wa_14026539277(gt);

        return 0;
}

static int gt_init_with_all_forcewake(struct xe_gt *gt)
{
        int err;

        CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
        if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL))
                return -ETIMEDOUT;

        xe_gt_mcr_set_implicit_defaults(gt);
        xe_wa_process_gt(gt);
        xe_tuning_process_gt(gt);
        xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

        err = xe_gt_clock_init(gt);
        if (err)
                return err;

        xe_mocs_init(gt);
        err = xe_execlist_init(gt);
        if (err)
                return err;

        err = xe_hw_engines_init(gt);
        if (err)
                return err;

        err = xe_uc_init_post_hwconfig(&gt->uc);
        if (err)
                return err;

        if (xe_gt_is_main_type(gt)) {
                /*
                 * USM has its own SA pool so that it does not block behind
                 * user operations
                 */
                if (gt_to_xe(gt)->info.has_usm) {
                        struct xe_device *xe = gt_to_xe(gt);

                        gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
                                                                IS_DGFX(xe) ? SZ_1M : SZ_512K, 16);
                        if (IS_ERR(gt->usm.bb_pool))
                                return PTR_ERR(gt->usm.bb_pool);
                }
        }

        if (xe_gt_is_main_type(gt)) {
                struct xe_tile *tile = gt_to_tile(gt);

                err = xe_migrate_init(tile->migrate);
                if (err)
                        return err;
        }

        err = xe_uc_load_hw(&gt->uc);
        if (err)
                return err;

        /* Configure default CCS mode of 1 engine with all resources */
        if (xe_gt_ccs_mode_enabled(gt)) {
                gt->ccs_mode = 1;
                xe_gt_apply_ccs_mode(gt);
        }

        if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_is_main_type(gt))
                xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);

        if (IS_SRIOV_PF(gt_to_xe(gt)))
                xe_gt_sriov_pf_init_hw(gt);

        return 0;
}

static void xe_gt_fini(void *arg)
{
        struct xe_gt *gt = arg;
        int i;

        if (disable_work_sync(&gt->reset.worker))
                /*
                 * If gt_reset_worker was halted from executing, take care of
                 * releasing the rpm reference here.
                 */
                xe_pm_runtime_put(gt_to_xe(gt));

        for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
                xe_hw_fence_irq_finish(&gt->fence_irq[i]);

        xe_gt_disable_host_l2_vram(gt);
}

int xe_gt_init(struct xe_gt *gt)
{
        int err;
        int i;

        INIT_WORK(&gt->reset.worker, gt_reset_worker);

        for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) {
                gt->ring_ops[i] = xe_ring_ops_get(gt, i);
                xe_hw_fence_irq_init(&gt->fence_irq[i]);
        }

        err = devm_add_action_or_reset(gt_to_xe(gt)->drm.dev, xe_gt_fini, gt);
        if (err)
                return err;

        err = xe_gt_sysfs_init(gt);
        if (err)
                return err;

        err = gt_init_with_gt_forcewake(gt);
        if (err)
                return err;

        err = xe_gt_idle_init(&gt->gtidle);
        if (err)
                return err;

        err = xe_gt_freq_init(gt);
        if (err)
                return err;

        xe_force_wake_init_engines(gt, gt_to_fw(gt));

        err = gt_init_with_all_forcewake(gt);
        if (err)
                return err;

        xe_gt_record_user_engines(gt);

        err = xe_eu_stall_init(gt);
        if (err)
                return err;

        if (IS_SRIOV_VF(gt_to_xe(gt))) {
                err = xe_gt_sriov_vf_init(gt);
                if (err)
                        return err;
        }

        return 0;
}

/**
 * xe_gt_mmio_init() - Initialize GT's MMIO access
 * @gt: the GT object
 *
 * Initialize GT's MMIO accessor, which will be used to access registers inside
 * this GT.
 */
void xe_gt_mmio_init(struct xe_gt *gt)
{
        struct xe_tile *tile = gt_to_tile(gt);
        struct xe_device *xe = tile_to_xe(tile);

        xe_mmio_init(&gt->mmio, tile, tile->mmio.regs, tile->mmio.regs_size);

        if (gt->info.type == XE_GT_TYPE_MEDIA) {
                gt->mmio.adj_offset = MEDIA_GT_GSI_OFFSET;
                gt->mmio.adj_limit = MEDIA_GT_GSI_LENGTH;
        } else {
                gt->mmio.adj_offset = 0;
                gt->mmio.adj_limit = 0;
        }

        if (IS_SRIOV_VF(xe))
                gt->mmio.sriov_vf_gt = gt;
}

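/**
 * xe_gt_record_user_engines() - Record engines available to userspace
 * @gt: the GT object
 *
 * Recompute the mask and per-class counts of hardware engines that may be
 * exposed to userspace, i.e. all engines that are not reserved for kernel
 * use.
 */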
void xe_gt_record_user_engines(struct xe_gt *gt)
{
        struct xe_hw_engine *hwe;
        enum xe_hw_engine_id id;

        gt->user_engines.mask = 0;
        memset(gt->user_engines.instances_per_class, 0,
               sizeof(gt->user_engines.instances_per_class));

        for_each_hw_engine(hwe, gt, id) {
                if (xe_hw_engine_is_reserved(hwe))
                        continue;

                gt->user_engines.mask |= BIT_ULL(id);
                gt->user_engines.instances_per_class[hwe->class]++;
        }

        xe_gt_assert(gt, (gt->user_engines.mask | gt->info.engine_mask)
                     == gt->info.engine_mask);
}

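/*
 * Trigger a full GT reset via GDRST; VFs cannot access GDRST directly and
 * instead request the reset through the SR-IOV VF interface.
 */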
static int do_gt_reset(struct xe_gt *gt)
{
        int err;

        if (IS_SRIOV_VF(gt_to_xe(gt)))
                return xe_gt_sriov_vf_reset(gt);

        xe_gsc_wa_14015076503(gt, true);

        xe_mmio_write32(&gt->mmio, GDRST, GRDOM_FULL);
        err = xe_mmio_wait32(&gt->mmio, GDRST, GRDOM_FULL, 0, 5000, NULL, false);
        if (err)
                xe_gt_err(gt, "failed to clear GRDOM_FULL (%pe)\n",
                          ERR_PTR(err));

        xe_gsc_wa_14015076503(gt, false);

        return err;
}

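/*
 * VFs don't reprogram the hardware after a reset; they only sanitize,
 * reload and restart the microcontrollers.
 */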
static int vf_gt_restart(struct xe_gt *gt)
{
        int err;

        err = xe_uc_sanitize_reset(&gt->uc);
        if (err)
                return err;

        err = xe_uc_load_hw(&gt->uc);
        if (err)
                return err;

        err = xe_uc_start(&gt->uc);
        if (err)
                return err;

        return 0;
}

static int do_gt_restart(struct xe_gt *gt)
{
        struct xe_hw_engine *hwe;
        enum xe_hw_engine_id id;
        int err;

        if (IS_SRIOV_VF(gt_to_xe(gt)))
                return vf_gt_restart(gt);

        xe_pat_init(gt);

        xe_gt_enable_host_l2_vram(gt);
        xe_gt_enable_comp_1wcoh(gt);

        xe_gt_mcr_set_implicit_defaults(gt);
        xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

        err = xe_wopcm_init(&gt->uc.wopcm);
        if (err)
                return err;

        for_each_hw_engine(hwe, gt, id)
                xe_hw_engine_enable_ring(hwe);

        err = xe_uc_sanitize_reset(&gt->uc);
        if (err)
                return err;

        err = xe_uc_load_hw(&gt->uc);
        if (err)
                return err;

        if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_is_main_type(gt))
                xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);

        if (IS_SRIOV_PF(gt_to_xe(gt)))
                xe_gt_sriov_pf_init_hw(gt);

        xe_mocs_init(gt);

        for_each_hw_engine(hwe, gt, id)
                xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);

        /* Get CCS mode in sync between sw/hw */
        xe_gt_apply_ccs_mode(gt);

        err = xe_uc_start(&gt->uc);
        if (err)
                return err;

        /* Restore GT freq to expected values */
        xe_gt_sanitize_freq(gt);

        if (IS_SRIOV_PF(gt_to_xe(gt)))
                xe_gt_sriov_pf_restart(gt);

        return 0;
}

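/*
 * Reset worker queued by xe_gt_reset_async(); every exit path releases the
 * runtime PM reference taken when the work was queued.
 */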
static void gt_reset_worker(struct work_struct *w)
{
        struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker);
        unsigned int fw_ref;
        int err;

        if (xe_device_wedged(gt_to_xe(gt)))
                goto err_pm_put;

        /* We only support GT resets with GuC submission */
        if (!xe_device_uc_enabled(gt_to_xe(gt)))
                goto err_pm_put;

        xe_gt_info(gt, "reset started\n");

        if (xe_fault_inject_gt_reset()) {
                err = -ECANCELED;
                goto err_fail;
        }

        xe_gt_sanitize(gt);

        fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
        if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
                err = -ETIMEDOUT;
                goto err_out;
        }

        if (IS_SRIOV_PF(gt_to_xe(gt)))
                xe_gt_sriov_pf_stop_prepare(gt);

        xe_guc_rc_disable(&gt->uc.guc);
        xe_uc_stop_prepare(&gt->uc);
        xe_pagefault_reset(gt_to_xe(gt), gt);

        xe_uc_stop(&gt->uc);

        xe_tlb_inval_reset(&gt->tlb_inval);

        err = do_gt_reset(gt);
        if (err)
                goto err_out;

        err = do_gt_restart(gt);
        if (err)
                goto err_out;

        xe_force_wake_put(gt_to_fw(gt), fw_ref);

        /* Pair with get while enqueueing the work in xe_gt_reset_async() */
        xe_pm_runtime_put(gt_to_xe(gt));

        xe_gt_info(gt, "reset done\n");

        return;

err_out:
        xe_force_wake_put(gt_to_fw(gt), fw_ref);
        XE_WARN_ON(xe_uc_start(&gt->uc));

err_fail:
        xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
        xe_device_declare_wedged(gt_to_xe(gt));
err_pm_put:
        xe_pm_runtime_put(gt_to_xe(gt));
}

void xe_gt_reset_async(struct xe_gt *gt)
{
        xe_gt_info(gt, "trying reset from %ps\n", __builtin_return_address(0));

        /* Don't do a reset while one is already in flight */
        if (!xe_fault_inject_gt_reset() && xe_uc_reset_prepare(&gt->uc))
                return;

        xe_gt_info(gt, "reset queued\n");

        /* Pair with put in gt_reset_worker() if work is enqueued */
        xe_pm_runtime_get_noresume(gt_to_xe(gt));
        if (!queue_work(gt->ordered_wq, &gt->reset.worker))
                xe_pm_runtime_put(gt_to_xe(gt));
}

void xe_gt_suspend_prepare(struct xe_gt *gt)
{
        CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
        xe_uc_suspend_prepare(&gt->uc);
}

int xe_gt_suspend(struct xe_gt *gt)
{
        int err;

        xe_gt_dbg(gt, "suspending\n");
        xe_gt_sanitize(gt);

        CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
        if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL)) {
                xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(-ETIMEDOUT));
                return -ETIMEDOUT;
        }

        err = xe_uc_suspend(&gt->uc);
        if (err) {
                xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));
                return err;
        }

        xe_gt_idle_disable_pg(gt);

        xe_gt_disable_host_l2_vram(gt);

        xe_gt_dbg(gt, "suspended\n");

        return 0;
}

void xe_gt_shutdown(struct xe_gt *gt)
{
        CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
        do_gt_reset(gt);
}

/**
 * xe_gt_sanitize_freq() - Restore saved frequencies if necessary.
 * @gt: the GT object
 *
 * Called after driver init/GSC load completes to restore GT frequencies if we
 * limited them for any WAs.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int xe_gt_sanitize_freq(struct xe_gt *gt)
{
        int ret = 0;

        if ((!xe_uc_fw_is_available(&gt->uc.gsc.fw) ||
             xe_uc_fw_is_loaded(&gt->uc.gsc.fw) ||
             xe_uc_fw_is_in_error_state(&gt->uc.gsc.fw)) &&
            XE_GT_WA(gt, 22019338487))
                ret = xe_guc_pc_restore_stashed_freq(&gt->uc.guc.pc);

        return ret;
}

int xe_gt_resume(struct xe_gt *gt)
{
        int err;

        xe_gt_dbg(gt, "resuming\n");
        CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
        if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL)) {
                xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(-ETIMEDOUT));
                return -ETIMEDOUT;
        }

        err = do_gt_restart(gt);
        if (err)
                return err;

        xe_gt_idle_enable_pg(gt);

        xe_gt_dbg(gt, "resumed\n");

        return 0;
}

/**
 * xe_gt_runtime_suspend() - GT runtime suspend
 * @gt: the GT object
 *
 * Return: 0 on success, negative error code otherwise.
 */
int xe_gt_runtime_suspend(struct xe_gt *gt)
{
        xe_gt_dbg(gt, "runtime suspending\n");

        CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
        if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL)) {
                xe_gt_err(gt, "runtime suspend failed (%pe)\n", ERR_PTR(-ETIMEDOUT));
                return -ETIMEDOUT;
        }

        xe_uc_runtime_suspend(&gt->uc);
        xe_gt_disable_host_l2_vram(gt);

        xe_gt_dbg(gt, "runtime suspended\n");

        return 0;
}

/**
 * xe_gt_runtime_resume() - GT runtime resume
 * @gt: the GT object
 *
 * Return: 0 on success, negative error code otherwise.
 */
int xe_gt_runtime_resume(struct xe_gt *gt)
{
        xe_gt_dbg(gt, "runtime resuming\n");

        CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
        if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL)) {
                xe_gt_err(gt, "runtime resume failed (%pe)\n", ERR_PTR(-ETIMEDOUT));
                return -ETIMEDOUT;
        }

        xe_gt_enable_host_l2_vram(gt);
        xe_uc_runtime_resume(&gt->uc);

        xe_gt_dbg(gt, "runtime resumed\n");

        return 0;
}

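/* Look up an engine on @gt by class and physical or logical instance number */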
struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
                                     enum xe_engine_class class,
                                     u16 instance, bool logical)
{
        struct xe_hw_engine *hwe;
        enum xe_hw_engine_id id;

        for_each_hw_engine(hwe, gt, id)
                if (hwe->class == class &&
                    ((!logical && hwe->instance == instance) ||
                     (logical && hwe->logical_instance == instance)))
                        return hwe;

        return NULL;
}

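/*
 * Render and compute engines share a reset domain, so a request for either
 * class may be satisfied by any engine of the combined class.
 */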
struct xe_hw_engine *xe_gt_any_hw_engine_by_reset_domain(struct xe_gt *gt,
                                                         enum xe_engine_class class)
{
        struct xe_hw_engine *hwe;
        enum xe_hw_engine_id id;

        for_each_hw_engine(hwe, gt, id) {
                switch (class) {
                case XE_ENGINE_CLASS_RENDER:
                case XE_ENGINE_CLASS_COMPUTE:
                        if (hwe->class == XE_ENGINE_CLASS_RENDER ||
                            hwe->class == XE_ENGINE_CLASS_COMPUTE)
                                return hwe;
                        break;
                default:
                        if (hwe->class == class)
                                return hwe;
                }
        }

        return NULL;
}

struct xe_hw_engine *xe_gt_any_hw_engine(struct xe_gt *gt)
{
        struct xe_hw_engine *hwe;
        enum xe_hw_engine_id id;

        for_each_hw_engine(hwe, gt, id)
                return hwe;

        return NULL;
}

/**
 * xe_gt_declare_wedged() - Declare GT wedged
 * @gt: the GT object
 *
 * Wedge the GT which stops all submission, saves desired debug state, and
 * cleans up anything which could timeout.
 */
void xe_gt_declare_wedged(struct xe_gt *gt)
{
        xe_gt_assert(gt, gt_to_xe(gt)->wedged.mode);

        xe_uc_declare_wedged(&gt->uc);
        xe_tlb_inval_reset(&gt->tlb_inval);
}
|