Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-12-27 12:21:22 -05:00)
Merge tag 'drm-fixes-2025-09-05' of https://gitlab.freedesktop.org/drm/kernel
Pull drm fixes from Dave Airlie:
 "Weekly drm fixes roundup, nouveau has two fixes for fence/irq racing
  problems that should fix a bunch of instability in userspace.
  Otherwise amdgpu along with some single fixes to bridge, xe, ivpu.
  Looks about usual for this time in the release.

  scheduler:
   - fix race in unschedulable tracepoint

  bridge:
   - ti-sn65dsi86: fix REFCLK setting

  xe:
   - Fix incorrect migration of backed-up object to VRAM

  amdgpu:
   - UserQ fixes
   - MES 11 fix
   - eDP/LVDS fix
   - Fix non-DC audio clean up
   - Fix duplicate cursor issue
   - Fix error path in PSP init

  nouveau:
   - fix nonstall interrupt handling
   - fix race on fence vs irq emission
   - update MAINTAINERS entry

  ivpu:
   - prevent recovery work during device remove"

* tag 'drm-fixes-2025-09-05' of https://gitlab.freedesktop.org/drm/kernel:
  drm/amd/amdgpu: Fix missing error return on kzalloc failure
  drm/bridge: ti-sn65dsi86: fix REFCLK setting
  MAINTAINERS: Update git entry for nouveau
  drm/xe: Fix incorrect migration of backed-up object to VRAM
  drm/sched: Fix racy access to drm_sched_entity.dependency
  accel/ivpu: Prevent recovery work from being queued during device removal
  nouveau: Membar before between semaphore writes and the interrupt
  nouveau: fix disabling the nonstall irq due to storm code
  drm/amd/display: Clear the CUR_ENABLE register on DCN314 w/out DPP PG
  drm/amdgpu: drop hw access in non-DC audio fini
  drm/amd: Re-enable common modes for eDP and LVDS
  drm/amdgpu/mes11: make MES_MISC_OP_CHANGE_CONFIG failure non-fatal
  drm/amdgpu/sdma: bump firmware version checks for user queue support
@@ -7821,7 +7821,7 @@ Q: https://patchwork.freedesktop.org/project/nouveau/
 Q: https://gitlab.freedesktop.org/drm/nouveau/-/merge_requests
 B: https://gitlab.freedesktop.org/drm/nouveau/-/issues
 C: irc://irc.oftc.net/nouveau
-T: git https://gitlab.freedesktop.org/drm/nouveau.git
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
 F: drivers/gpu/drm/nouveau/
 F: include/uapi/drm/nouveau_drm.h
@@ -677,7 +677,7 @@ static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
 static void ivpu_dev_fini(struct ivpu_device *vdev)
 {
         ivpu_jobs_abort_all(vdev);
-        ivpu_pm_cancel_recovery(vdev);
+        ivpu_pm_disable_recovery(vdev);
         ivpu_pm_disable(vdev);
         ivpu_prepare_for_reset(vdev);
         ivpu_shutdown(vdev);
@@ -417,10 +417,10 @@ void ivpu_pm_init(struct ivpu_device *vdev)
         ivpu_dbg(vdev, PM, "Autosuspend delay = %d\n", delay);
 }
 
-void ivpu_pm_cancel_recovery(struct ivpu_device *vdev)
+void ivpu_pm_disable_recovery(struct ivpu_device *vdev)
 {
         drm_WARN_ON(&vdev->drm, delayed_work_pending(&vdev->pm->job_timeout_work));
-        cancel_work_sync(&vdev->pm->recovery_work);
+        disable_work_sync(&vdev->pm->recovery_work);
 }
 
 void ivpu_pm_enable(struct ivpu_device *vdev)
@@ -25,7 +25,7 @@ struct ivpu_pm_info {
 void ivpu_pm_init(struct ivpu_device *vdev);
 void ivpu_pm_enable(struct ivpu_device *vdev);
 void ivpu_pm_disable(struct ivpu_device *vdev);
-void ivpu_pm_cancel_recovery(struct ivpu_device *vdev);
+void ivpu_pm_disable_recovery(struct ivpu_device *vdev);
 
 int ivpu_pm_suspend_cb(struct device *dev);
 int ivpu_pm_resume_cb(struct device *dev);
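
Background for the ivpu hunks above: disable_work_sync() not only flushes the pending recovery work, it also rejects any later queue attempt, which is what keeps recovery from being scheduled while the device is being removed. Below is a toy, userspace-only model of that cancel-vs-disable distinction; the types and helpers are invented for illustration and are not the kernel workqueue API.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the difference between cancelling a work item (one-shot) and
 * disabling it (all later submissions are rejected), loosely mirroring
 * cancel_work_sync() vs disable_work_sync(). Illustrative only. */
struct toy_work {
        bool pending;
        bool disabled;
};

static bool toy_queue_work(struct toy_work *w)
{
        if (w->disabled)
                return false;   /* a disabled work item cannot be re-queued */
        w->pending = true;
        return true;
}

static void toy_cancel_work_sync(struct toy_work *w)
{
        w->pending = false;     /* drops the current instance only */
}

static void toy_disable_work_sync(struct toy_work *w)
{
        w->pending = false;
        w->disabled = true;     /* later toy_queue_work() calls are rejected */
}

int main(void)
{
        struct toy_work recovery = { 0 };

        toy_cancel_work_sync(&recovery);
        printf("re-queue after cancel:  %d\n", toy_queue_work(&recovery));

        toy_disable_work_sync(&recovery);
        printf("re-queue after disable: %d\n", toy_queue_work(&recovery));
        return 0;
}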
@@ -448,7 +448,7 @@ static int psp_sw_init(struct amdgpu_ip_block *ip_block)
         psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
         if (!psp->cmd) {
                 dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
-                ret = -ENOMEM;
+                return -ENOMEM;
         }
 
         adev->psp.xgmi_context.supports_extended_data =
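
The PSP hunk above is a classic error-path fix: the old code recorded -ENOMEM in a local variable but kept going with a NULL buffer. A minimal standalone sketch of the intended pattern, returning immediately on allocation failure (the names below are invented; only the control flow mirrors the fix):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct cmd_buf {
        char data[64];
};

/* On allocation failure the function must return at once; merely recording
 * the error (the old "ret = -ENOMEM;") lets execution fall through to code
 * that dereferences the NULL buffer, which is what the hunk above fixes. */
static int cmd_buf_init(struct cmd_buf **out)
{
        struct cmd_buf *buf = calloc(1, sizeof(*buf));

        if (!buf) {
                fprintf(stderr, "failed to allocate command buffer\n");
                return -ENOMEM;         /* bail out immediately */
        }

        *out = buf;
        return 0;
}

int main(void)
{
        struct cmd_buf *buf = NULL;

        if (cmd_buf_init(&buf))
                return 1;

        free(buf);
        return 0;
}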
@@ -1462,17 +1462,12 @@ static int dce_v10_0_audio_init(struct amdgpu_device *adev)
 
 static void dce_v10_0_audio_fini(struct amdgpu_device *adev)
 {
-        int i;
-
         if (!amdgpu_audio)
                 return;
 
         if (!adev->mode_info.audio.enabled)
                 return;
 
-        for (i = 0; i < adev->mode_info.audio.num_pins; i++)
-                dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
         adev->mode_info.audio.enabled = false;
 }
 
@@ -1511,17 +1511,12 @@ static int dce_v11_0_audio_init(struct amdgpu_device *adev)
 
 static void dce_v11_0_audio_fini(struct amdgpu_device *adev)
 {
-        int i;
-
         if (!amdgpu_audio)
                 return;
 
         if (!adev->mode_info.audio.enabled)
                 return;
 
-        for (i = 0; i < adev->mode_info.audio.num_pins; i++)
-                dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
         adev->mode_info.audio.enabled = false;
 }
 
@@ -1451,17 +1451,12 @@ static int dce_v6_0_audio_init(struct amdgpu_device *adev)
 
 static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
 {
-        int i;
-
         if (!amdgpu_audio)
                 return;
 
         if (!adev->mode_info.audio.enabled)
                 return;
 
-        for (i = 0; i < adev->mode_info.audio.num_pins; i++)
-                dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
         adev->mode_info.audio.enabled = false;
 }
 
@@ -1443,17 +1443,12 @@ static int dce_v8_0_audio_init(struct amdgpu_device *adev)
 
 static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
 {
-        int i;
-
         if (!amdgpu_audio)
                 return;
 
         if (!adev->mode_info.audio.enabled)
                 return;
 
-        for (i = 0; i < adev->mode_info.audio.num_pins; i++)
-                dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
         adev->mode_info.audio.enabled = false;
 }
 
@@ -641,8 +641,9 @@ static int mes_v11_0_misc_op(struct amdgpu_mes *mes,
                 break;
         case MES_MISC_OP_CHANGE_CONFIG:
                 if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) < 0x63) {
-                        dev_err(mes->adev->dev, "MES FW version must be larger than 0x63 to support limit single process feature.\n");
-                        return -EINVAL;
+                        dev_warn_once(mes->adev->dev,
+                                      "MES FW version must be larger than 0x63 to support limit single process feature.\n");
+                        return 0;
                 }
                 misc_pkt.opcode = MESAPI_MISC__CHANGE_CONFIG;
                 misc_pkt.change_config.opcode =
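
The mes11 hunk downgrades the unsupported-feature case from a hard -EINVAL to a one-time warning plus a successful no-op, so boards with older MES firmware keep initializing. A small standalone sketch of that warn-once-and-skip pattern (the 0x63 threshold is taken from the hunk; everything else is invented):

#include <stdbool.h>
#include <stdio.h>

#define MIN_FW_FOR_FEATURE 0x63

/* Warn only the first time, then stay silent, mirroring dev_warn_once(). */
static void warn_once(const char *msg)
{
        static bool warned;

        if (!warned) {
                fprintf(stderr, "%s\n", msg);
                warned = true;
        }
}

static int change_config(unsigned int fw_version)
{
        if (fw_version < MIN_FW_FOR_FEATURE) {
                warn_once("firmware too old for this option, skipping");
                return 0;       /* non-fatal: the rest of init continues */
        }

        /* ... issue the real configuration request here ... */
        return 0;
}

int main(void)
{
        change_config(0x60);            /* warns once */
        change_config(0x60);            /* silently skipped */
        return change_config(0x64);     /* supported: would configure */
}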
@@ -1377,7 +1377,7 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
 
         switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
         case IP_VERSION(6, 0, 0):
-                if ((adev->sdma.instance[0].fw_version >= 24) && !adev->sdma.disable_uq)
+                if ((adev->sdma.instance[0].fw_version >= 27) && !adev->sdma.disable_uq)
                         adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
                 break;
         case IP_VERSION(6, 0, 1):
@@ -1385,11 +1385,11 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
                         adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
                 break;
         case IP_VERSION(6, 0, 2):
-                if ((adev->sdma.instance[0].fw_version >= 21) && !adev->sdma.disable_uq)
+                if ((adev->sdma.instance[0].fw_version >= 23) && !adev->sdma.disable_uq)
                         adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
                 break;
         case IP_VERSION(6, 0, 3):
-                if ((adev->sdma.instance[0].fw_version >= 25) && !adev->sdma.disable_uq)
+                if ((adev->sdma.instance[0].fw_version >= 27) && !adev->sdma.disable_uq)
                         adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
                 break;
         case IP_VERSION(6, 1, 0):
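
The sdma hunks only raise the minimum firmware version required before user-queue support is advertised for each IP revision. A standalone sketch of that kind of version-gated feature enablement (the thresholds below are copied from the hunks; the lookup-table structure is invented for illustration):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct fw_gate {
        const char *ip;         /* IP revision name */
        unsigned int min_fw;    /* minimum firmware version for the feature */
};

/* Values from the hunks above; the real checks live in sdma_v6_0_sw_init(). */
static const struct fw_gate gates[] = {
        { "6.0.0", 27 },
        { "6.0.2", 23 },
        { "6.0.3", 27 },
};

static bool user_queues_supported(const char *ip, unsigned int fw_version)
{
        for (size_t i = 0; i < sizeof(gates) / sizeof(gates[0]); i++) {
                if (strcmp(gates[i].ip, ip) == 0)
                        return fw_version >= gates[i].min_fw;
        }
        return false;   /* unknown IP revision: keep the feature off */
}

int main(void)
{
        printf("6.0.0 fw 24 -> %d\n", user_queues_supported("6.0.0", 24));
        printf("6.0.0 fw 27 -> %d\n", user_queues_supported("6.0.0", 27));
        return 0;
}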
@@ -8381,8 +8381,7 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
                 drm_add_modes_noedid(connector, 1920, 1080);
         } else {
                 amdgpu_dm_connector_ddc_get_modes(connector, drm_edid);
-                if (encoder && (connector->connector_type != DRM_MODE_CONNECTOR_eDP) &&
-                    (connector->connector_type != DRM_MODE_CONNECTOR_LVDS))
+                if (encoder)
                         amdgpu_dm_connector_add_common_modes(encoder, connector);
                 amdgpu_dm_connector_add_freesync_modes(connector, drm_edid);
         }
@@ -520,6 +520,15 @@ void dpp1_dppclk_control(
                 REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 0);
 }
 
+void dpp_force_disable_cursor(struct dpp *dpp_base)
+{
+        struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+        /* Force disable cursor */
+        REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, 0);
+        dpp_base->pos.cur0_ctl.bits.cur0_enable = 0;
+}
+
 static const struct dpp_funcs dcn10_dpp_funcs = {
         .dpp_read_state = dpp_read_state,
         .dpp_reset = dpp_reset,
@@ -1525,4 +1525,6 @@ void dpp1_construct(struct dcn10_dpp *dpp1,
 
 void dpp1_cm_get_gamut_remap(struct dpp *dpp_base,
                 struct dpp_grph_csc_adjustment *adjust);
+void dpp_force_disable_cursor(struct dpp *dpp_base);
+
 #endif
@@ -1494,6 +1494,7 @@ static struct dpp_funcs dcn30_dpp_funcs = {
         .dpp_dppclk_control = dpp1_dppclk_control,
         .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
         .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap,
+        .dpp_force_disable_cursor = dpp_force_disable_cursor,
 };
@@ -528,3 +528,75 @@ void dcn314_disable_link_output(struct dc_link *link,
 
         apply_symclk_on_tx_off_wa(link);
 }
+
+/**
+ * dcn314_dpp_pg_control - DPP power gate control.
+ *
+ * @hws: dce_hwseq reference.
+ * @dpp_inst: DPP instance reference.
+ * @power_on: true if we want to enable power gate, false otherwise.
+ *
+ * Enable or disable power gate in the specific DPP instance.
+ * If power gating is disabled, will force disable cursor in the DPP instance.
+ */
+void dcn314_dpp_pg_control(
+                struct dce_hwseq *hws,
+                unsigned int dpp_inst,
+                bool power_on)
+{
+        uint32_t power_gate = power_on ? 0 : 1;
+        uint32_t pwr_status = power_on ? 0 : 2;
+
+        if (hws->ctx->dc->debug.disable_dpp_power_gate) {
+                /* Workaround for DCN314 with disabled power gating */
+                if (!power_on) {
+
+                        /* Force disable cursor if power gating is disabled */
+                        struct dpp *dpp = hws->ctx->dc->res_pool->dpps[dpp_inst];
+                        if (dpp && dpp->funcs->dpp_force_disable_cursor)
+                                dpp->funcs->dpp_force_disable_cursor(dpp);
+                }
+                return;
+        }
+        if (REG(DOMAIN1_PG_CONFIG) == 0)
+                return;
+
+        switch (dpp_inst) {
+        case 0: /* DPP0 */
+                REG_UPDATE(DOMAIN1_PG_CONFIG,
+                                DOMAIN1_POWER_GATE, power_gate);
+
+                REG_WAIT(DOMAIN1_PG_STATUS,
+                                DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
+                                1, 1000);
+                break;
+        case 1: /* DPP1 */
+                REG_UPDATE(DOMAIN3_PG_CONFIG,
+                                DOMAIN3_POWER_GATE, power_gate);
+
+                REG_WAIT(DOMAIN3_PG_STATUS,
+                                DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
+                                1, 1000);
+                break;
+        case 2: /* DPP2 */
+                REG_UPDATE(DOMAIN5_PG_CONFIG,
+                                DOMAIN5_POWER_GATE, power_gate);
+
+                REG_WAIT(DOMAIN5_PG_STATUS,
+                                DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
+                                1, 1000);
+                break;
+        case 3: /* DPP3 */
+                REG_UPDATE(DOMAIN7_PG_CONFIG,
+                                DOMAIN7_POWER_GATE, power_gate);
+
+                REG_WAIT(DOMAIN7_PG_STATUS,
+                                DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
+                                1, 1000);
+                break;
+        default:
+                BREAK_TO_DEBUGGER();
+                break;
+        }
+}
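
The workaround branch in dcn314_dpp_pg_control() matters because with power gating disabled the DPP block is never reset, so the cursor-enable bit would otherwise survive a plane power-down and show up as a duplicate cursor. A toy standalone model of "emulate the gate's side effect in software when the gate is unavailable" (all names below are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Toy DPP block: power-gating it would normally clear cursor_enable in HW. */
struct toy_dpp {
        bool cursor_enable;
};

static void toy_force_disable_cursor(struct toy_dpp *dpp)
{
        dpp->cursor_enable = false;     /* software stand-in for the HW reset */
}

static void toy_dpp_pg_control(struct toy_dpp *dpp, bool power_on,
                               bool pg_disabled)
{
        if (pg_disabled) {
                /* Power gating is off, so emulate its side effect instead. */
                if (!power_on)
                        toy_force_disable_cursor(dpp);
                return;
        }

        /* With real power gating the block reset clears the cursor bit. */
        if (!power_on)
                dpp->cursor_enable = false;
}

int main(void)
{
        struct toy_dpp dpp = { .cursor_enable = true };

        toy_dpp_pg_control(&dpp, false, true);
        printf("cursor enabled after power down: %d\n", dpp.cursor_enable);
        return 0;
}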
@@ -47,4 +47,6 @@ void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst,
 
 void dcn314_disable_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal);
 
+void dcn314_dpp_pg_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on);
+
 #endif /* __DC_HWSS_DCN314_H__ */
@@ -141,6 +141,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
         .enable_power_gating_plane = dcn314_enable_power_gating_plane,
         .dpp_root_clock_control = dcn314_dpp_root_clock_control,
         .hubp_pg_control = dcn31_hubp_pg_control,
+        .dpp_pg_control = dcn314_dpp_pg_control,
         .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
         .update_odm = dcn314_update_odm,
         .dsc_pg_control = dcn314_dsc_pg_control,
@@ -349,6 +349,9 @@ struct dpp_funcs {
                         struct dpp *dpp_base,
                         enum dc_color_space color_space,
                         struct dc_csc_transform cursor_csc_color_matrix);
+
+        void (*dpp_force_disable_cursor)(struct dpp *dpp_base);
+
 };
@@ -392,6 +392,17 @@ static int __maybe_unused ti_sn65dsi86_resume(struct device *dev)
 
         gpiod_set_value_cansleep(pdata->enable_gpio, 1);
 
+        /*
+         * After EN is deasserted and an external clock is detected, the bridge
+         * will sample GPIO3:1 to determine its frequency. The driver will
+         * overwrite this setting in ti_sn_bridge_set_refclk_freq(). But this is
+         * racy. Thus we have to wait a couple of us. According to the datasheet
+         * the GPIO lines has to be stable at least 5 us (td5) but it seems that
+         * is not enough and the refclk frequency value is still lost or
+         * overwritten by the bridge itself. Waiting for 20us seems to work.
+         */
+        usleep_range(20, 30);
+
         /*
          * If we have a reference clock we can enable communication w/ the
          * panel (including the aux channel) w/out any need for an input clock
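
The new comment and delay give the bridge time to finish sampling its GPIO straps after EN is released, before the driver overwrites the REFCLK field. A userspace-only sketch of the same "release reset, wait out the sampling window, then program" sequencing, with nanosleep standing in for usleep_range() (the 20 us figure comes from the hunk; everything else is invented):

#include <stdio.h>
#include <time.h>

/* Sleep for the given number of microseconds (userspace stand-in for
 * usleep_range(); the kernel helper additionally takes an upper slack bound). */
static void sleep_us(long us)
{
        struct timespec ts = { .tv_sec = 0, .tv_nsec = us * 1000L };

        nanosleep(&ts, NULL);
}

static void bridge_power_up(void)
{
        /* 1) release EN / drive the enable GPIO (real GPIO access omitted) */

        /* 2) let the bridge finish sampling its strap pins; 5 us (td5) is the
         *    datasheet minimum, 20 us is the empirically safe margin. */
        sleep_us(20);

        /* 3) only now overwrite the sampled REFCLK setting (omitted) */
        puts("refclk programmed after settle delay");
}

int main(void)
{
        bridge_power_up();
        return 0;
}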
@@ -18,7 +18,7 @@ gv100_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
         struct nvif_push *push = &chan->chan.push;
         int ret;
 
-        ret = PUSH_WAIT(push, 8);
+        ret = PUSH_WAIT(push, 13);
         if (ret)
                 return ret;
@@ -32,6 +32,11 @@ gv100_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
                         NVDEF(NVC36F, SEM_EXECUTE, PAYLOAD_SIZE, 32BIT) |
                         NVDEF(NVC36F, SEM_EXECUTE, RELEASE_TIMESTAMP, DIS));
 
+        PUSH_MTHD(push, NVC36F, MEM_OP_A, 0,
+                                MEM_OP_B, 0,
+                                MEM_OP_C, NVDEF(NVC36F, MEM_OP_C, MEMBAR_TYPE, SYS_MEMBAR),
+                                MEM_OP_D, NVDEF(NVC36F, MEM_OP_D, OPERATION, MEMBAR));
+
         PUSH_MTHD(push, NVC36F, NON_STALL_INTERRUPT, 0);
 
         PUSH_KICK(push);
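
The added MEM_OP methods issue a system membar between the semaphore payload write and the non-stall interrupt, so the host never observes the interrupt before the updated sequence value. The same ordering requirement can be modeled in userspace with C11 release/acquire fences, purely for illustration (a GPU push buffer is of course not CPU atomics):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static unsigned int payload;            /* the "semaphore" value */
static atomic_int doorbell;             /* stand-in for the non-stall interrupt */

static void *producer(void *arg)
{
        (void)arg;
        payload = 42;                                   /* semaphore write */
        atomic_thread_fence(memory_order_release);      /* the "membar" */
        atomic_store_explicit(&doorbell, 1, memory_order_relaxed);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, producer, NULL);

        while (!atomic_load_explicit(&doorbell, memory_order_relaxed))
                ;                                       /* wait for the "interrupt" */
        atomic_thread_fence(memory_order_acquire);

        printf("payload seen by handler: %u\n", payload);       /* always 42 */
        pthread_join(t, NULL);
        return 0;
}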
@@ -7,6 +7,91 @@
 
 #define NVC36F_NON_STALL_INTERRUPT (0x00000020)
 #define NVC36F_NON_STALL_INTERRUPT_HANDLE 31:0
+// NOTE - MEM_OP_A and MEM_OP_B have been replaced in gp100 with methods for
+// specifying the page address for a targeted TLB invalidate and the uTLB for
+// a targeted REPLAY_CANCEL for UVM.
+// The previous MEM_OP_A/B functionality is in MEM_OP_C/D, with slightly
+// rearranged fields.
+#define NVC36F_MEM_OP_A (0x00000028)
+#define NVC36F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_CLIENT_UNIT_ID 5:0 // only relevant for REPLAY_CANCEL_TARGETED
+#define NVC36F_MEM_OP_A_TLB_INVALIDATE_INVALIDATION_SIZE 5:0 // Used to specify size of invalidate, used for invalidates which are not of the REPLAY_CANCEL_TARGETED type
+#define NVC36F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_GPC_ID 10:6 // only relevant for REPLAY_CANCEL_TARGETED
+#define NVC36F_MEM_OP_A_TLB_INVALIDATE_CANCEL_MMU_ENGINE_ID 6:0 // only relevant for REPLAY_CANCEL_VA_GLOBAL
+#define NVC36F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR 11:11
+#define NVC36F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_EN 0x00000001
+#define NVC36F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_DIS 0x00000000
+#define NVC36F_MEM_OP_A_TLB_INVALIDATE_TARGET_ADDR_LO 31:12
+#define NVC36F_MEM_OP_B (0x0000002c)
+#define NVC36F_MEM_OP_B_TLB_INVALIDATE_TARGET_ADDR_HI 31:0
+#define NVC36F_MEM_OP_C (0x00000030)
+#define NVC36F_MEM_OP_C_MEMBAR_TYPE 2:0
+#define NVC36F_MEM_OP_C_MEMBAR_TYPE_SYS_MEMBAR 0x00000000
+#define NVC36F_MEM_OP_C_MEMBAR_TYPE_MEMBAR 0x00000001
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB 0:0
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_ONE 0x00000000
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_ALL 0x00000001 // Probably nonsensical for MMU_TLB_INVALIDATE_TARGETED
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_GPC 1:1
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_GPC_ENABLE 0x00000000
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_GPC_DISABLE 0x00000001
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY 4:2 // only relevant if GPC ENABLE
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_NONE 0x00000000
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START 0x00000001
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START_ACK_ALL 0x00000002
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_TARGETED 0x00000003
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_GLOBAL 0x00000004
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_VA_GLOBAL 0x00000005
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE 6:5 // only relevant if GPC ENABLE
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_NONE 0x00000000
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_GLOBALLY 0x00000001
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_INTRANODE 0x00000002
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE 9:7 //only relevant for REPLAY_CANCEL_VA_GLOBAL
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_READ 0
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE 1
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_STRONG 2
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_RSVRVD 3
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_WEAK 4
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_ALL 5
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE_AND_ATOMIC 6
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ALL 7
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL 9:7 // Invalidate affects this level and all below
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_ALL 0x00000000 // Invalidate tlb caches at all levels of the page table
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_PTE_ONLY 0x00000001
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE0 0x00000002
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE1 0x00000003
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE2 0x00000004
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3 0x00000005
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE4 0x00000006
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE5 0x00000007
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE 11:10 // only relevant if PDB_ONE
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_VID_MEM 0x00000000
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_COHERENT 0x00000002
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_NONCOHERENT 0x00000003
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_ADDR_LO 31:12 // only relevant if PDB_ONE
+#define NVC36F_MEM_OP_C_ACCESS_COUNTER_CLR_TARGETED_NOTIFY_TAG 19:0
+// MEM_OP_D MUST be preceded by MEM_OPs A-C.
+#define NVC36F_MEM_OP_D (0x00000034)
+#define NVC36F_MEM_OP_D_TLB_INVALIDATE_PDB_ADDR_HI 26:0 // only relevant if PDB_ONE
+#define NVC36F_MEM_OP_D_OPERATION 31:27
+#define NVC36F_MEM_OP_D_OPERATION_MEMBAR 0x00000005
+#define NVC36F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE 0x00000009
+#define NVC36F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE_TARGETED 0x0000000a
+#define NVC36F_MEM_OP_D_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d
+#define NVC36F_MEM_OP_D_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e
+// CLEAN_LINES is an alias for Tegra/GPU IP usage
+#define NVC36F_MEM_OP_B_OPERATION_L2_INVALIDATE_CLEAN_LINES 0x0000000e
+#define NVC36F_MEM_OP_D_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f
+#define NVC36F_MEM_OP_D_OPERATION_L2_FLUSH_DIRTY 0x00000010
+#define NVC36F_MEM_OP_D_OPERATION_L2_WAIT_FOR_SYS_PENDING_READS 0x00000015
+#define NVC36F_MEM_OP_D_OPERATION_ACCESS_COUNTER_CLR 0x00000016
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE 1:0
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MIMC 0x00000000
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MOMC 0x00000001
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_ALL 0x00000002
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_TARGETED 0x00000003
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE 2:2
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MIMC 0x00000000
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MOMC 0x00000001
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_BANK 6:3
 #define NVC36F_SEM_ADDR_LO (0x0000005c)
 #define NVC36F_SEM_ADDR_LO_OFFSET 31:2
 #define NVC36F_SEM_ADDR_HI (0x00000060)
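
A reading aid for the class header above: the "hi:lo" suffix on each field define names the bit range that field occupies in the 32-bit method data word, and the driver's NVDEF/NVVAL macros turn those ranges into shifted masks. Below is a standalone sketch of the packing implied by that notation, using MEM_OP_D_OPERATION (bits 31:27) and OPERATION_MEMBAR (0x5) as the worked example:

#include <stdint.h>
#include <stdio.h>

/* Pack a value into bits [hi:lo] of a 32-bit word, the way the hi:lo
 * annotations in the class header are meant to be read. */
static uint32_t set_field(uint32_t word, unsigned int hi, unsigned int lo,
                          uint32_t value)
{
        uint32_t mask = (hi - lo == 31) ? 0xffffffffu
                                        : (((1u << (hi - lo + 1)) - 1) << lo);

        return (word & ~mask) | ((value << lo) & mask);
}

int main(void)
{
        /* MEM_OP_D_OPERATION is bits 31:27; OPERATION_MEMBAR is 0x5. */
        uint32_t mem_op_d = set_field(0, 31, 27, 0x00000005);

        printf("MEM_OP_D data word: 0x%08x\n", mem_op_d);      /* 0x28000000 */
        return 0;
}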
@@ -350,6 +350,8 @@ nvkm_fifo_dtor(struct nvkm_engine *engine)
         nvkm_chid_unref(&fifo->chid);
 
         nvkm_event_fini(&fifo->nonstall.event);
+        if (fifo->func->nonstall_dtor)
+                fifo->func->nonstall_dtor(fifo);
         mutex_destroy(&fifo->mutex);
 
         if (fifo->func->dtor)
@@ -517,19 +517,11 @@ ga100_fifo_nonstall_intr(struct nvkm_inth *inth)
 static void
 ga100_fifo_nonstall_block(struct nvkm_event *event, int type, int index)
 {
-        struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
-        struct nvkm_runl *runl = nvkm_runl_get(fifo, index, 0);
-
-        nvkm_inth_block(&runl->nonstall.inth);
 }
 
 static void
 ga100_fifo_nonstall_allow(struct nvkm_event *event, int type, int index)
 {
-        struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
-        struct nvkm_runl *runl = nvkm_runl_get(fifo, index, 0);
-
-        nvkm_inth_allow(&runl->nonstall.inth);
 }
 
 const struct nvkm_event_func
@@ -564,12 +556,26 @@ ga100_fifo_nonstall_ctor(struct nvkm_fifo *fifo)
                 if (ret)
                         return ret;
 
+                nvkm_inth_allow(&runl->nonstall.inth);
+
                 nr = max(nr, runl->id + 1);
         }
 
         return nr;
 }
 
+void
+ga100_fifo_nonstall_dtor(struct nvkm_fifo *fifo)
+{
+        struct nvkm_runl *runl;
+
+        nvkm_runl_foreach(runl, fifo) {
+                if (runl->nonstall.vector < 0)
+                        continue;
+                nvkm_inth_block(&runl->nonstall.inth);
+        }
+}
+
 int
 ga100_fifo_runl_ctor(struct nvkm_fifo *fifo)
 {
@@ -599,6 +605,7 @@ ga100_fifo = {
         .runl_ctor = ga100_fifo_runl_ctor,
         .mmu_fault = &tu102_fifo_mmu_fault,
         .nonstall_ctor = ga100_fifo_nonstall_ctor,
+        .nonstall_dtor = ga100_fifo_nonstall_dtor,
         .nonstall = &ga100_fifo_nonstall,
         .runl = &ga100_runl,
         .runq = &ga100_runq,
@@ -30,6 +30,7 @@ ga102_fifo = {
         .runl_ctor = ga100_fifo_runl_ctor,
         .mmu_fault = &tu102_fifo_mmu_fault,
         .nonstall_ctor = ga100_fifo_nonstall_ctor,
+        .nonstall_dtor = ga100_fifo_nonstall_dtor,
         .nonstall = &ga100_fifo_nonstall,
         .runl = &ga100_runl,
         .runq = &ga100_runq,
@@ -41,6 +41,7 @@ struct nvkm_fifo_func {
         void (*start)(struct nvkm_fifo *, unsigned long *);
 
         int (*nonstall_ctor)(struct nvkm_fifo *);
+        void (*nonstall_dtor)(struct nvkm_fifo *);
         const struct nvkm_event_func *nonstall;
 
         const struct nvkm_runl_func *runl;
@@ -200,6 +201,7 @@ u32 tu102_chan_doorbell_handle(struct nvkm_chan *);
 
 int ga100_fifo_runl_ctor(struct nvkm_fifo *);
 int ga100_fifo_nonstall_ctor(struct nvkm_fifo *);
+void ga100_fifo_nonstall_dtor(struct nvkm_fifo *);
 extern const struct nvkm_event_func ga100_fifo_nonstall;
 extern const struct nvkm_runl_func ga100_runl;
 extern const struct nvkm_runq_func ga100_runq;
@@ -601,6 +601,7 @@ r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device,
         rm->chan.func = &r535_chan;
         rm->nonstall = &ga100_fifo_nonstall;
         rm->nonstall_ctor = ga100_fifo_nonstall_ctor;
+        rm->nonstall_dtor = ga100_fifo_nonstall_dtor;
 
         return nvkm_fifo_new_(rm, device, type, inst, pfifo);
 }
@@ -391,7 +391,8 @@ EXPORT_SYMBOL(drm_sched_entity_set_priority);
  * Add a callback to the current dependency of the entity to wake up the
  * scheduler when the entity becomes available.
  */
-static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
+static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity,
+                                               struct drm_sched_job *sched_job)
 {
         struct drm_gpu_scheduler *sched = entity->rq->sched;
         struct dma_fence *fence = entity->dependency;
@@ -421,6 +422,10 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
                 entity->dependency = fence;
         }
 
+        if (trace_drm_sched_job_unschedulable_enabled() &&
+            !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &entity->dependency->flags))
+                trace_drm_sched_job_unschedulable(sched_job, entity->dependency);
+
         if (!dma_fence_add_callback(entity->dependency, &entity->cb,
                                     drm_sched_entity_wakeup))
                 return true;
@@ -461,10 +466,8 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 
         while ((entity->dependency =
                         drm_sched_job_dependency(sched_job, entity))) {
-                if (drm_sched_entity_add_dependency_cb(entity)) {
-                        trace_drm_sched_job_unschedulable(sched_job, entity->dependency);
+                if (drm_sched_entity_add_dependency_cb(entity, sched_job))
                         return NULL;
-                }
         }
 
         /* skip jobs from entity that marked guilty */
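
The scheduler hunks move the unschedulable tracepoint into drm_sched_entity_add_dependency_cb(), before the wakeup callback is installed; the old call site traced entity->dependency after the helper returned, by which time an already-signalled fence could have run the callback and cleared or replaced the pointer. A single-threaded standalone model of why the value must be consumed before the callback is registered (an immediately-invoked callback stands in for a signalled fence; all names are invented):

#include <stdio.h>
#include <stdlib.h>

struct entity {
        int *dependency;                        /* pointer the callback may clear */
};

static void wakeup_cb(struct entity *e)
{
        free(e->dependency);                    /* fence consumed and released */
        e->dependency = NULL;
}

/* Registering the callback may invoke it immediately (already-signalled
 * fence), which is exactly the window the scheduler fix closes. */
static void add_callback(struct entity *e, void (*cb)(struct entity *))
{
        cb(e);
}

int main(void)
{
        struct entity e = { .dependency = malloc(sizeof(int)) };

        if (!e.dependency)
                return 1;
        *e.dependency = 7;

        /* Safe: trace the dependency while we still hold a stable reference. */
        printf("unschedulable, waiting on %d\n", *e.dependency);

        add_callback(&e, wakeup_cb);

        /* Unsafe in the old code path: e.dependency may now be NULL/stale. */
        if (!e.dependency)
                puts("dependency already cleared by callback");
        return 0;
}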
@@ -819,8 +819,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
                         return ret;
         }
 
-        tt_has_data = ttm && (ttm_tt_is_populated(ttm) ||
-                              (ttm->page_flags & TTM_TT_FLAG_SWAPPED));
+        tt_has_data = ttm && (ttm_tt_is_populated(ttm) || ttm_tt_is_swapped(ttm));
 
         move_lacks_source = !old_mem || (handle_system_ccs ? (!bo->ccs_cleared) :
                                          (!mem_type_is_vram(old_mem_type) && !tt_has_data));