Pull drm fixes from Dave Airlie:
 "Weekly drm fixes, bit bigger than last week, but overall amdgpu/xe
  with some ivpu bits and a random few fixes, and dropping the
  ttm_backup struct which wrapped struct file and was recently
  frowned at.

  drm:
   - Fix overflow when generating wedged event

  ttm:
   - Fix documentation
   - Remove struct ttm_backup

  panel:
   - simple: Fix timings for AUO G101EVN010

  amdgpu:
   - DC FP fixes
   - Freesync fix
   - DMUB AUX fixes
   - VCN fix
   - Hibernation fixes
   - HDP fixes

  xe:
   - Prevent PF queue overflow
   - Hold all forcewake during mocs test
   - Remove GSC flush on reset path
   - Fix forcewake put on error path
   - Fix runtime warning when building without svm

  i915:
   - Fix oops on resume after disconnecting DP MST sinks during suspend
   - Fix SLPC num_waiters refcounting

  ivpu:
   - Increase timeouts
   - Fix deadlock in cmdq ioctl
   - Unlock mutexes in correct order

  v3d:
   - Avoid memory leak in job handling"

* tag 'drm-fixes-2025-05-10' of https://gitlab.freedesktop.org/drm/kernel: (32 commits)
  drm/i915/dp: Fix determining SST/MST mode during MTP TU state computation
  drm/xe: Add config control for svm flush work
  drm/xe: Release force wake first then runtime power
  drm/xe/gsc: do not flush the GSC worker from the reset path
  drm/xe/tests/mocs: Hold XE_FORCEWAKE_ALL for LNCF regs
  drm/xe: Add page queue multiplier
  drm/amdgpu/hdp7: use memcfg register to post the write for HDP flush
  drm/amdgpu/hdp6: use memcfg register to post the write for HDP flush
  drm/amdgpu/hdp5.2: use memcfg register to post the write for HDP flush
  drm/amdgpu/hdp5: use memcfg register to post the write for HDP flush
  drm/amdgpu/hdp4: use memcfg register to post the write for HDP flush
  drm/amdgpu: fix pm notifier handling
  Revert "drm/amd: Stop evicting resources on APUs in suspend"
  drm/amdgpu/vcn: using separate VCN1_AON_SOC offset
  drm/amd/display: Fix wrong handling for AUX_DEFER case
  drm/amd/display: Copy AUX read reply data whenever length > 0
  drm/amd/display: Remove incorrect checking in dmub aux handler
  drm/amd/display: Fix the checking condition in dmub aux handling
  drm/amd/display: Shift DMUB AUX reply command if necessary
  drm/amd/display: Call FP Protect Before Mode Programming/Mode Support
  ...
Linus Torvalds
2025-05-09 12:41:34 -07:00
47 changed files with 285 additions and 195 deletions

View File

@@ -119,7 +119,7 @@ static void timeouts_init(struct ivpu_device *vdev)
else
vdev->timeout.autosuspend = 100;
vdev->timeout.d0i3_entry_msg = 5;
-vdev->timeout.state_dump_msg = 10;
+vdev->timeout.state_dump_msg = 100;
}
}

View File

@@ -681,8 +681,8 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id)
err_erase_xa:
xa_erase(&vdev->submitted_jobs_xa, job->job_id);
err_unlock:
-mutex_unlock(&vdev->submitted_jobs_lock);
mutex_unlock(&file_priv->lock);
+mutex_unlock(&vdev->submitted_jobs_lock);
ivpu_rpm_put(vdev);
return ret;
}
@@ -874,15 +874,21 @@ int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *
int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
+struct ivpu_device *vdev = file_priv->vdev;
struct drm_ivpu_cmdq_create *args = data;
struct ivpu_cmdq *cmdq;
int ret;
-if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
return -ENODEV;
if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
return -EINVAL;
+ret = ivpu_rpm_get(vdev);
+if (ret < 0)
+return ret;
mutex_lock(&file_priv->lock);
cmdq = ivpu_cmdq_create(file_priv, ivpu_job_to_jsm_priority(args->priority), false);
@@ -891,6 +897,8 @@ int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
mutex_unlock(&file_priv->lock);
+ivpu_rpm_put(vdev);
return cmdq ? 0 : -ENOMEM;
}
@@ -900,28 +908,35 @@ int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file
struct ivpu_device *vdev = file_priv->vdev;
struct drm_ivpu_cmdq_destroy *args = data;
struct ivpu_cmdq *cmdq;
-u32 cmdq_id;
+u32 cmdq_id = 0;
int ret;
if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
return -ENODEV;
+ret = ivpu_rpm_get(vdev);
+if (ret < 0)
+return ret;
mutex_lock(&file_priv->lock);
cmdq = xa_load(&file_priv->cmdq_xa, args->cmdq_id);
if (!cmdq || cmdq->is_legacy) {
ret = -ENOENT;
-goto err_unlock;
+} else {
+cmdq_id = cmdq->id;
+ivpu_cmdq_destroy(file_priv, cmdq);
+ret = 0;
}
-cmdq_id = cmdq->id;
-ivpu_cmdq_destroy(file_priv, cmdq);
mutex_unlock(&file_priv->lock);
-ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id);
-return 0;
-err_unlock:
-mutex_unlock(&file_priv->lock);
+/* Abort any pending jobs only if cmdq was destroyed */
+if (!ret)
+ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id);
+ivpu_rpm_put(vdev);
return ret;
}
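Both ivpu fixes above restore the same locking discipline: locks are released in the reverse order they were taken, and slow or re-entrant work (aborting jobs, runtime-PM puts) happens outside the critical section. A minimal sketch of the rule, with hypothetical locks rather than ivpu's actual structures:

#include <linux/mutex.h>

static DEFINE_MUTEX(outer_lock);
static DEFINE_MUTEX(inner_lock);

static void locked_work(void)
{
	mutex_lock(&outer_lock);
	mutex_lock(&inner_lock);

	/* ... work that needs both locks ... */

	mutex_unlock(&inner_lock);	/* reverse order: inner lock first */
	mutex_unlock(&outer_lock);	/* outer lock last */
}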

View File

@@ -1614,11 +1614,9 @@ static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_cap
#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
-void amdgpu_choose_low_power_state(struct amdgpu_device *adev);
#else
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
-static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
#endif
void amdgpu_register_gpu_instance(struct amdgpu_device *adev);

View File

@@ -1533,22 +1533,4 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
#endif /* CONFIG_AMD_PMC */
}
-/**
-* amdgpu_choose_low_power_state
-*
-* @adev: amdgpu_device_pointer
-*
-* Choose the target low power state for the GPU
-*/
-void amdgpu_choose_low_power_state(struct amdgpu_device *adev)
-{
-if (adev->in_runpm)
-return;
-if (amdgpu_acpi_is_s0ix_active(adev))
-adev->in_s0ix = true;
-else if (amdgpu_acpi_is_s3_active(adev))
-adev->in_s3 = true;
-}
#endif /* CONFIG_SUSPEND */

View File

@@ -4907,28 +4907,20 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
* @data: data
*
* This function is called when the system is about to suspend or hibernate.
-* It is used to evict resources from the device before the system goes to
-* sleep while there is still access to swap.
+* It is used to set the appropriate flags so that eviction can be optimized
+* in the pm prepare callback.
*/
static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
void *data)
{
struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb);
-int r;
switch (mode) {
case PM_HIBERNATION_PREPARE:
adev->in_s4 = true;
-fallthrough;
-case PM_SUSPEND_PREPARE:
-r = amdgpu_device_evict_resources(adev);
-/*
-* This is considered non-fatal at this time because
-* amdgpu_device_prepare() will also fatally evict resources.
-* See https://gitlab.freedesktop.org/drm/amd/-/issues/3781
-*/
-if (r)
-drm_warn(adev_to_drm(adev), "Failed to evict resources, freeze active processes if problems occur: %d\n", r);
break;
case PM_POST_HIBERNATION:
adev->in_s4 = false;
break;
}
@@ -4949,15 +4941,13 @@ int amdgpu_device_prepare(struct drm_device *dev)
struct amdgpu_device *adev = drm_to_adev(dev);
int i, r;
-amdgpu_choose_low_power_state(adev);
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
/* Evict the majority of BOs before starting suspend sequence */
r = amdgpu_device_evict_resources(adev);
if (r)
-goto unprepare;
+return r;
flush_delayed_work(&adev->gfx.gfx_off_delay_work);
@@ -4968,15 +4958,10 @@ int amdgpu_device_prepare(struct drm_device *dev)
continue;
r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]);
if (r)
-goto unprepare;
+return r;
}
return 0;
-unprepare:
-adev->in_s0ix = adev->in_s3 = adev->in_s4 = false;
-return r;
}
/**
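The callback amdgpu trims down here hangs off the generic PM notifier chain from linux/suspend.h; after the fix it only does flag bookkeeping and leaves eviction to amdgpu_device_prepare(). A rough, self-contained sketch of that shape (the flag and names are illustrative, not amdgpu's):

#include <linux/suspend.h>
#include <linux/notifier.h>

static bool in_hibernate;	/* illustrative flag */

static int my_pm_notifier(struct notifier_block *nb, unsigned long mode, void *data)
{
	switch (mode) {
	case PM_HIBERNATION_PREPARE:
		in_hibernate = true;	/* defer heavy work to ->prepare() */
		break;
	case PM_POST_HIBERNATION:
		in_hibernate = false;
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block my_pm_nb = { .notifier_call = my_pm_notifier };
/* pair register_pm_notifier(&my_pm_nb) / unregister_pm_notifier(&my_pm_nb) at probe/remove */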

View File

@@ -2615,13 +2615,8 @@ static int amdgpu_pmops_freeze(struct device *dev)
static int amdgpu_pmops_thaw(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
-struct amdgpu_device *adev = drm_to_adev(drm_dev);
-int r;
-r = amdgpu_device_resume(drm_dev, true);
-adev->in_s4 = false;
-return r;
+return amdgpu_device_resume(drm_dev, true);
}
static int amdgpu_pmops_poweroff(struct device *dev)
@@ -2634,9 +2629,6 @@ static int amdgpu_pmops_poweroff(struct device *dev)
static int amdgpu_pmops_restore(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
-struct amdgpu_device *adev = drm_to_adev(drm_dev);
-adev->in_s4 = false;
return amdgpu_device_resume(drm_dev, true);
}

View File

@@ -66,7 +66,6 @@
#define VCN_ENC_CMD_REG_WAIT 0x0000000c
#define VCN_AON_SOC_ADDRESS_2_0 0x1f800
-#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define VCN_VID_IP_ADDRESS_2_0 0x0
#define VCN_AON_IP_ADDRESS_2_0 0x30000

View File

@@ -41,7 +41,12 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+/* We just need to read back a register to post the write.
+* Reading back the remapped register causes problems on
+* some platforms so just read back the memory size register.
+*/
+if (adev->nbio.funcs->get_memsize)
+adev->nbio.funcs->get_memsize(adev);
} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
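All five hdp_v*_flush_hdp() fixes apply the same MMIO write-posting idiom: a posted write is only guaranteed to have reached the device once a later read on the same path completes, and the register chosen for the read-back must be harmless on every platform (hence the NBIO memsize register rather than the remapped flush register). The idiom in isolation, with hypothetical register offsets:

#include <linux/io.h>

static void flush_posted_write(void __iomem *regs)
{
	writel(0, regs + 0x10);		/* hypothetical trigger register */
	/*
	 * Read back any harmless register on the same bus path; the read
	 * cannot complete until the earlier posted write has landed.
	 */
	(void)readl(regs + 0x00);	/* hypothetical id/size register */
}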

View File

@@ -32,7 +32,12 @@ static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+/* We just need to read back a register to post the write.
+* Reading back the remapped register causes problems on
+* some platforms so just read back the memory size register.
+*/
+if (adev->nbio.funcs->get_memsize)
+adev->nbio.funcs->get_memsize(adev);
} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}

View File

@@ -33,7 +33,17 @@ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
if (!ring || !ring->funcs->emit_wreg) {
WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
0);
-RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+if (amdgpu_sriov_vf(adev)) {
+/* this is fine because SR_IOV doesn't remap the register */
+RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+} else {
+/* We just need to read back a register to post the write.
+* Reading back the remapped register causes problems on
+* some platforms so just read back the memory size register.
+*/
+if (adev->nbio.funcs->get_memsize)
+adev->nbio.funcs->get_memsize(adev);
+}
} else {
amdgpu_ring_emit_wreg(ring,
(adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,

View File

@@ -35,7 +35,12 @@ static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev,
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+/* We just need to read back a register to post the write.
+* Reading back the remapped register causes problems on
+* some platforms so just read back the memory size register.
+*/
+if (adev->nbio.funcs->get_memsize)
+adev->nbio.funcs->get_memsize(adev);
} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}

View File

@@ -32,7 +32,12 @@ static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev,
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+/* We just need to read back a register to post the write.
+* Reading back the remapped register causes problems on
+* some platforms so just read back the memory size register.
+*/
+if (adev->nbio.funcs->get_memsize)
+adev->nbio.funcs->get_memsize(adev);
} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}

View File

@@ -39,6 +39,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48200
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x1fd
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x503

View File

@@ -39,6 +39,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48200
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f

View File

@@ -40,6 +40,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48200
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f

View File

@@ -46,6 +46,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48300
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define VCN_HARVEST_MMSCH 0
@@ -614,7 +615,8 @@ static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
/* VCN global tiling registers */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
-VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
+adev->gfx.config.gb_addr_config, 0, indirect);
}
/**

View File

@@ -45,6 +45,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48300
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_3[] = {
SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),

View File

@@ -46,6 +46,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 (0x48300 + 0x38000)
+#define VCN1_AON_SOC_ADDRESS_3_0 (0x48000 + 0x38000)
#define VCN_HARVEST_MMSCH 0

View File

@@ -533,7 +533,8 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
/* VCN global tiling registers */
WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
-VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
+adev->gfx.config.gb_addr_config, 0, indirect);
return;
}

View File

@@ -673,15 +673,21 @@ static void dm_crtc_high_irq(void *interrupt_params)
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
if (acrtc->dm_irq_params.stream &&
-acrtc->dm_irq_params.vrr_params.supported &&
-acrtc->dm_irq_params.freesync_config.state ==
-VRR_STATE_ACTIVE_VARIABLE) {
+acrtc->dm_irq_params.vrr_params.supported) {
+bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
+bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
+bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
mod_freesync_handle_v_update(adev->dm.freesync_module,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params);
-dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
-&acrtc->dm_irq_params.vrr_params.adjust);
+/* update vmin_vmax only if freesync is enabled, or only if PSR and REPLAY are disabled */
+if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
+dc_stream_adjust_vmin_vmax(adev->dm.dc,
+acrtc->dm_irq_params.stream,
+&acrtc->dm_irq_params.vrr_params.adjust);
+}
}
/*
@@ -12743,7 +12749,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
* Transient states before tunneling is enabled could
* lead to this error. We can ignore this for now.
*/
-if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
+if (p_notify->result == AUX_RET_ERROR_PROTOCOL_ERROR) {
DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
payload->address, payload->length,
p_notify->result);
@@ -12752,22 +12758,14 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
goto out;
}
-payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
+payload->reply[0] = adev->dm.dmub_notify->aux_reply.command & 0xF;
+if (adev->dm.dmub_notify->aux_reply.command & 0xF0)
+/* The reply is stored in the top nibble of the command. */
+payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF;
-if (!payload->write && p_notify->aux_reply.length &&
-(payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {
-if (payload->length != p_notify->aux_reply.length) {
-DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
-p_notify->aux_reply.length,
-payload->address, payload->length);
-*operation_result = AUX_RET_ERROR_INVALID_REPLY;
-goto out;
-}
+if (!payload->write && p_notify->aux_reply.length)
memcpy(payload->data, p_notify->aux_reply.data,
p_notify->aux_reply.length);
-}
/* success */
ret = p_notify->aux_reply.length;
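The reworked reply handling reads the DMUB command byte as two nibbles: the native AUX reply lives in the low nibble and, when the high nibble is non-zero, it carries the shifted reply that should win. A standalone C rendering of that decode, as this diff implements it (the function name is illustrative):

#include <stdint.h>

static uint8_t decode_dmub_aux_reply(uint8_t cmd)
{
	uint8_t reply = cmd & 0xF;		/* low nibble by default */

	if (cmd & 0xF0)				/* reply stored in the top nibble? */
		reply = (cmd >> 4) & 0xF;	/* shift it down */
	return reply;
}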

View File

@@ -51,6 +51,9 @@
#define PEAK_FACTOR_X1000 1006
+/*
+* This function handles both native AUX and I2C-Over-AUX transactions.
+*/
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
@@ -87,15 +90,25 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
if (adev->dm.aux_hpd_discon_quirk) {
if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE &&
operation_result == AUX_RET_ERROR_HPD_DISCON) {
-result = 0;
+result = msg->size;
operation_result = AUX_RET_SUCCESS;
}
}
-if (payload.write && result >= 0)
-result = msg->size;
+/*
+* result equals to 0 includes the cases of AUX_DEFER/I2C_DEFER
+*/
+if (payload.write && result >= 0) {
+if (result) {
+/*one byte indicating partially written bytes. Force 0 to retry*/
+drm_info(adev_to_drm(adev), "amdgpu: AUX partially written\n");
+result = 0;
+} else if (!payload.reply[0])
+/*I2C_ACK|AUX_ACK*/
+result = msg->size;
+}
-if (result < 0)
+if (result < 0) {
switch (operation_result) {
case AUX_RET_SUCCESS:
break;
@@ -114,6 +127,13 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
break;
}
drm_info(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result);
+}
+if (payload.reply[0])
+drm_info(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.",
+payload.reply[0]);
return result;
}

View File

@@ -234,7 +234,9 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s
if (!result)
return false;
+DC_FP_START();
result = dml2_build_mode_programming(mode_programming);
+DC_FP_END();
if (!result)
return false;
@@ -277,7 +279,9 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co
mode_support->dml2_instance = dml_init->dml2_instance;
dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx);
dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming;
+DC_FP_START();
is_supported = dml2_check_mode_supported(mode_support);
+DC_FP_END();
if (!is_supported)
return false;
@@ -288,16 +292,12 @@ bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml
{
bool out = false;
-DC_FP_START();
/* Use dml_validate_only for fast_validate path */
if (fast_validate)
out = dml21_check_mode_support(in_dc, context, dml_ctx);
else
out = dml21_mode_check_and_programming(in_dc, context, dml_ctx);
-DC_FP_END();
return out;
}
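The dml21 change narrows the DC_FP_START()/DC_FP_END() brackets to just the calls that execute floating-point code. On x86 these macros bottom out in kernel_fpu_begin()/kernel_fpu_end(), which disable preemption, so the protected region should be as small as possible and must not sleep. The bare idiom:

#include <asm/fpu/api.h>	/* x86 kernel FPU API */

static void compute_with_fpu(void)
{
	kernel_fpu_begin();	/* FPU state saved, preemption disabled */
	/* ... floating-point math only: no sleeping, no blocking locks ... */
	kernel_fpu_end();
}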

View File

@@ -973,7 +973,9 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p
}
}
-static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc_state *context, struct scaler_data *out)
+static struct scaler_data *get_scaler_data_for_plane(
+const struct dc_plane_state *in,
+struct dc_state *context)
{
int i;
struct pipe_ctx *temp_pipe = &context->res_ctx.temp_pipe;
@@ -994,7 +996,7 @@ static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc
}
ASSERT(i < MAX_PIPES);
-memcpy(out, &temp_pipe->plane_res.scl_data, sizeof(*out));
+return &temp_pipe->plane_res.scl_data;
}
static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location,
@@ -1057,11 +1059,7 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out
const struct dc_plane_state *in, struct dc_state *context,
const struct soc_bounding_box_st *soc)
{
-struct scaler_data *scaler_data = kzalloc(sizeof(*scaler_data), GFP_KERNEL);
-if (!scaler_data)
-return;
-get_scaler_data_for_plane(in, context, scaler_data);
+struct scaler_data *scaler_data = get_scaler_data_for_plane(in, context);
out->CursorBPP[location] = dml_cur_32bit;
out->CursorWidth[location] = 256;
@@ -1126,8 +1124,6 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out
out->DynamicMetadataTransmittedBytes[location] = 0;
out->NumberOfCursors[location] = 1;
-kfree(scaler_data);
}
static unsigned int map_stream_to_dml_display_cfg(const struct dml2_context *dml2,

View File

@@ -2114,8 +2114,6 @@ static bool dcn32_resource_construct(
#define REG_STRUCT dccg_regs
dccg_regs_init();
-DC_FP_START();
ctx->dc_bios->regs = &bios_regs;
pool->base.res_cap = &res_cap_dcn32;
@@ -2501,14 +2499,10 @@ static bool dcn32_resource_construct(
if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev) && (dc->config.sdpif_request_limit_words_per_umc == 0))
dc->config.sdpif_request_limit_words_per_umc = 16;
-DC_FP_END();
return true;
create_fail:
-DC_FP_END();
dcn32_resource_destruct(pool);
return false;

View File

@@ -549,7 +549,7 @@ int drm_dev_wedged_event(struct drm_device *dev, unsigned long method)
if (drm_WARN_ONCE(dev, !recovery, "invalid recovery method %u\n", opt))
break;
len += scnprintf(event_string + len, sizeof(event_string), "%s,", recovery);
len += scnprintf(event_string + len, sizeof(event_string) - len, "%s,", recovery);
}
if (recovery)
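The one-character wedged-event fix is worth spelling out, since the pattern recurs everywhere strings are built piecewise: each scnprintf() must be told how much space remains, not the buffer's total size, or len can walk past the end of the buffer. The safe accumulation loop, as a generic sketch:

#include <linux/kernel.h>

static size_t join_names(char *buf, size_t size, const char *const *names, int n)
{
	size_t len = 0;
	int i;

	for (i = 0; i < n; i++)
		len += scnprintf(buf + len, size - len, "%s,", names[i]);

	/* scnprintf() caps each write, so len never exceeds size - 1 */
	return len;
}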

View File

@@ -242,7 +242,7 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
-bool is_mst = intel_dp->is_mst;
+bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
int bpp_x16, slots = -EINVAL;
int dsc_slice_count = 0;
int max_dpt_bpp_x16;

View File

@@ -1001,6 +1001,10 @@ void intel_rps_dec_waiters(struct intel_rps *rps)
if (rps_uses_slpc(rps)) {
slpc = rps_to_slpc(rps);
+/* Don't decrement num_waiters for req where increment was skipped */
+if (slpc->power_profile == SLPC_POWER_PROFILES_POWER_SAVING)
+return;
intel_guc_slpc_dec_waiters(slpc);
} else {
atomic_dec(&rps->num_waiters);
@@ -1029,11 +1033,15 @@ void intel_rps_boost(struct i915_request *rq)
if (slpc->power_profile == SLPC_POWER_PROFILES_POWER_SAVING)
return;
-if (slpc->min_freq_softlimit >= slpc->boost_freq)
-return;
/* Return if old value is non zero */
if (!atomic_fetch_inc(&slpc->num_waiters)) {
+/*
+* Skip queuing boost work if frequency is already boosted,
+* but still increment num_waiters.
+*/
+if (slpc->min_freq_softlimit >= slpc->boost_freq)
+return;
GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
rq->fence.context, rq->fence.seqno);
queue_work(rps_to_gt(rps)->i915->unordered_wq,
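The i915 boost rework leans on atomic_fetch_inc() returning the value before the increment: every request bumps num_waiters, keeping intel_rps_dec_waiters() balanced, but only the first waiter (the one that saw zero) queues the boost work. The bare pattern, sketched with a standalone counter:

#include <linux/atomic.h>

static atomic_t num_waiters = ATOMIC_INIT(0);

static void add_boost_waiter(void)
{
	/* atomic_fetch_inc() returns the old value */
	if (atomic_fetch_inc(&num_waiters))
		return;		/* not the first waiter: work already queued */

	/* first waiter: queue the boost work here */
}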

View File

@@ -1027,27 +1027,28 @@ static const struct panel_desc auo_g070vvn01 = {
},
};
-static const struct drm_display_mode auo_g101evn010_mode = {
-.clock = 68930,
-.hdisplay = 1280,
-.hsync_start = 1280 + 82,
-.hsync_end = 1280 + 82 + 2,
-.htotal = 1280 + 82 + 2 + 84,
-.vdisplay = 800,
-.vsync_start = 800 + 8,
-.vsync_end = 800 + 8 + 2,
-.vtotal = 800 + 8 + 2 + 6,
+static const struct display_timing auo_g101evn010_timing = {
+.pixelclock = { 64000000, 68930000, 85000000 },
+.hactive = { 1280, 1280, 1280 },
+.hfront_porch = { 8, 64, 256 },
+.hback_porch = { 8, 64, 256 },
+.hsync_len = { 40, 168, 767 },
+.vactive = { 800, 800, 800 },
+.vfront_porch = { 4, 8, 100 },
+.vback_porch = { 4, 8, 100 },
+.vsync_len = { 8, 16, 223 },
};
static const struct panel_desc auo_g101evn010 = {
-.modes = &auo_g101evn010_mode,
-.num_modes = 1,
+.timings = &auo_g101evn010_timing,
+.num_timings = 1,
.bpc = 6,
.size = {
.width = 216,
.height = 135,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+.bus_flags = DRM_BUS_FLAG_DE_HIGH,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};

View File

@@ -7,20 +7,6 @@
#include <linux/page-flags.h>
#include <linux/swap.h>
-/*
-* Casting from randomized struct file * to struct ttm_backup * is fine since
-* struct ttm_backup is never defined nor dereferenced.
-*/
-static struct file *ttm_backup_to_file(struct ttm_backup *backup)
-{
-return (void *)backup;
-}
-static struct ttm_backup *ttm_file_to_backup(struct file *file)
-{
-return (void *)file;
-}
/*
* Need to map shmem indices to handle since a handle value
* of 0 means error, following the swp_entry_t convention.
@@ -40,12 +26,12 @@ static pgoff_t ttm_backup_handle_to_shmem_idx(pgoff_t handle)
* @backup: The struct backup pointer used to obtain the handle
* @handle: The handle obtained from the @backup_page function.
*/
-void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle)
+void ttm_backup_drop(struct file *backup, pgoff_t handle)
{
loff_t start = ttm_backup_handle_to_shmem_idx(handle);
start <<= PAGE_SHIFT;
-shmem_truncate_range(file_inode(ttm_backup_to_file(backup)), start,
+shmem_truncate_range(file_inode(backup), start,
start + PAGE_SIZE - 1);
}
@@ -55,16 +41,15 @@ void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle)
* @backup: The struct backup pointer used to back up the page.
* @dst: The struct page to copy into.
* @handle: The handle returned when the page was backed up.
-* @intr: Try to perform waits interruptable or at least killable.
+* @intr: Try to perform waits interruptible or at least killable.
*
* Return: 0 on success, Negative error code on failure, notably
* -EINTR if @intr was set to true and a signal is pending.
*/
-int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst,
+int ttm_backup_copy_page(struct file *backup, struct page *dst,
pgoff_t handle, bool intr)
{
-struct file *filp = ttm_backup_to_file(backup);
-struct address_space *mapping = filp->f_mapping;
+struct address_space *mapping = backup->f_mapping;
struct folio *from_folio;
pgoff_t idx = ttm_backup_handle_to_shmem_idx(handle);
@@ -106,12 +91,11 @@ int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst,
* the folio size- and usage.
*/
s64
-ttm_backup_backup_page(struct ttm_backup *backup, struct page *page,
+ttm_backup_backup_page(struct file *backup, struct page *page,
bool writeback, pgoff_t idx, gfp_t page_gfp,
gfp_t alloc_gfp)
{
-struct file *filp = ttm_backup_to_file(backup);
-struct address_space *mapping = filp->f_mapping;
+struct address_space *mapping = backup->f_mapping;
unsigned long handle = 0;
struct folio *to_folio;
int ret;
@@ -161,9 +145,9 @@ ttm_backup_backup_page(struct ttm_backup *backup, struct page *page,
*
* After a call to this function, it's illegal to use the @backup pointer.
*/
-void ttm_backup_fini(struct ttm_backup *backup)
+void ttm_backup_fini(struct file *backup)
{
-fput(ttm_backup_to_file(backup));
+fput(backup);
}
/**
@@ -194,14 +178,10 @@ EXPORT_SYMBOL_GPL(ttm_backup_bytes_avail);
*
* Create a backup utilizing shmem objects.
*
-* Return: A pointer to a struct ttm_backup on success,
+* Return: A pointer to a struct file on success,
* an error pointer on error.
*/
-struct ttm_backup *ttm_backup_shmem_create(loff_t size)
+struct file *ttm_backup_shmem_create(loff_t size)
{
-struct file *filp;
-filp = shmem_file_setup("ttm shmem backup", size, 0);
-return ttm_file_to_backup(filp);
+return shmem_file_setup("ttm shmem backup", size, 0);
}
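With the casting helpers gone, a TTM backup object is now literally the struct file that shmem_file_setup() returns, and it is released with fput(). A minimal sketch of that lifecycle (ERR_PTR handling is left to the caller):

#include <linux/shmem_fs.h>
#include <linux/file.h>

static struct file *backup_create(loff_t size)
{
	/* may return an ERR_PTR on failure */
	return shmem_file_setup("example backup", size, 0);
}

static void backup_destroy(struct file *backup)
{
	fput(backup);	/* drop the reference; frees the shmem object */
}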

View File

@@ -506,7 +506,7 @@ static void ttm_pool_allocated_page_commit(struct page *allocated,
* if successful, populate the page-table and dma-address arrays.
*/
static int ttm_pool_restore_commit(struct ttm_pool_tt_restore *restore,
-struct ttm_backup *backup,
+struct file *backup,
const struct ttm_operation_ctx *ctx,
struct ttm_pool_alloc_state *alloc)
@@ -655,7 +655,7 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
pgoff_t start_page, pgoff_t end_page)
{
struct page **pages = &tt->pages[start_page];
-struct ttm_backup *backup = tt->backup;
+struct file *backup = tt->backup;
pgoff_t i, nr;
for (i = start_page; i < end_page; i += nr, pages += nr) {
@@ -963,7 +963,7 @@ void ttm_pool_drop_backed_up(struct ttm_tt *tt)
long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
const struct ttm_backup_flags *flags)
{
-struct ttm_backup *backup = tt->backup;
+struct file *backup = tt->backup;
struct page *page;
unsigned long handle;
gfp_t alloc_gfp;

View File

@@ -544,7 +544,7 @@ EXPORT_SYMBOL(ttm_tt_pages_limit);
*/
int ttm_tt_setup_backup(struct ttm_tt *tt)
{
-struct ttm_backup *backup =
+struct file *backup =
ttm_backup_shmem_create(((loff_t)tt->num_pages) << PAGE_SHIFT);
if (WARN_ON_ONCE(!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)))

View File

@@ -744,11 +744,16 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
return DRM_GPU_SCHED_STAT_NOMINAL;
}
-/* If the current address or return address have changed, then the GPU
-* has probably made progress and we should delay the reset. This
-* could fail if the GPU got in an infinite loop in the CL, but that
-* is pretty unlikely outside of an i-g-t testcase.
-*/
+static void
+v3d_sched_skip_reset(struct drm_sched_job *sched_job)
+{
+struct drm_gpu_scheduler *sched = sched_job->sched;
+spin_lock(&sched->job_list_lock);
+list_add(&sched_job->list, &sched->pending_list);
+spin_unlock(&sched->job_list_lock);
+}
static enum drm_gpu_sched_stat
v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
u32 *timedout_ctca, u32 *timedout_ctra)
@@ -758,9 +763,16 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q));
u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q));
+/* If the current address or return address have changed, then the GPU
+* has probably made progress and we should delay the reset. This
+* could fail if the GPU got in an infinite loop in the CL, but that
+* is pretty unlikely outside of an i-g-t testcase.
+*/
if (*timedout_ctca != ctca || *timedout_ctra != ctra) {
*timedout_ctca = ctca;
*timedout_ctra = ctra;
+v3d_sched_skip_reset(sched_job);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
@@ -800,11 +812,13 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
struct v3d_dev *v3d = job->base.v3d;
u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4(v3d->ver));
-/* If we've made progress, skip reset and let the timer get
-* rearmed.
+/* If we've made progress, skip reset, add the job to the pending
+* list, and let the timer get rearmed.
*/
if (job->timedout_batches != batches) {
job->timedout_batches = batches;
+v3d_sched_skip_reset(sched_job);
return DRM_GPU_SCHED_STAT_NOMINAL;
}

View File

@@ -46,8 +46,11 @@ static void read_l3cc_table(struct xe_gt *gt,
unsigned int fw_ref, i;
u32 reg_val;
-fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
-KUNIT_ASSERT_NE_MSG(test, fw_ref, 0, "Forcewake Failed.\n");
+fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+xe_force_wake_put(gt_to_fw(gt), fw_ref);
+KUNIT_ASSERT_TRUE_MSG(test, true, "Forcewake Failed.\n");
+}
for (i = 0; i < info->num_mocs_regs; i++) {
if (!(i & 1)) {

View File

@@ -555,6 +555,28 @@ void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc)
flush_work(&gsc->work);
}
+void xe_gsc_stop_prepare(struct xe_gsc *gsc)
+{
+struct xe_gt *gt = gsc_to_gt(gsc);
+int ret;
+if (!xe_uc_fw_is_loadable(&gsc->fw) || xe_uc_fw_is_in_error_state(&gsc->fw))
+return;
+xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GSC);
+/*
+* If the GSC FW load or the proxy init are interrupted, the only way
+* to recover it is to do an FLR and reload the GSC from scratch.
+* Therefore, let's wait for the init to complete before stopping
+* operations. The proxy init is the last step, so we can just wait on
+* that
+*/
+ret = xe_gsc_wait_for_proxy_init_done(gsc);
+if (ret)
+xe_gt_err(gt, "failed to wait for GSC init completion before uc stop\n");
+}
/*
* wa_14015076503: if the GSC FW is loaded, we need to alert it before doing a
* GSC engine reset by writing a notification bit in the GS1 register and then

View File

@@ -16,6 +16,7 @@ struct xe_hw_engine;
int xe_gsc_init(struct xe_gsc *gsc);
int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc);
void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc);
+void xe_gsc_stop_prepare(struct xe_gsc *gsc);
void xe_gsc_load_start(struct xe_gsc *gsc);
void xe_gsc_hwe_irq_handler(struct xe_hw_engine *hwe, u16 intr_vec);

View File

@@ -71,6 +71,17 @@ bool xe_gsc_proxy_init_done(struct xe_gsc *gsc)
HECI1_FWSTS1_PROXY_STATE_NORMAL;
}
+int xe_gsc_wait_for_proxy_init_done(struct xe_gsc *gsc)
+{
+struct xe_gt *gt = gsc_to_gt(gsc);
+/* Proxy init can take up to 500ms, so wait double that for safety */
+return xe_mmio_wait32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE),
+HECI1_FWSTS1_CURRENT_STATE,
+HECI1_FWSTS1_PROXY_STATE_NORMAL,
+USEC_PER_SEC, NULL, false);
+}
static void __gsc_proxy_irq_rmw(struct xe_gsc *gsc, u32 clr, u32 set)
{
struct xe_gt *gt = gsc_to_gt(gsc);

View File

@@ -12,6 +12,7 @@ struct xe_gsc;
int xe_gsc_proxy_init(struct xe_gsc *gsc);
bool xe_gsc_proxy_init_done(struct xe_gsc *gsc);
+int xe_gsc_wait_for_proxy_init_done(struct xe_gsc *gsc);
int xe_gsc_proxy_start(struct xe_gsc *gsc);
int xe_gsc_proxy_request_handler(struct xe_gsc *gsc);

View File

@@ -857,7 +857,7 @@ void xe_gt_suspend_prepare(struct xe_gt *gt)
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
-xe_uc_stop_prepare(&gt->uc);
+xe_uc_suspend_prepare(&gt->uc);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
}

View File

@@ -92,22 +92,23 @@ static int hw_engines(struct xe_gt *gt, struct drm_printer *p)
struct xe_hw_engine *hwe;
enum xe_hw_engine_id id;
unsigned int fw_ref;
+int ret = 0;
xe_pm_runtime_get(xe);
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
-xe_pm_runtime_put(xe);
-xe_force_wake_put(gt_to_fw(gt), fw_ref);
-return -ETIMEDOUT;
+ret = -ETIMEDOUT;
+goto fw_put;
}
for_each_hw_engine(hwe, gt, id)
xe_hw_engine_print(hwe, p);
+fw_put:
xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_pm_runtime_put(xe);
-return 0;
+return ret;
}
static int powergate_info(struct xe_gt *gt, struct drm_printer *p)
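Besides returning the right value, the debugfs fix converts the early-exit path to the kernel's single-exit goto style, so both references are always dropped in reverse order of acquisition (forcewake before the runtime-PM reference). The generic shape, with hypothetical helpers standing in for the xe_pm_runtime_*()/xe_force_wake_*() calls:

void runtime_get(void);			/* hypothetical helpers throughout */
void runtime_put(void);
int wake_get(void);
bool wake_ref_complete(int ref);
void wake_put(int ref);

static int do_op(void)
{
	int ref, ret = 0;

	runtime_get();
	ref = wake_get();
	if (!wake_ref_complete(ref)) {
		ret = -ETIMEDOUT;
		goto put;	/* the partial ref is still put below */
	}

	/* ... work requiring the full wakeref ... */
put:
	wake_put(ref);		/* reverse order: wakeref first, */
	runtime_put();		/* runtime-PM reference last */
	return ret;
}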

View File

@@ -435,9 +435,16 @@ static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue)
num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss,
XE_MAX_EU_FUSE_BITS) * num_dss;
-/* user can issue separate page faults per EU and per CS */
+/*
+* user can issue separate page faults per EU and per CS
+*
+* XXX: Multiplier required as compute UMD are getting PF queue errors
+* without it. Follow on why this multiplier is required.
+*/
+#define PF_MULTIPLIER 8
pf_queue->num_dw =
-(num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW;
+(num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW * PF_MULTIPLIER;
+#undef PF_MULTIPLIER
pf_queue->gt = gt;
pf_queue->data = devm_kcalloc(xe->drm.dev, pf_queue->num_dw,
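The sizing itself is plain arithmetic; the fix simply scales the whole queue by 8. With hypothetical numbers (say 512 fused-in EUs, 64 hardware engines, and 4 dwords per fault message; none of these are xe's real constants):

/* hypothetical: 512 EUs, 64 engines, 4 dwords per PF message */
int num_dw_before = (512 + 64) * 4;	/* =  2304 dwords */
int num_dw_after  = (512 + 64) * 4 * 8;	/* = 18432 dwords */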

View File

@@ -947,3 +947,15 @@ int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
return 0;
}
#endif
+/**
+* xe_svm_flush() - SVM flush
+* @vm: The VM.
+*
+* Flush all SVM actions.
+*/
+void xe_svm_flush(struct xe_vm *vm)
+{
+if (xe_vm_in_fault_mode(vm))
+flush_work(&vm->svm.garbage_collector.work);
+}

View File

@@ -72,6 +72,9 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);
int xe_svm_bo_evict(struct xe_bo *bo);
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);
+void xe_svm_flush(struct xe_vm *vm);
#else
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
@@ -124,6 +127,11 @@ static inline
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}
+static inline void xe_svm_flush(struct xe_vm *vm)
+{
+}
#endif
/**
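The header side is the standard Kconfig stub idiom: a real declaration when the option is enabled and an empty static inline otherwise, so call sites such as xe_vm_bind_ioctl() need no #ifdef and the call compiles away entirely when SVM is built out. The idiom in isolation (CONFIG_FOO and the types are illustrative):

#ifdef CONFIG_FOO
void foo_flush(struct foo *f);		/* implemented in foo.c */
#else
static inline void foo_flush(struct foo *f)
{
	/* option compiled out: nothing to flush */
}
#endif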

View File

@@ -244,7 +244,7 @@ void xe_uc_gucrc_disable(struct xe_uc *uc)
void xe_uc_stop_prepare(struct xe_uc *uc)
{
-xe_gsc_wait_for_worker_completion(&uc->gsc);
+xe_gsc_stop_prepare(&uc->gsc);
xe_guc_stop_prepare(&uc->guc);
}
@@ -278,6 +278,12 @@ static void uc_reset_wait(struct xe_uc *uc)
goto again;
}
+void xe_uc_suspend_prepare(struct xe_uc *uc)
+{
+xe_gsc_wait_for_worker_completion(&uc->gsc);
+xe_guc_stop_prepare(&uc->guc);
+}
int xe_uc_suspend(struct xe_uc *uc)
{
/* GuC submission not enabled, nothing to do */

View File

@@ -18,6 +18,7 @@ int xe_uc_reset_prepare(struct xe_uc *uc);
void xe_uc_stop_prepare(struct xe_uc *uc);
void xe_uc_stop(struct xe_uc *uc);
int xe_uc_start(struct xe_uc *uc);
+void xe_uc_suspend_prepare(struct xe_uc *uc);
int xe_uc_suspend(struct xe_uc *uc);
int xe_uc_sanitize_reset(struct xe_uc *uc);
void xe_uc_declare_wedged(struct xe_uc *uc);

View File

@@ -3312,8 +3312,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
/* Ensure all UNMAPs visible */
-if (xe_vm_in_fault_mode(vm))
-flush_work(&vm->svm.garbage_collector.work);
+xe_svm_flush(vm);
err = down_write_killable(&vm->lock);
if (err)

View File

@@ -9,14 +9,12 @@
#include <linux/mm_types.h>
#include <linux/shmem_fs.h>
-struct ttm_backup;
/**
* ttm_backup_handle_to_page_ptr() - Convert handle to struct page pointer
* @handle: The handle to convert.
*
* Converts an opaque handle received from the
-* struct ttm_backoup_ops::backup_page() function to an (invalid)
+* ttm_backup_backup_page() function to an (invalid)
* struct page pointer suitable for a struct page array.
*
* Return: An (invalid) struct page pointer.
@@ -45,8 +43,8 @@ static inline bool ttm_backup_page_ptr_is_handle(const struct page *page)
*
* Return: The handle that was previously used in
* ttm_backup_handle_to_page_ptr() to obtain a struct page pointer, suitable
-* for use as argument in the struct ttm_backup_ops drop() or
-* copy_backed_up_page() functions.
+* for use as argument in the struct ttm_backup_drop() or
+* ttm_backup_copy_page() functions.
*/
static inline unsigned long
ttm_backup_page_ptr_to_handle(const struct page *page)
@@ -55,20 +53,20 @@ ttm_backup_page_ptr_to_handle(const struct page *page)
return (unsigned long)page >> 1;
}
-void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle);
+void ttm_backup_drop(struct file *backup, pgoff_t handle);
-int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst,
+int ttm_backup_copy_page(struct file *backup, struct page *dst,
pgoff_t handle, bool intr);
s64
-ttm_backup_backup_page(struct ttm_backup *backup, struct page *page,
+ttm_backup_backup_page(struct file *backup, struct page *page,
bool writeback, pgoff_t idx, gfp_t page_gfp,
gfp_t alloc_gfp);
-void ttm_backup_fini(struct ttm_backup *backup);
+void ttm_backup_fini(struct file *backup);
u64 ttm_backup_bytes_avail(void);
-struct ttm_backup *ttm_backup_shmem_create(loff_t size);
+struct file *ttm_backup_shmem_create(loff_t size);
#endif

View File

@@ -118,7 +118,7 @@ struct ttm_tt {
* ttm_tt_create() callback is responsible for assigning
* this field.
*/
-struct ttm_backup *backup;
+struct file *backup;
/**
* @caching: The current caching state of the pages, see enum
* ttm_caching.