drm/amdgpu: update the handle ptr in hw_fini
Update the *handle to an amdgpu_ip_block pointer for all hw_fini function pointers. Also update the ip_block pointer wherever needed, since hw_fini and suspend had a cyclic dependency on it, and do some follow-up cleanup.

Signed-off-by: Sunil Khatri <sunil.khatri@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Committed by: Alex Deucher
Parent: 58608034ed
Commit: 692d2cd180
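The conversion is the same mechanical pattern in every IP block touched by this diff: hw_fini no longer receives an opaque void *handle that each implementation cast back to an amdgpu_device, it receives the struct amdgpu_ip_block directly, and callers such as suspend simply forward their own ip_block argument. A minimal sketch of the before/after shape, using a hypothetical "foo" IP block (foo_hw_fini, foo_suspend, foo_stop are illustrative names, not from this commit):

/* Before: hw_fini took an opaque handle and cast it back to the device. */
static int foo_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        foo_stop(adev);                 /* hypothetical teardown helper */
        return 0;
}

static int foo_suspend(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;

        return foo_hw_fini(adev);       /* suspend had to unwrap adev first */
}

/* After: hw_fini takes the IP-block pointer itself, matching hw_init. */
static int foo_hw_fini(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;

        foo_stop(adev);
        return 0;
}

static int foo_suspend(struct amdgpu_ip_block *ip_block)
{
        return foo_hw_fini(ip_block);   /* forward ip_block directly */
}

A few blocks (gfx_v8_0, vce_v3_0, vce_v4_0, and the MES KIQ path) previously had to look their own ip_block up via amdgpu_device_ip_get_ip_block() inside hw_fini or before calling it; with the new signature that lookup moves or disappears, which is the follow-up cleanup the commit message refers to. The reconstructed diff below restores the -/+ markers that were lost when the page was scraped.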
@@ -500,11 +500,11 @@ static int acp_hw_init(struct amdgpu_ip_block *ip_block)
  * @handle: handle used to pass amdgpu_device pointer
  *
  */
-static int acp_hw_fini(void *handle)
+static int acp_hw_fini(struct amdgpu_ip_block *ip_block)
 {
        u32 val = 0;
        u32 count = 0;
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        /* return early if no ACP */
        if (!adev->acp.acp_genpd) {

@@ -3285,7 +3285,7 @@ static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
                if (!adev->ip_blocks[i].status.hw)
                        continue;
                if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
-                       r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
+                       r = adev->ip_blocks[i].version->funcs->hw_fini(&adev->ip_blocks[i]);
                        /* XXX handle errors */
                        if (r) {
                                DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",

@@ -3324,7 +3324,7 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
                if (!adev->ip_blocks[i].status.hw)
                        continue;

-               r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
+               r = adev->ip_blocks[i].version->funcs->hw_fini(&adev->ip_blocks[i]);
                /* XXX handle errors */
                if (r) {
                        DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",

@@ -66,10 +66,9 @@ static int isp_hw_init(struct amdgpu_ip_block *ip_block)
  * @handle: handle for amdgpu_device pointer
  *
  */
-static int isp_hw_fini(void *handle)
+static int isp_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_isp *isp = &adev->isp;
+       struct amdgpu_isp *isp = &ip_block->adev->isp;

        if (isp->funcs->hw_fini != NULL)
                return isp->funcs->hw_fini(isp);

@@ -3002,9 +3002,9 @@ static int psp_hw_init(struct amdgpu_ip_block *ip_block)
                return -EINVAL;
 }

-static int psp_hw_fini(void *handle)
+static int psp_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;
        struct psp_context *psp = &adev->psp;

        if (psp->ta_fw) {

@@ -857,9 +857,9 @@ static int umsch_mm_hw_init(struct amdgpu_ip_block *ip_block)
        return 0;
 }

-static int umsch_mm_hw_fini(void *handle)
+static int umsch_mm_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        umsch_mm_ring_stop(&adev->umsch_mm);

@@ -875,9 +875,7 @@ static int umsch_mm_hw_fini(void *handle)

 static int umsch_mm_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return umsch_mm_hw_fini(adev);
+       return umsch_mm_hw_fini(ip_block);
 }

 static int umsch_mm_resume(struct amdgpu_ip_block *ip_block)

@@ -600,7 +600,7 @@ static int amdgpu_vkms_hw_init(struct amdgpu_ip_block *ip_block)
        return 0;
 }

-static int amdgpu_vkms_hw_fini(void *handle)
+static int amdgpu_vkms_hw_fini(struct amdgpu_ip_block *ip_block)
 {
        return 0;
 }
@@ -613,7 +613,7 @@ static int amdgpu_vkms_suspend(struct amdgpu_ip_block *ip_block)
        r = drm_mode_config_helper_suspend(adev_to_drm(adev));
        if (r)
                return r;
-       return amdgpu_vkms_hw_fini(adev);
+       return amdgpu_vkms_hw_fini(ip_block);
 }

 static int amdgpu_vkms_resume(struct amdgpu_ip_block *ip_block)

@@ -421,9 +421,9 @@ static int vpe_hw_init(struct amdgpu_ip_block *ip_block)
        return 0;
 }

-static int vpe_hw_fini(void *handle)
+static int vpe_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;
        struct amdgpu_vpe *vpe = &adev->vpe;

        vpe_ring_stop(vpe);

@@ -440,7 +440,7 @@ static int vpe_suspend(struct amdgpu_ip_block *ip_block)

        cancel_delayed_work_sync(&adev->vpe.idle_work);

-       return vpe_hw_fini(adev);
+       return vpe_hw_fini(ip_block);
 }

 static int vpe_resume(struct amdgpu_ip_block *ip_block)

@@ -2148,16 +2148,14 @@ static int cik_common_hw_init(struct amdgpu_ip_block *ip_block)
        return 0;
 }

-static int cik_common_hw_fini(void *handle)
+static int cik_common_hw_fini(struct amdgpu_ip_block *ip_block)
 {
        return 0;
 }

 static int cik_common_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return cik_common_hw_fini(adev);
+       return cik_common_hw_fini(ip_block);
 }

 static int cik_common_resume(struct amdgpu_ip_block *ip_block)

@@ -328,20 +328,16 @@ static int cik_ih_hw_init(struct amdgpu_ip_block *ip_block)
        return cik_ih_irq_init(adev);
 }

-static int cik_ih_hw_fini(void *handle)
+static int cik_ih_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       cik_ih_irq_disable(adev);
+       cik_ih_irq_disable(ip_block->adev);

        return 0;
 }

 static int cik_ih_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return cik_ih_hw_fini(adev);
+       return cik_ih_hw_fini(ip_block);
 }

 static int cik_ih_resume(struct amdgpu_ip_block *ip_block)

@@ -1001,9 +1001,9 @@ static int cik_sdma_hw_init(struct amdgpu_ip_block *ip_block)
        return r;
 }

-static int cik_sdma_hw_fini(void *handle)
+static int cik_sdma_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        cik_ctx_switch_enable(adev, false);
        cik_sdma_enable(adev, false);

@@ -1013,9 +1013,7 @@ static int cik_sdma_hw_fini(void *handle)

 static int cik_sdma_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return cik_sdma_hw_fini(adev);
+       return cik_sdma_hw_fini(ip_block);
 }

 static int cik_sdma_resume(struct amdgpu_ip_block *ip_block)

@@ -324,20 +324,16 @@ static int cz_ih_hw_init(struct amdgpu_ip_block *ip_block)
        return 0;
 }

-static int cz_ih_hw_fini(void *handle)
+static int cz_ih_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       cz_ih_irq_disable(adev);
+       cz_ih_irq_disable(ip_block->adev);

        return 0;
 }

 static int cz_ih_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return cz_ih_hw_fini(adev);
+       return cz_ih_hw_fini(ip_block);
 }

 static int cz_ih_resume(struct amdgpu_ip_block *ip_block)

@@ -2887,10 +2887,10 @@ static int dce_v10_0_hw_init(struct amdgpu_ip_block *ip_block)
        return 0;
 }

-static int dce_v10_0_hw_fini(void *handle)
+static int dce_v10_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
        int i;
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        dce_v10_0_hpd_fini(adev);

@@ -2917,7 +2917,7 @@ static int dce_v10_0_suspend(struct amdgpu_ip_block *ip_block)
        adev->mode_info.bl_level =
                amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);

-       return dce_v10_0_hw_fini(adev);
+       return dce_v10_0_hw_fini(ip_block);
 }

 static int dce_v10_0_resume(struct amdgpu_ip_block *ip_block)

@@ -3025,10 +3025,10 @@ static int dce_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
        return 0;
 }

-static int dce_v11_0_hw_fini(void *handle)
+static int dce_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
        int i;
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        dce_v11_0_hpd_fini(adev);

@@ -3055,7 +3055,7 @@ static int dce_v11_0_suspend(struct amdgpu_ip_block *ip_block)
        adev->mode_info.bl_level =
                amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);

-       return dce_v11_0_hw_fini(adev);
+       return dce_v11_0_hw_fini(ip_block);
 }

 static int dce_v11_0_resume(struct amdgpu_ip_block *ip_block)

@@ -2783,10 +2783,10 @@ static int dce_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
        return 0;
 }

-static int dce_v6_0_hw_fini(void *handle)
+static int dce_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
        int i;
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        dce_v6_0_hpd_fini(adev);

@@ -2812,7 +2812,7 @@ static int dce_v6_0_suspend(struct amdgpu_ip_block *ip_block)
        adev->mode_info.bl_level =
                amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);

-       return dce_v6_0_hw_fini(adev);
+       return dce_v6_0_hw_fini(ip_block);
 }

 static int dce_v6_0_resume(struct amdgpu_ip_block *ip_block)

@@ -2805,10 +2805,10 @@ static int dce_v8_0_hw_init(struct amdgpu_ip_block *ip_block)
        return 0;
 }

-static int dce_v8_0_hw_fini(void *handle)
+static int dce_v8_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
        int i;
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        dce_v8_0_hpd_fini(adev);

@@ -2835,7 +2835,7 @@ static int dce_v8_0_suspend(struct amdgpu_ip_block *ip_block)
        adev->mode_info.bl_level =
                amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);

-       return dce_v8_0_hw_fini(adev);
+       return dce_v8_0_hw_fini(ip_block);
 }

 static int dce_v8_0_resume(struct amdgpu_ip_block *ip_block)

@@ -7418,9 +7418,9 @@ static int gfx_v10_0_hw_init(struct amdgpu_ip_block *ip_block)
        return r;
 }

-static int gfx_v10_0_hw_fini(void *handle)
+static int gfx_v10_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
        amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@@ -7431,7 +7431,7 @@ static int gfx_v10_0_hw_fini(void *handle)
         * otherwise the gfxoff disallowing will be failed to set.
         */
        if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 1))
-               gfx_v10_0_set_powergating_state(handle, AMD_PG_STATE_UNGATE);
+               gfx_v10_0_set_powergating_state(ip_block->adev, AMD_PG_STATE_UNGATE);

        if (!adev->no_hw_access) {
                if (amdgpu_async_gfx_ring) {
@@ -7458,9 +7458,7 @@ static int gfx_v10_0_hw_fini(void *handle)

 static int gfx_v10_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return gfx_v10_0_hw_fini(adev);
+       return gfx_v10_0_hw_fini(ip_block);
 }

 static int gfx_v10_0_resume(struct amdgpu_ip_block *ip_block)

@@ -4667,9 +4667,9 @@ static int gfx_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
        return r;
 }

-static int gfx_v11_0_hw_fini(void *handle)
+static int gfx_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
        amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);

@@ -4707,9 +4707,7 @@ static int gfx_v11_0_hw_fini(void *handle)

 static int gfx_v11_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return gfx_v11_0_hw_fini(adev);
+       return gfx_v11_0_hw_fini(ip_block);
 }

 static int gfx_v11_0_resume(struct amdgpu_ip_block *ip_block)

@@ -3603,9 +3603,9 @@ static int gfx_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
        return r;
 }

-static int gfx_v12_0_hw_fini(void *handle)
+static int gfx_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;
        uint32_t tmp;

        amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);

@@ -3645,9 +3645,7 @@ static int gfx_v12_0_hw_fini(void *handle)

 static int gfx_v12_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return gfx_v12_0_hw_fini(adev);
+       return gfx_v12_0_hw_fini(ip_block);
 }

 static int gfx_v12_0_resume(struct amdgpu_ip_block *ip_block)

@@ -3142,9 +3142,9 @@ static int gfx_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
        return r;
 }

-static int gfx_v6_0_hw_fini(void *handle)
+static int gfx_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        gfx_v6_0_cp_enable(adev, false);
        adev->gfx.rlc.funcs->stop(adev);

@@ -3155,9 +3155,7 @@ static int gfx_v6_0_hw_fini(void *handle)

 static int gfx_v6_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return gfx_v6_0_hw_fini(adev);
+       return gfx_v6_0_hw_fini(ip_block);
 }

 static int gfx_v6_0_resume(struct amdgpu_ip_block *ip_block)

@@ -4486,9 +4486,9 @@ static int gfx_v7_0_hw_init(struct amdgpu_ip_block *ip_block)
        return r;
 }

-static int gfx_v7_0_hw_fini(void *handle)
+static int gfx_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
        amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);

@@ -4501,9 +4501,7 @@ static int gfx_v7_0_hw_fini(void *handle)

 static int gfx_v7_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return gfx_v7_0_hw_fini(adev);
+       return gfx_v7_0_hw_fini(ip_block);
 }

 static int gfx_v7_0_resume(struct amdgpu_ip_block *ip_block)

@@ -4879,10 +4879,9 @@ static int gfx_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
        return -ETIMEDOUT;
 }

-static int gfx_v8_0_hw_fini(void *handle)
+static int gfx_v8_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ip_block *ip_block;
+       struct amdgpu_device *adev = ip_block->adev;

        amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
        amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@@ -4899,10 +4898,6 @@ static int gfx_v8_0_hw_fini(void *handle)
                return 0;
        }

-       ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
-       if (!ip_block)
-               return -EINVAL;
-
        amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
        if (!gfx_v8_0_wait_for_idle(ip_block))
                gfx_v8_0_cp_enable(adev, false);
@@ -4919,9 +4914,7 @@ static int gfx_v8_0_hw_fini(void *handle)

 static int gfx_v8_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return gfx_v8_0_hw_fini(adev);
+       return gfx_v8_0_hw_fini(ip_block);
 }

 static int gfx_v8_0_resume(struct amdgpu_ip_block *ip_block)

@@ -4022,9 +4022,9 @@ static int gfx_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
        return r;
 }

-static int gfx_v9_0_hw_fini(void *handle)
+static int gfx_v9_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
                amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);

@@ -4076,9 +4076,7 @@ static int gfx_v9_0_hw_fini(void *handle)

 static int gfx_v9_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return gfx_v9_0_hw_fini(adev);
+       return gfx_v9_0_hw_fini(ip_block);
 }

 static int gfx_v9_0_resume(struct amdgpu_ip_block *ip_block)

@@ -2369,9 +2369,9 @@ static int gfx_v9_4_3_hw_init(struct amdgpu_ip_block *ip_block)
        return r;
 }

-static int gfx_v9_4_3_hw_fini(void *handle)
+static int gfx_v9_4_3_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;
        int i, num_xcc;

        amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);

@@ -2388,9 +2388,7 @@ static int gfx_v9_4_3_hw_fini(void *handle)

 static int gfx_v9_4_3_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return gfx_v9_4_3_hw_fini(adev);
+       return gfx_v9_4_3_hw_fini(ip_block);
 }

 static int gfx_v9_4_3_resume(struct amdgpu_ip_block *ip_block)

@@ -1032,9 +1032,9 @@ static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
        adev->mmhub.funcs->gart_disable(adev);
 }

-static int gmc_v10_0_hw_fini(void *handle)
+static int gmc_v10_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        gmc_v10_0_gart_disable(adev);

@@ -1055,9 +1055,7 @@ static int gmc_v10_0_hw_fini(void *handle)

 static int gmc_v10_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       gmc_v10_0_hw_fini(adev);
+       gmc_v10_0_hw_fini(ip_block);

        return 0;
 }

@@ -940,9 +940,9 @@ static void gmc_v11_0_gart_disable(struct amdgpu_device *adev)
        adev->mmhub.funcs->gart_disable(adev);
 }

-static int gmc_v11_0_hw_fini(void *handle)
+static int gmc_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        if (amdgpu_sriov_vf(adev)) {
                /* full access mode, so don't touch any GMC register */

@@ -963,9 +963,7 @@ static int gmc_v11_0_hw_fini(void *handle)

 static int gmc_v11_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       gmc_v11_0_hw_fini(adev);
+       gmc_v11_0_hw_fini(ip_block);

        return 0;
 }

@@ -924,9 +924,9 @@ static void gmc_v12_0_gart_disable(struct amdgpu_device *adev)
        adev->mmhub.funcs->gart_disable(adev);
 }

-static int gmc_v12_0_hw_fini(void *handle)
+static int gmc_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        if (amdgpu_sriov_vf(adev)) {
                /* full access mode, so don't touch any GMC register */

@@ -947,9 +947,7 @@ static int gmc_v12_0_hw_fini(void *handle)

 static int gmc_v12_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       gmc_v12_0_hw_fini(adev);
+       gmc_v12_0_hw_fini(ip_block);

        return 0;
 }

@@ -925,9 +925,9 @@ static int gmc_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
        return 0;
 }

-static int gmc_v6_0_hw_fini(void *handle)
+static int gmc_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
        gmc_v6_0_gart_disable(adev);

@@ -937,9 +937,7 @@ static int gmc_v6_0_hw_fini(void *handle)

 static int gmc_v6_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       gmc_v6_0_hw_fini(adev);
+       gmc_v6_0_hw_fini(ip_block);

        return 0;
 }

@@ -1101,9 +1101,9 @@ static int gmc_v7_0_hw_init(struct amdgpu_ip_block *ip_block)
        return 0;
 }

-static int gmc_v7_0_hw_fini(void *handle)
+static int gmc_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
        gmc_v7_0_gart_disable(adev);

@@ -1113,9 +1113,7 @@ static int gmc_v7_0_hw_fini(void *handle)

 static int gmc_v7_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       gmc_v7_0_hw_fini(adev);
+       gmc_v7_0_hw_fini(ip_block);

        return 0;
 }

@@ -1232,9 +1232,9 @@ static int gmc_v8_0_hw_init(struct amdgpu_ip_block *ip_block)
        return 0;
 }

-static int gmc_v8_0_hw_fini(void *handle)
+static int gmc_v8_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
        gmc_v8_0_gart_disable(adev);

@@ -1244,9 +1244,7 @@ static int gmc_v8_0_hw_fini(void *handle)

 static int gmc_v8_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       gmc_v8_0_hw_fini(adev);
+       gmc_v8_0_hw_fini(ip_block);

        return 0;
 }

@@ -2393,9 +2393,9 @@ static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
        adev->mmhub.funcs->gart_disable(adev);
 }

-static int gmc_v9_0_hw_fini(void *handle)
+static int gmc_v9_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        gmc_v9_0_gart_disable(adev);

@@ -2430,9 +2430,7 @@ static int gmc_v9_0_hw_fini(void *handle)

 static int gmc_v9_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return gmc_v9_0_hw_fini(adev);
+       return gmc_v9_0_hw_fini(ip_block);
 }

 static int gmc_v9_0_resume(struct amdgpu_ip_block *ip_block)

@@ -318,20 +318,16 @@ static int iceland_ih_hw_init(struct amdgpu_ip_block *ip_block)
        return iceland_ih_irq_init(adev);
 }

-static int iceland_ih_hw_fini(void *handle)
+static int iceland_ih_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       iceland_ih_irq_disable(adev);
+       iceland_ih_irq_disable(ip_block->adev);

        return 0;
 }

 static int iceland_ih_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return iceland_ih_hw_fini(adev);
+       return iceland_ih_hw_fini(ip_block);
 }

 static int iceland_ih_resume(struct amdgpu_ip_block *ip_block)

@@ -635,20 +635,16 @@ static int ih_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
        return 0;
 }

-static int ih_v6_0_hw_fini(void *handle)
+static int ih_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       ih_v6_0_irq_disable(adev);
+       ih_v6_0_irq_disable(ip_block->adev);

        return 0;
 }

 static int ih_v6_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return ih_v6_0_hw_fini(adev);
+       return ih_v6_0_hw_fini(ip_block);
 }

 static int ih_v6_0_resume(struct amdgpu_ip_block *ip_block)

@@ -614,20 +614,16 @@ static int ih_v6_1_hw_init(struct amdgpu_ip_block *ip_block)
        return 0;
 }

-static int ih_v6_1_hw_fini(void *handle)
+static int ih_v6_1_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       ih_v6_1_irq_disable(adev);
+       ih_v6_1_irq_disable(ip_block->adev);

        return 0;
 }

 static int ih_v6_1_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return ih_v6_1_hw_fini(adev);
+       return ih_v6_1_hw_fini(ip_block);
 }

 static int ih_v6_1_resume(struct amdgpu_ip_block *ip_block)

@@ -604,20 +604,16 @@ static int ih_v7_0_hw_init(struct amdgpu_ip_block *ip_block)
        return 0;
 }

-static int ih_v7_0_hw_fini(void *handle)
+static int ih_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       ih_v7_0_irq_disable(adev);
+       ih_v7_0_irq_disable(ip_block->adev);

        return 0;
 }

 static int ih_v7_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return ih_v7_0_hw_fini(adev);
+       return ih_v7_0_hw_fini(ip_block);
 }

 static int ih_v7_0_resume(struct amdgpu_ip_block *ip_block)

@@ -146,9 +146,9 @@ static int jpeg_v2_0_hw_init(struct amdgpu_ip_block *ip_block)
  *
  * Stop the JPEG block, mark ring as not ready any more
  */
-static int jpeg_v2_0_hw_fini(void *handle)
+static int jpeg_v2_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        cancel_delayed_work_sync(&adev->vcn.idle_work);

@@ -168,14 +168,13 @@ static int jpeg_v2_0_hw_fini(void *handle)
  */
 static int jpeg_v2_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
        int r;

-       r = jpeg_v2_0_hw_fini(adev);
+       r = jpeg_v2_0_hw_fini(ip_block);
        if (r)
                return r;

-       r = amdgpu_jpeg_suspend(adev);
+       r = amdgpu_jpeg_suspend(ip_block->adev);

        return r;
 }

@@ -206,9 +206,9 @@ static int jpeg_v2_5_hw_init(struct amdgpu_ip_block *ip_block)
  *
  * Stop the JPEG block, mark ring as not ready any more
  */
-static int jpeg_v2_5_hw_fini(void *handle)
+static int jpeg_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;
        int i;

        cancel_delayed_work_sync(&adev->vcn.idle_work);
@@ -237,14 +237,13 @@ static int jpeg_v2_5_hw_fini(void *handle)
  */
 static int jpeg_v2_5_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
        int r;

-       r = jpeg_v2_5_hw_fini(adev);
+       r = jpeg_v2_5_hw_fini(ip_block);
        if (r)
                return r;

-       r = amdgpu_jpeg_suspend(adev);
+       r = amdgpu_jpeg_suspend(ip_block->adev);

        return r;
 }

@@ -160,9 +160,9 @@ static int jpeg_v3_0_hw_init(struct amdgpu_ip_block *ip_block)
  *
  * Stop the JPEG block, mark ring as not ready any more
  */
-static int jpeg_v3_0_hw_fini(void *handle)
+static int jpeg_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        cancel_delayed_work_sync(&adev->vcn.idle_work);

@@ -182,14 +182,13 @@ static int jpeg_v3_0_hw_fini(void *handle)
  */
 static int jpeg_v3_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
        int r;

-       r = jpeg_v3_0_hw_fini(adev);
+       r = jpeg_v3_0_hw_fini(ip_block);
        if (r)
                return r;

-       r = amdgpu_jpeg_suspend(adev);
+       r = amdgpu_jpeg_suspend(ip_block->adev);

        return r;
 }

@@ -191,9 +191,9 @@ static int jpeg_v4_0_hw_init(struct amdgpu_ip_block *ip_block)
  *
  * Stop the JPEG block, mark ring as not ready any more
  */
-static int jpeg_v4_0_hw_fini(void *handle)
+static int jpeg_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        cancel_delayed_work_sync(&adev->vcn.idle_work);
        if (!amdgpu_sriov_vf(adev)) {
@@ -216,14 +216,13 @@ static int jpeg_v4_0_hw_fini(void *handle)
  */
 static int jpeg_v4_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
        int r;

-       r = jpeg_v4_0_hw_fini(adev);
+       r = jpeg_v4_0_hw_fini(ip_block);
        if (r)
                return r;

-       r = amdgpu_jpeg_suspend(adev);
+       r = amdgpu_jpeg_suspend(ip_block->adev);

        return r;
 }

@@ -362,9 +362,9 @@ static int jpeg_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
  *
  * Stop the JPEG block, mark ring as not ready any more
  */
-static int jpeg_v4_0_3_hw_fini(void *handle)
+static int jpeg_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;
        int ret = 0;

        cancel_delayed_work_sync(&adev->jpeg.idle_work);
@@ -386,14 +386,13 @@ static int jpeg_v4_0_3_hw_fini(void *handle)
  */
 static int jpeg_v4_0_3_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
        int r;

-       r = jpeg_v4_0_3_hw_fini(adev);
+       r = jpeg_v4_0_3_hw_fini(ip_block);
        if (r)
                return r;

-       r = amdgpu_jpeg_suspend(adev);
+       r = amdgpu_jpeg_suspend(ip_block->adev);

        return r;
 }

@@ -214,9 +214,9 @@ static int jpeg_v4_0_5_hw_init(struct amdgpu_ip_block *ip_block)
  *
  * Stop the JPEG block, mark ring as not ready any more
  */
-static int jpeg_v4_0_5_hw_fini(void *handle)
+static int jpeg_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;
        int i;

        cancel_delayed_work_sync(&adev->vcn.idle_work);
@@ -243,14 +243,13 @@ static int jpeg_v4_0_5_hw_fini(void *handle)
  */
 static int jpeg_v4_0_5_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
        int r;

-       r = jpeg_v4_0_5_hw_fini(adev);
+       r = jpeg_v4_0_5_hw_fini(ip_block);
        if (r)
                return r;

-       r = amdgpu_jpeg_suspend(adev);
+       r = amdgpu_jpeg_suspend(ip_block->adev);

        return r;
 }

@@ -157,9 +157,9 @@ static int jpeg_v5_0_0_hw_init(struct amdgpu_ip_block *ip_block)
  *
  * Stop the JPEG block, mark ring as not ready any more
  */
-static int jpeg_v5_0_0_hw_fini(void *handle)
+static int jpeg_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        cancel_delayed_work_sync(&adev->vcn.idle_work);

@@ -179,14 +179,13 @@ static int jpeg_v5_0_0_hw_fini(void *handle)
  */
 static int jpeg_v5_0_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
        int r;

-       r = jpeg_v5_0_0_hw_fini(adev);
+       r = jpeg_v5_0_0_hw_fini(ip_block);
        if (r)
                return r;

-       r = amdgpu_jpeg_suspend(adev);
+       r = amdgpu_jpeg_suspend(ip_block->adev);

        return r;
 }

@@ -56,7 +56,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_5_2_mes_2.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_5_2_mes1.bin");

 static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block);
-static int mes_v11_0_hw_fini(void *handle);
+static int mes_v11_0_hw_fini(struct amdgpu_ip_block *ip_block);
 static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev);
 static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev);

@@ -1522,6 +1522,12 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)

        mes_v11_0_kiq_setting(&adev->gfx.kiq[0].ring);

+       ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_MES);
+       if (unlikely(!ip_block)) {
+               dev_err(adev->dev, "Failed to get MES handle\n");
+               return -EINVAL;
+       }
+
        r = mes_v11_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE);
        if (r)
                goto failure;
@@ -1532,12 +1538,6 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
                adev->mes.enable_legacy_queue_map = false;

        if (adev->mes.enable_legacy_queue_map) {
-               ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_MES);
-               if (unlikely(!ip_block)) {
-                       dev_err(adev->dev, "Failed to get MES handle\n");
-                       return -EINVAL;
-               }
-
                r = mes_v11_0_hw_init(ip_block);
                if (r)
                        goto failure;
@@ -1546,7 +1546,7 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
        return r;

 failure:
-       mes_v11_0_hw_fini(adev);
+       mes_v11_0_hw_fini(ip_block);
        return r;
 }

@@ -1622,13 +1622,13 @@ static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
        return 0;

 failure:
-       mes_v11_0_hw_fini(adev);
+       mes_v11_0_hw_fini(ip_block);
        return r;
 }

-static int mes_v11_0_hw_fini(void *handle)
+static int mes_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;
        if (amdgpu_sriov_is_mes_info_enable(adev)) {
                amdgpu_bo_free_kernel(&adev->mes.resource_1, &adev->mes.resource_1_gpu_addr,
                                      &adev->mes.resource_1_addr);
@@ -1639,13 +1639,12 @@ static int mes_v11_0_hw_fini(void *handle)
 static int mes_v11_0_suspend(struct amdgpu_ip_block *ip_block)
 {
        int r;
-       struct amdgpu_device *adev = ip_block->adev;

-       r = amdgpu_mes_suspend(adev);
+       r = amdgpu_mes_suspend(ip_block->adev);
        if (r)
                return r;

-       return mes_v11_0_hw_fini(adev);
+       return mes_v11_0_hw_fini(ip_block);
 }

 static int mes_v11_0_resume(struct amdgpu_ip_block *ip_block)

@@ -40,7 +40,7 @@ MODULE_FIRMWARE("amdgpu/gc_12_0_1_mes1.bin");
 MODULE_FIRMWARE("amdgpu/gc_12_0_1_uni_mes.bin");

 static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block);
-static int mes_v12_0_hw_fini(void *handle);
+static int mes_v12_0_hw_fini(struct amdgpu_ip_block *ip_block);
 static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev);
 static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev);

@@ -1480,6 +1480,12 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)

        mes_v12_0_enable(adev, true);

+       ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_MES);
+       if (unlikely(!ip_block)) {
+               dev_err(adev->dev, "Failed to get MES handle\n");
+               return -EINVAL;
+       }
+
        r = mes_v12_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE);
        if (r)
                goto failure;
@@ -1493,12 +1499,6 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
        }

        if (adev->mes.enable_legacy_queue_map) {
-               ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_MES);
-               if (unlikely(!ip_block)) {
-                       dev_err(adev->dev, "Failed to get MES handle\n");
-                       return -EINVAL;
-               }
-
                r = mes_v12_0_hw_init(ip_block);
                if (r)
                        goto failure;
@@ -1507,7 +1507,7 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
        return r;

 failure:
-       mes_v12_0_hw_fini(adev);
+       mes_v12_0_hw_fini(ip_block);
        return r;
 }

@@ -1591,11 +1591,11 @@ static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
        return 0;

 failure:
-       mes_v12_0_hw_fini(adev);
+       mes_v12_0_hw_fini(ip_block);
        return r;
 }

-static int mes_v12_0_hw_fini(void *handle)
+static int mes_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
        return 0;
 }
@@ -1603,13 +1603,12 @@ static int mes_v12_0_hw_fini(void *handle)
 static int mes_v12_0_suspend(struct amdgpu_ip_block *ip_block)
 {
        int r;
-       struct amdgpu_device *adev = ip_block->adev;

-       r = amdgpu_mes_suspend(adev);
+       r = amdgpu_mes_suspend(ip_block->adev);
        if (r)
                return r;

-       return mes_v12_0_hw_fini(adev);
+       return mes_v12_0_hw_fini(ip_block);
 }

 static int mes_v12_0_resume(struct amdgpu_ip_block *ip_block)

@@ -609,20 +609,16 @@ static int navi10_ih_hw_init(struct amdgpu_ip_block *ip_block)
        return navi10_ih_irq_init(adev);
 }

-static int navi10_ih_hw_fini(void *handle)
+static int navi10_ih_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       navi10_ih_irq_disable(adev);
+       navi10_ih_irq_disable(ip_block->adev);

        return 0;
 }

 static int navi10_ih_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return navi10_ih_hw_fini(adev);
+       return navi10_ih_hw_fini(ip_block);
 }

 static int navi10_ih_resume(struct amdgpu_ip_block *ip_block)

@@ -1014,9 +1014,9 @@ static int nv_common_hw_init(struct amdgpu_ip_block *ip_block)
        return 0;
 }

-static int nv_common_hw_fini(void *handle)
+static int nv_common_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        /* Disable the doorbell aperture and selfring doorbell aperture
         * separately in hw_fini because nv_enable_doorbell_aperture

@@ -1031,9 +1031,7 @@ static int nv_common_hw_fini(void *handle)

 static int nv_common_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return nv_common_hw_fini(adev);
+       return nv_common_hw_fini(ip_block);
 }

 static int nv_common_resume(struct amdgpu_ip_block *ip_block)

@@ -892,20 +892,16 @@ static int sdma_v2_4_hw_init(struct amdgpu_ip_block *ip_block)
        return r;
 }

-static int sdma_v2_4_hw_fini(void *handle)
+static int sdma_v2_4_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       sdma_v2_4_enable(adev, false);
+       sdma_v2_4_enable(ip_block->adev, false);

        return 0;
 }

 static int sdma_v2_4_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return sdma_v2_4_hw_fini(adev);
+       return sdma_v2_4_hw_fini(ip_block);
 }

 static int sdma_v2_4_resume(struct amdgpu_ip_block *ip_block)

@@ -1178,9 +1178,9 @@ static int sdma_v3_0_hw_init(struct amdgpu_ip_block *ip_block)
        return r;
 }

-static int sdma_v3_0_hw_fini(void *handle)
+static int sdma_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        sdma_v3_0_ctx_switch_enable(adev, false);
        sdma_v3_0_enable(adev, false);

@@ -1190,9 +1190,7 @@ static int sdma_v3_0_hw_fini(void *handle)

 static int sdma_v3_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return sdma_v3_0_hw_fini(adev);
+       return sdma_v3_0_hw_fini(ip_block);
 }

 static int sdma_v3_0_resume(struct amdgpu_ip_block *ip_block)

@@ -1964,9 +1964,9 @@ static int sdma_v4_0_hw_init(struct amdgpu_ip_block *ip_block)
        return sdma_v4_0_start(adev);
 }

-static int sdma_v4_0_hw_fini(void *handle)
+static int sdma_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;
        int i;

        if (amdgpu_sriov_vf(adev))
@@ -1998,7 +1998,7 @@ static int sdma_v4_0_suspend(struct amdgpu_ip_block *ip_block)
                return 0;
        }

-       return sdma_v4_0_hw_fini(adev);
+       return sdma_v4_0_hw_fini(ip_block);
 }

 static int sdma_v4_0_resume(struct amdgpu_ip_block *ip_block)

@@ -1482,9 +1482,9 @@ static int sdma_v4_4_2_hw_init(struct amdgpu_ip_block *ip_block)
        return r;
 }

-static int sdma_v4_4_2_hw_fini(void *handle)
+static int sdma_v4_4_2_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;
        uint32_t inst_mask;
        int i;

@@ -1515,7 +1515,7 @@ static int sdma_v4_4_2_suspend(struct amdgpu_ip_block *ip_block)
        if (amdgpu_in_reset(adev))
                sdma_v4_4_2_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);

-       return sdma_v4_4_2_hw_fini(adev);
+       return sdma_v4_4_2_hw_fini(ip_block);
 }

 static int sdma_v4_4_2_resume(struct amdgpu_ip_block *ip_block)

@@ -1489,9 +1489,9 @@ static int sdma_v5_0_hw_init(struct amdgpu_ip_block *ip_block)
        return r;
 }

-static int sdma_v5_0_hw_fini(void *handle)
+static int sdma_v5_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        if (amdgpu_sriov_vf(adev))
                return 0;

@@ -1504,9 +1504,7 @@ static int sdma_v5_0_hw_fini(void *handle)

 static int sdma_v5_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return sdma_v5_0_hw_fini(adev);
+       return sdma_v5_0_hw_fini(ip_block);
 }

 static int sdma_v5_0_resume(struct amdgpu_ip_block *ip_block)

@@ -1389,9 +1389,9 @@ static int sdma_v5_2_hw_init(struct amdgpu_ip_block *ip_block)
        return sdma_v5_2_start(adev);
 }

-static int sdma_v5_2_hw_fini(void *handle)
+static int sdma_v5_2_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        if (amdgpu_sriov_vf(adev))
                return 0;

@@ -1404,9 +1404,7 @@ static int sdma_v5_2_hw_fini(void *handle)

 static int sdma_v5_2_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return sdma_v5_2_hw_fini(adev);
+       return sdma_v5_2_hw_fini(ip_block);
 }

 static int sdma_v5_2_resume(struct amdgpu_ip_block *ip_block)

@@ -1387,9 +1387,9 @@ static int sdma_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
        return sdma_v6_0_start(adev);
 }

-static int sdma_v6_0_hw_fini(void *handle)
+static int sdma_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        if (amdgpu_sriov_vf(adev))
                return 0;

@@ -1402,9 +1402,7 @@ static int sdma_v6_0_hw_fini(void *handle)

 static int sdma_v6_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return sdma_v6_0_hw_fini(adev);
+       return sdma_v6_0_hw_fini(ip_block);
 }

 static int sdma_v6_0_resume(struct amdgpu_ip_block *ip_block)

@@ -1345,9 +1345,9 @@ static int sdma_v7_0_hw_init(struct amdgpu_ip_block *ip_block)
        return sdma_v7_0_start(adev);
 }

-static int sdma_v7_0_hw_fini(void *handle)
+static int sdma_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        if (amdgpu_sriov_vf(adev))
                return 0;

@@ -1360,9 +1360,7 @@ static int sdma_v7_0_hw_fini(void *handle)

 static int sdma_v7_0_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return sdma_v7_0_hw_fini(adev);
+       return sdma_v7_0_hw_fini(ip_block);
 }

 static int sdma_v7_0_resume(struct amdgpu_ip_block *ip_block)

@@ -2645,16 +2645,14 @@ static int si_common_hw_init(struct amdgpu_ip_block *ip_block)
        return 0;
 }

-static int si_common_hw_fini(void *handle)
+static int si_common_hw_fini(struct amdgpu_ip_block *ip_block)
 {
        return 0;
 }

 static int si_common_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return si_common_hw_fini(adev);
+       return si_common_hw_fini(ip_block);
 }

 static int si_common_resume(struct amdgpu_ip_block *ip_block)

@@ -524,20 +524,16 @@ static int si_dma_hw_init(struct amdgpu_ip_block *ip_block)
        return si_dma_start(adev);
 }

-static int si_dma_hw_fini(void *handle)
+static int si_dma_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       si_dma_stop(adev);
+       si_dma_stop(ip_block->adev);

        return 0;
 }

 static int si_dma_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return si_dma_hw_fini(adev);
+       return si_dma_hw_fini(ip_block);
 }

 static int si_dma_resume(struct amdgpu_ip_block *ip_block)

@@ -193,20 +193,16 @@ static int si_ih_hw_init(struct amdgpu_ip_block *ip_block)
        return si_ih_irq_init(adev);
 }

-static int si_ih_hw_fini(void *handle)
+static int si_ih_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       si_ih_irq_disable(adev);
+       si_ih_irq_disable(ip_block->adev);

        return 0;
 }

 static int si_ih_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return si_ih_hw_fini(adev);
+       return si_ih_hw_fini(ip_block);
 }

 static int si_ih_resume(struct amdgpu_ip_block *ip_block)

@@ -1284,9 +1284,9 @@ static int soc15_common_hw_init(struct amdgpu_ip_block *ip_block)
        return 0;
 }

-static int soc15_common_hw_fini(void *handle)
+static int soc15_common_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        /* Disable the doorbell aperture and selfring doorbell aperture
         * separately in hw_fini because soc15_enable_doorbell_aperture

@@ -1320,9 +1320,7 @@ static int soc15_common_hw_fini(void *handle)

 static int soc15_common_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return soc15_common_hw_fini(adev);
+       return soc15_common_hw_fini(ip_block);
 }

 static int soc15_common_resume(struct amdgpu_ip_block *ip_block)

@@ -867,9 +867,9 @@ static int soc21_common_hw_init(struct amdgpu_ip_block *ip_block)
        return 0;
 }

-static int soc21_common_hw_fini(void *handle)
+static int soc21_common_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        /* Disable the doorbell aperture and selfring doorbell aperture
         * separately in hw_fini because soc21_enable_doorbell_aperture

@@ -892,9 +892,7 @@ static int soc21_common_hw_fini(void *handle)

 static int soc21_common_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return soc21_common_hw_fini(adev);
+       return soc21_common_hw_fini(ip_block);
 }

 static bool soc21_need_reset_on_resume(struct amdgpu_device *adev)

@@ -494,9 +494,9 @@ static int soc24_common_hw_init(struct amdgpu_ip_block *ip_block)
        return 0;
 }

-static int soc24_common_hw_fini(void *handle)
+static int soc24_common_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        /* Disable the doorbell aperture and selfring doorbell aperture
         * separately in hw_fini because soc21_enable_doorbell_aperture

@@ -514,9 +514,7 @@ static int soc24_common_hw_fini(void *handle)

 static int soc24_common_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return soc24_common_hw_fini(adev);
+       return soc24_common_hw_fini(ip_block);
 }

 static int soc24_common_resume(struct amdgpu_ip_block *ip_block)

@@ -336,20 +336,16 @@ static int tonga_ih_hw_init(struct amdgpu_ip_block *ip_block)
        return 0;
 }

-static int tonga_ih_hw_fini(void *handle)
+static int tonga_ih_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       tonga_ih_irq_disable(adev);
+       tonga_ih_irq_disable(ip_block->adev);

        return 0;
 }

 static int tonga_ih_suspend(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       return tonga_ih_hw_fini(adev);
+       return tonga_ih_hw_fini(ip_block);
 }

 static int tonga_ih_resume(struct amdgpu_ip_block *ip_block)
@@ -405,12 +401,10 @@ static bool tonga_ih_check_soft_reset(struct amdgpu_ip_block *ip_block)

 static int tonga_ih_pre_soft_reset(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = ip_block->adev;
-
-       if (!adev->irq.srbm_soft_reset)
+       if (!ip_block->adev->irq.srbm_soft_reset)
                return 0;

-       return tonga_ih_hw_fini(adev);
+       return tonga_ih_hw_fini(ip_block);
 }

 static int tonga_ih_post_soft_reset(struct amdgpu_ip_block *ip_block)

@@ -692,9 +692,9 @@ static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block)
  *
  * Stop the UVD block, mark ring as not ready any more
  */
-static int uvd_v3_1_hw_fini(void *handle)
+static int uvd_v3_1_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        cancel_delayed_work_sync(&adev->uvd.idle_work);

@@ -740,7 +740,7 @@ static int uvd_v3_1_suspend(struct amdgpu_ip_block *ip_block)
                                       AMD_CG_STATE_GATE);
        }

-       r = uvd_v3_1_hw_fini(adev);
+       r = uvd_v3_1_hw_fini(ip_block);
        if (r)
                return r;

@@ -206,9 +206,9 @@ static int uvd_v4_2_hw_init(struct amdgpu_ip_block *ip_block)
  *
  * Stop the UVD block, mark ring as not ready any more
  */
-static int uvd_v4_2_hw_fini(void *handle)
+static int uvd_v4_2_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        cancel_delayed_work_sync(&adev->uvd.idle_work);

@@ -254,7 +254,7 @@ static int uvd_v4_2_suspend(struct amdgpu_ip_block *ip_block)
                                       AMD_CG_STATE_GATE);
        }

-       r = uvd_v4_2_hw_fini(adev);
+       r = uvd_v4_2_hw_fini(ip_block);
        if (r)
                return r;

@@ -204,9 +204,9 @@ static int uvd_v5_0_hw_init(struct amdgpu_ip_block *ip_block)
  *
  * Stop the UVD block, mark ring as not ready any more
  */
-static int uvd_v5_0_hw_fini(void *handle)
+static int uvd_v5_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        cancel_delayed_work_sync(&adev->uvd.idle_work);

@@ -252,7 +252,7 @@ static int uvd_v5_0_suspend(struct amdgpu_ip_block *ip_block)
                                       AMD_CG_STATE_GATE);
        }

-       r = uvd_v5_0_hw_fini(adev);
+       r = uvd_v5_0_hw_fini(ip_block);
        if (r)
                return r;

@@ -528,9 +528,9 @@ static int uvd_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
  *
  * Stop the UVD block, mark ring as not ready any more
  */
-static int uvd_v6_0_hw_fini(void *handle)
+static int uvd_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
 {
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_device *adev = ip_block->adev;

        cancel_delayed_work_sync(&adev->uvd.idle_work);

@@ -576,7 +576,7 @@ static int uvd_v6_0_suspend(struct amdgpu_ip_block *ip_block)
                                       AMD_CG_STATE_GATE);
        }

-       r = uvd_v6_0_hw_fini(adev);
+       r = uvd_v6_0_hw_fini(ip_block);
        if (r)
                return r;

@@ -592,9 +592,9 @@ static int uvd_v7_0_hw_init(struct amdgpu_ip_block *ip_block)
|
||||
*
|
||||
* Stop the UVD block, mark ring as not ready any more
|
||||
*/
|
||||
static int uvd_v7_0_hw_fini(void *handle)
|
||||
static int uvd_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
struct amdgpu_device *adev = ip_block->adev;
|
||||
|
||||
cancel_delayed_work_sync(&adev->uvd.idle_work);
|
||||
|
||||
@@ -644,7 +644,7 @@ static int uvd_v7_0_suspend(struct amdgpu_ip_block *ip_block)
|
||||
AMD_CG_STATE_GATE);
|
||||
}
|
||||
|
||||
r = uvd_v7_0_hw_fini(adev);
|
||||
r = uvd_v7_0_hw_fini(ip_block);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
||||
@@ -481,11 +481,9 @@ static int vce_v2_0_hw_init(struct amdgpu_ip_block *ip_block)
	return 0;
}

static int vce_v2_0_hw_fini(void *handle)
static int vce_v2_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->vce.idle_work);
	cancel_delayed_work_sync(&ip_block->adev->vce.idle_work);

	return 0;
}

@@ -519,7 +517,7 @@ static int vce_v2_0_suspend(struct amdgpu_ip_block *ip_block)
					    AMD_CG_STATE_GATE);
	}

	r = vce_v2_0_hw_fini(adev);
	r = vce_v2_0_hw_fini(ip_block);
	if (r)
		return r;

@@ -485,15 +485,10 @@ static int vce_v3_0_hw_init(struct amdgpu_ip_block *ip_block)
	return 0;
}

static int vce_v3_0_hw_fini(void *handle)
static int vce_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ip_block *ip_block;

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE);
	if (!ip_block)
		return -EINVAL;
	struct amdgpu_device *adev = ip_block->adev;

	cancel_delayed_work_sync(&adev->vce.idle_work);

@@ -533,7 +528,7 @@ static int vce_v3_0_suspend(struct amdgpu_ip_block *ip_block)
					    AMD_CG_STATE_GATE);
	}

	r = vce_v3_0_hw_fini(adev);
	r = vce_v3_0_hw_fini(ip_block);
	if (r)
		return r;

@@ -536,14 +536,9 @@ static int vce_v4_0_hw_init(struct amdgpu_ip_block *ip_block)
	return 0;
}

static int vce_v4_0_hw_fini(void *handle)
static int vce_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ip_block *ip_block;

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE);
	if (!ip_block)
		return -EINVAL;
	struct amdgpu_device *adev = ip_block->adev;

	cancel_delayed_work_sync(&adev->vce.idle_work);

@@ -599,7 +594,7 @@ static int vce_v4_0_suspend(struct amdgpu_ip_block *ip_block)
					    AMD_CG_STATE_GATE);
	}

	r = vce_v4_0_hw_fini(adev);
	r = vce_v4_0_hw_fini(ip_block);
	if (r)
		return r;

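The vce_v3_0 and vce_v4_0 hunks above remove a lookup as well: with only a void handle, hw_fini had to resolve its own block through amdgpu_device_ip_get_ip_block() and return -EINVAL when it was missing, whereas the block is now simply the argument. A hedged before/after sketch of that simplification, trimmed to the lines that matter (the example_vce_* names are hypothetical):

/* old shape: resolve the block from the device handle */
static int example_vce_hw_fini_old(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ip_block *ip_block;

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE);
	if (!ip_block)
		return -EINVAL;

	cancel_delayed_work_sync(&adev->vce.idle_work);
	return 0;
}

/* new shape: the block arrives as the argument, so the lookup and its
 * error path disappear */
static int example_vce_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	cancel_delayed_work_sync(&adev->vce.idle_work);
	return 0;
}
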
@@ -272,9 +272,9 @@ static int vcn_v1_0_hw_init(struct amdgpu_ip_block *ip_block)
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v1_0_hw_fini(void *handle)
static int vcn_v1_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_device *adev = ip_block->adev;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

@@ -306,7 +306,7 @@ static int vcn_v1_0_suspend(struct amdgpu_ip_block *ip_block)
		amdgpu_dpm_enable_uvd(adev, false);
	}

	r = vcn_v1_0_hw_fini(adev);
	r = vcn_v1_0_hw_fini(ip_block);
	if (r)
		return r;

@@ -309,9 +309,9 @@ static int vcn_v2_0_hw_init(struct amdgpu_ip_block *ip_block)
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v2_0_hw_fini(void *handle)
static int vcn_v2_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_device *adev = ip_block->adev;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

@@ -333,13 +333,12 @@ static int vcn_v2_0_hw_fini(void *handle)
static int vcn_v2_0_suspend(struct amdgpu_ip_block *ip_block)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	r = vcn_v2_0_hw_fini(adev);
	r = vcn_v2_0_hw_fini(ip_block);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);
	r = amdgpu_vcn_suspend(ip_block->adev);

	return r;
}

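In the VCN hunks the suspend callbacks move in the same direction: suspend no longer keeps a device-local pointer just to reach hw_fini, it forwards its own ip_block, and helpers that still take the device receive ip_block->adev. A minimal sketch of that suspend shape (the example_vcn_* names are hypothetical; amdgpu_vcn_suspend is the helper visible in the hunks):

/* hypothetical hw_fini with the updated prototype, as sketched earlier */
static int example_vcn_hw_fini(struct amdgpu_ip_block *ip_block);

static int example_vcn_suspend(struct amdgpu_ip_block *ip_block)
{
	int r;

	/* hw_fini now takes the block itself */
	r = example_vcn_hw_fini(ip_block);
	if (r)
		return r;

	/* helpers that still expect the device get ip_block->adev */
	return amdgpu_vcn_suspend(ip_block->adev);
}
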
@@ -385,9 +385,9 @@ static int vcn_v2_5_hw_init(struct amdgpu_ip_block *ip_block)
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v2_5_hw_fini(void *handle)
static int vcn_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_device *adev = ip_block->adev;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

@@ -418,13 +418,12 @@ static int vcn_v2_5_hw_fini(void *handle)
static int vcn_v2_5_suspend(struct amdgpu_ip_block *ip_block)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	r = vcn_v2_5_hw_fini(adev);
	r = vcn_v2_5_hw_fini(ip_block);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);
	r = amdgpu_vcn_suspend(ip_block->adev);

	return r;
}

@@ -417,9 +417,9 @@ static int vcn_v3_0_hw_init(struct amdgpu_ip_block *ip_block)
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v3_0_hw_fini(void *handle)
static int vcn_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_device *adev = ip_block->adev;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

@@ -450,13 +450,12 @@ static int vcn_v3_0_hw_fini(void *handle)
static int vcn_v3_0_suspend(struct amdgpu_ip_block *ip_block)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	r = vcn_v3_0_hw_fini(adev);
	r = vcn_v3_0_hw_fini(ip_block);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);
	r = amdgpu_vcn_suspend(ip_block->adev);

	return r;
}

@@ -345,9 +345,9 @@ static int vcn_v4_0_hw_init(struct amdgpu_ip_block *ip_block)
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v4_0_hw_fini(void *handle)
static int vcn_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_device *adev = ip_block->adev;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

@@ -379,13 +379,12 @@ static int vcn_v4_0_hw_fini(void *handle)
static int vcn_v4_0_suspend(struct amdgpu_ip_block *ip_block)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	r = vcn_v4_0_hw_fini(adev);
	r = vcn_v4_0_hw_fini(ip_block);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);
	r = amdgpu_vcn_suspend(ip_block->adev);

	return r;
}

@@ -312,9 +312,9 @@ static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v4_0_3_hw_fini(void *handle)
static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_device *adev = ip_block->adev;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

@@ -333,14 +333,13 @@ static int vcn_v4_0_3_hw_fini(void *handle)
 */
static int vcn_v4_0_3_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = vcn_v4_0_3_hw_fini(adev);
	r = vcn_v4_0_3_hw_fini(ip_block);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);
	r = amdgpu_vcn_suspend(ip_block->adev);

	return r;
}

@@ -295,9 +295,9 @@ static int vcn_v4_0_5_hw_init(struct amdgpu_ip_block *ip_block)
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v4_0_5_hw_fini(void *handle)
static int vcn_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_device *adev = ip_block->adev;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

@@ -327,13 +327,12 @@ static int vcn_v4_0_5_hw_fini(void *handle)
static int vcn_v4_0_5_suspend(struct amdgpu_ip_block *ip_block)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	r = vcn_v4_0_5_hw_fini(adev);
	r = vcn_v4_0_5_hw_fini(ip_block);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);
	r = amdgpu_vcn_suspend(ip_block->adev);

	return r;
}

@@ -259,9 +259,9 @@ static int vcn_v5_0_0_hw_init(struct amdgpu_ip_block *ip_block)
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v5_0_0_hw_fini(void *handle)
static int vcn_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_device *adev = ip_block->adev;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

@@ -291,13 +291,12 @@ static int vcn_v5_0_0_hw_fini(void *handle)
static int vcn_v5_0_0_suspend(struct amdgpu_ip_block *ip_block)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	r = vcn_v5_0_0_hw_fini(adev);
	r = vcn_v5_0_0_hw_fini(ip_block);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);
	r = amdgpu_vcn_suspend(ip_block->adev);

	return r;
}

@@ -539,20 +539,16 @@ static int vega10_ih_hw_init(struct amdgpu_ip_block *ip_block)
	return vega10_ih_irq_init(ip_block->adev);
}

static int vega10_ih_hw_fini(void *handle)
static int vega10_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vega10_ih_irq_disable(adev);
	vega10_ih_irq_disable(ip_block->adev);

	return 0;
}

static int vega10_ih_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	return vega10_ih_hw_fini(adev);
	return vega10_ih_hw_fini(ip_block);
}

static int vega10_ih_resume(struct amdgpu_ip_block *ip_block)

@@ -607,20 +607,16 @@ static int vega20_ih_hw_init(struct amdgpu_ip_block *ip_block)
	return 0;
}

static int vega20_ih_hw_fini(void *handle)
static int vega20_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vega20_ih_irq_disable(adev);
	vega20_ih_irq_disable(ip_block->adev);

	return 0;
}

static int vega20_ih_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	return vega20_ih_hw_fini(adev);
	return vega20_ih_hw_fini(ip_block);
}

static int vega20_ih_resume(struct amdgpu_ip_block *ip_block)

@@ -1718,9 +1718,9 @@ static int vi_common_hw_init(struct amdgpu_ip_block *ip_block)
	return 0;
}

static int vi_common_hw_fini(void *handle)
static int vi_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_device *adev = ip_block->adev;

	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

@@ -1733,9 +1733,7 @@ static int vi_common_hw_fini(void *handle)

static int vi_common_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	return vi_common_hw_fini(adev);
	return vi_common_hw_fini(ip_block);
}

static int vi_common_resume(struct amdgpu_ip_block *ip_block)

@@ -2830,9 +2830,9 @@ static int dm_hw_init(struct amdgpu_ip_block *ip_block)
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
static int dm_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_device *adev = ip_block->adev;

	amdgpu_dm_hpd_fini(adev);

@@ -385,7 +385,7 @@ struct amd_ip_funcs {
	int (*sw_fini)(struct amdgpu_ip_block *ip_block);
	int (*early_fini)(struct amdgpu_ip_block *ip_block);
	int (*hw_init)(struct amdgpu_ip_block *ip_block);
	int (*hw_fini)(void *handle);
	int (*hw_fini)(struct amdgpu_ip_block *ip_block);
	void (*late_fini)(struct amdgpu_ip_block *ip_block);
	int (*prepare_suspend)(struct amdgpu_ip_block *ip_block);
	int (*suspend)(struct amdgpu_ip_block *ip_block);

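The struct amd_ip_funcs hunk above is the interface change that all of the per-block edits follow from: hw_fini now has the same struct amdgpu_ip_block * prototype as hw_init, sw_fini, suspend and the neighbouring lifecycle callbacks. A minimal sketch of an IP block wired to the updated table (the foo_* names are hypothetical and exist only to illustrate the prototype):

static int foo_hw_fini(struct amdgpu_ip_block *ip_block)
{
	/* the device is always reachable as ip_block->adev */
	struct amdgpu_device *adev = ip_block->adev;

	dev_dbg(adev->dev, "foo: hw_fini\n");
	return 0;
}

static const struct amd_ip_funcs foo_ip_funcs = {
	.name = "foo",
	.hw_fini = foo_hw_fini,	/* same calling convention as the callbacks around it */
};
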
@@ -3052,9 +3052,9 @@ static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block)
	return ret;
}

static int kv_dpm_hw_fini(void *handle)
static int kv_dpm_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_device *adev = ip_block->adev;

	if (adev->pm.dpm_enabled)
		kv_dpm_disable(adev);

@@ -7795,9 +7795,9 @@ static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block)
	return ret;
}

static int si_dpm_hw_fini(void *handle)
static int si_dpm_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_device *adev = ip_block->adev;

	if (adev->pm.dpm_enabled)
		si_dpm_disable(adev);

@@ -173,10 +173,9 @@ static int pp_hw_init(struct amdgpu_ip_block *ip_block)
	return ret;
}

static int pp_hw_fini(void *handle)
static int pp_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	struct pp_hwmgr *hwmgr = ip_block->adev->powerplay.pp_handle;

	cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);

@@ -2009,9 +2009,9 @@ static int smu_reset_mp1_state(struct smu_context *smu)
	return ret;
}

static int smu_hw_fini(void *handle)
static int smu_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_device *adev = ip_block->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

@@ -2060,7 +2060,7 @@ static int smu_reset(struct smu_context *smu)
	if (!ip_block)
		return -EINVAL;

	ret = smu_hw_fini(adev);
	ret = smu_hw_fini(ip_block);
	if (ret)
		return ret;

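The smu_reset hunk shows the caller side of the same clean-up: a call site that already validates an amdgpu_ip_block pointer (note the if (!ip_block) check in the context) now hands that pointer straight to smu_hw_fini instead of adev. A hedged sketch of what such a call site might look like when starting from a device pointer; example_reset is hypothetical, and AMD_IP_BLOCK_TYPE_SMC is assumed here to be the block type used to look up the SMU block:

static int example_reset(struct amdgpu_device *adev)
{
	struct amdgpu_ip_block *ip_block;
	int ret;

	/* resolve the SMU's IP block before invoking its hw_fini callback */
	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC);
	if (!ip_block)
		return -EINVAL;

	ret = smu_hw_fini(ip_block);
	if (ret)
		return ret;

	return 0;
}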