mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-10 15:13:44 -04:00
drm/amdgpu/sdma_v4: Register the new sdma function pointers
Register stop/start/soft_reset queue functions for sdma v4_4_2.

Suggested-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Jesse Zhang <jesse.zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
committed by
Alex Deucher
parent
2989184215
commit
ce1d40196d
@@ -108,6 +108,8 @@ static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev);
|
||||
static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev);
|
||||
static void sdma_v4_4_2_set_engine_reset_funcs(struct amdgpu_device *adev);
|
||||
static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev);
|
||||
static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring);
|
||||
static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring);
|
||||
|
||||
static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev,
|
||||
u32 instance, u32 offset)
|
||||
@@ -1333,6 +1335,11 @@ static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev)
|
||||
}
|
||||
}
|
||||
|
||||
static const struct amdgpu_sdma_funcs sdma_v4_4_2_sdma_funcs = {
|
||||
.stop_kernel_queue = &sdma_v4_4_2_stop_queue,
|
||||
.start_kernel_queue = &sdma_v4_4_2_restore_queue,
|
||||
};
|
||||
|
||||
static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
|
||||
{
|
||||
struct amdgpu_device *adev = ip_block->adev;
|
||||
@@ -1352,7 +1359,6 @@ static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
|
||||
sdma_v4_4_2_set_irq_funcs(adev);
|
||||
sdma_v4_4_2_set_ras_funcs(adev);
|
||||
sdma_v4_4_2_set_engine_reset_funcs(adev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1447,6 +1453,7 @@ static int sdma_v4_4_2_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
/* Initialize guilty flags for GFX and PAGE queues */
|
||||
adev->sdma.instance[i].gfx_guilty = false;
|
||||
adev->sdma.instance[i].page_guilty = false;
|
||||
adev->sdma.instance[i].funcs = &sdma_v4_4_2_sdma_funcs;
|
||||
|
||||
ring = &adev->sdma.instance[i].ring;
|
||||
ring->ring_obj = NULL;
|
||||
@@ -1678,11 +1685,12 @@ static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
|
||||
return r;
|
||||
}
|
||||
|
||||
static int sdma_v4_4_2_stop_queue(struct amdgpu_device *adev, uint32_t instance_id)
|
||||
static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
u32 instance_id = GET_INST(SDMA0, ring->me);
|
||||
u32 inst_mask;
|
||||
uint64_t rptr;
|
||||
struct amdgpu_ring *ring = &adev->sdma.instance[instance_id].ring;
|
||||
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
return -EINVAL;
|
||||
@@ -1715,11 +1723,11 @@ static int sdma_v4_4_2_stop_queue(struct amdgpu_device *adev, uint32_t instance_
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sdma_v4_4_2_restore_queue(struct amdgpu_device *adev, uint32_t instance_id)
|
||||
static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring)
|
||||
{
|
||||
int i;
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
u32 inst_mask;
|
||||
struct amdgpu_ring *ring = &adev->sdma.instance[instance_id].ring;
|
||||
int i;
|
||||
|
||||
inst_mask = 1 << ring->me;
|
||||
udelay(50);
|
||||
@@ -1740,8 +1748,6 @@ static int sdma_v4_4_2_restore_queue(struct amdgpu_device *adev, uint32_t instan
|
||||
}
|
||||
|
||||
static struct sdma_on_reset_funcs sdma_v4_4_2_engine_reset_funcs = {
|
||||
.pre_reset = sdma_v4_4_2_stop_queue,
|
||||
.post_reset = sdma_v4_4_2_restore_queue,
|
||||
};
|
||||
|
||||
static void sdma_v4_4_2_set_engine_reset_funcs(struct amdgpu_device *adev)
|
||||
|
||||
Reference in New Issue
Block a user