drm/amdgpu: remove old sdma reset callback mechanism

This patch removes the deprecated SDMA reset callback mechanism, which was used to register pre-reset and post-reset callbacks for SDMA engine resets. The callback mechanism has been replaced with a more direct and efficient approach: `stop_queue` and `start_queue` functions in the ring's function table.

The SDMA reset callback mechanism allowed KFD and AMDGPU to register pre-reset and post-reset handlers for SDMA engine resets. That indirection added unnecessary complexity and became redundant once `stop_queue` and `start_queue` were introduced in the ring's function table; a minimal sketch of the replacement pattern follows.
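For context, this is a sketch only, assuming the `stop_queue`/`start_queue` members described above live in `struct amdgpu_ring_funcs` (initializer abridged; the handler prototypes are copied from the sdma_v4_4_2 diff below):

```c
/* Sketch of the replacement: per-ring reset hooks in the ring function
 * table. Prototypes match the sdma_v4_4_2 declarations in this patch;
 * the rest of the amdgpu_ring_funcs initializer is elided. */
static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring);
static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring);

static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
	/* ... existing ring callbacks elided ... */
	.stop_queue  = sdma_v4_4_2_stop_queue,    /* quiesce the queue before an engine reset */
	.start_queue = sdma_v4_4_2_restore_queue, /* bring the queue back up after the reset */
};
```

Because the hooks hang off the ring itself, the reset path no longer needs a registration list to find them.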

1. **Remove Callback Mechanism**:
   - Removed the `amdgpu_sdma_register_on_reset_callbacks` function and its associated data structure (`struct sdma_on_reset_funcs`).
   - Removed the callback registration logic from the SDMA v4.4.2 initialization code.

2. **Clean Up Related Code**:
   - Removed the `sdma_v4_4_2_set_engine_reset_funcs` function, which registered the callbacks.
   - Removed the `sdma_v4_4_2_engine_reset_funcs` structure, which held the pre-reset and post-reset callbacks (the resulting direct reset flow is sketched after this list).
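With the registration list gone, the reset path can invoke the per-ring hooks directly. A simplified, illustrative sketch of that flow (locking, scheduler handling, and multi-instance details elided; `amdgpu_sdma_reset_engine` and `amdgpu_sdma_soft_reset` appear in the diff below, but this body is not the actual implementation):

```c
/* Illustrative flow only: engine reset now drives the per-ring hooks
 * directly instead of walking a list of registered callbacks. */
int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id)
{
	struct amdgpu_ring *ring = &adev->sdma.instance[instance_id].ring;
	int r;

	r = ring->funcs->stop_queue(ring);	/* pre-reset: stop the queue */
	if (r)
		return r;

	r = amdgpu_sdma_soft_reset(adev, instance_id);	/* reset the engine */
	if (r)
		return r;

	return ring->funcs->start_queue(ring);	/* post-reset: restart the queue */
}
```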

Signed-off-by: Jesse Zhang <jesse.zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Author:       Jesse.zhang@amd.com
Date:         2025-04-11 15:30:23 +08:00
Committed by: Alex Deucher
Parent:       3f8b6d8282
Commit:       2200b41428

3 changed files with 0 additions and 43 deletions

drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c

@@ -531,31 +531,6 @@ bool amdgpu_sdma_is_shared_inv_eng(struct amdgpu_device *adev, struct amdgpu_rin
 	return false;
 }
 
-/**
- * amdgpu_sdma_register_on_reset_callbacks - Register SDMA reset callbacks
- * @adev: Pointer to the AMDGPU device
- * @funcs: Pointer to the callback structure containing pre_reset and post_reset functions
- *
- * This function allows KFD and AMDGPU to register their own callbacks for handling
- * pre-reset and post-reset operations for engine reset. These are needed because engine
- * reset will stop all queues on that engine.
- */
-void amdgpu_sdma_register_on_reset_callbacks(struct amdgpu_device *adev, struct sdma_on_reset_funcs *funcs)
-{
-	if (!funcs)
-		return;
-
-	/* Ensure the reset_callback_list is initialized */
-	if (!adev->sdma.reset_callback_list.next) {
-		INIT_LIST_HEAD(&adev->sdma.reset_callback_list);
-	}
-
-	/* Initialize the list node in the callback structure */
-	INIT_LIST_HEAD(&funcs->list);
-
-	/* Add the callback structure to the global list */
-	list_add_tail(&funcs->list, &adev->sdma.reset_callback_list);
-}
-
 static int amdgpu_sdma_soft_reset(struct amdgpu_device *adev, u32 instance_id)
 {
 	struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];

drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h

@@ -109,13 +109,6 @@ struct amdgpu_sdma_ras {
 	struct amdgpu_ras_block_object ras_block;
 };
 
-struct sdma_on_reset_funcs {
-	int (*pre_reset)(struct amdgpu_device *adev, uint32_t instance_id);
-	int (*post_reset)(struct amdgpu_device *adev, uint32_t instance_id);
-	/* Linked list node to store this structure in a list; */
-	struct list_head list;
-};
-
 struct amdgpu_sdma {
 	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
 	struct amdgpu_irq_src trap_irq;
@@ -178,7 +171,6 @@ struct amdgpu_buffer_funcs {
 			       uint32_t byte_count);
 };
 
-void amdgpu_sdma_register_on_reset_callbacks(struct amdgpu_device *adev, struct sdma_on_reset_funcs *funcs);
 int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id);
 
 #define amdgpu_emit_copy_buffer(adev, ib, s, d, b, t) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b), (t))

drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c

@@ -106,7 +106,6 @@ static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_engine_reset_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev);
static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring);
static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring);
@@ -1358,7 +1357,6 @@ static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
sdma_v4_4_2_set_vm_pte_funcs(adev);
sdma_v4_4_2_set_irq_funcs(adev);
sdma_v4_4_2_set_ras_funcs(adev);
sdma_v4_4_2_set_engine_reset_funcs(adev);
return 0;
}
@@ -1747,14 +1745,6 @@ static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring)
return sdma_v4_4_2_inst_start(adev, inst_mask, true);
}
static struct sdma_on_reset_funcs sdma_v4_4_2_engine_reset_funcs = {
};
static void sdma_v4_4_2_set_engine_reset_funcs(struct amdgpu_device *adev)
{
amdgpu_sdma_register_on_reset_callbacks(adev, &sdma_v4_4_2_engine_reset_funcs);
}
static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,