drm/amdgpu/uq: remove gfx11 specifics from UQ setup

This can all be handled in the IP specific mqd init
code.

V2: Removed setting of gds_va, which was removed during UAPI
    review (Shashank)

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Shashank Sharma <shashank.sharma@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
Alex Deucher
2024-10-21 15:16:12 +02:00
parent 21926b5db8
commit b965c5d871

View File

@@ -23,8 +23,6 @@
*/
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "v11_structs.h"
#include "mes_v11_0.h"
#include "mes_v11_0_userqueue.h"
#include "amdgpu_userq_fence.h"
@@ -183,52 +181,6 @@ static int mes_v11_0_userq_create_ctx_space(struct amdgpu_userq_mgr *uq_mgr,
return r;
}
/* Shadow, GDS and CSA objects come directly from userspace */
if (mqd_user->ip_type == AMDGPU_HW_IP_GFX) {
struct v11_gfx_mqd *mqd = queue->mqd.cpu_ptr;
struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11;
if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || !mqd_user->mqd) {
DRM_ERROR("Invalid GFX MQD\n");
return -EINVAL;
}
mqd_gfx_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
if (IS_ERR(mqd_gfx_v11)) {
DRM_ERROR("Failed to read user MQD\n");
amdgpu_userqueue_destroy_object(uq_mgr, ctx);
return -ENOMEM;
}
mqd->shadow_base_lo = mqd_gfx_v11->shadow_va & 0xFFFFFFFC;
mqd->shadow_base_hi = upper_32_bits(mqd_gfx_v11->shadow_va);
mqd->gds_bkup_base_lo = 0;
mqd->gds_bkup_base_hi = 0;
mqd->fw_work_area_base_lo = mqd_gfx_v11->csa_va & 0xFFFFFFFC;
mqd->fw_work_area_base_hi = upper_32_bits(mqd_gfx_v11->csa_va);
kfree(mqd_gfx_v11);
} else if (mqd_user->ip_type == AMDGPU_HW_IP_DMA) {
struct v11_sdma_mqd *mqd = queue->mqd.cpu_ptr;
struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;
if (mqd_user->mqd_size != sizeof(*mqd_sdma_v11) || !mqd_user->mqd) {
DRM_ERROR("Invalid SDMA MQD\n");
return -EINVAL;
}
mqd_sdma_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
if (IS_ERR(mqd_sdma_v11)) {
DRM_ERROR("Failed to read sdma user MQD\n");
amdgpu_userqueue_destroy_object(uq_mgr, ctx);
return -ENOMEM;
}
mqd->sdmax_rlcx_csa_addr_lo = mqd_sdma_v11->csa_va & 0xFFFFFFFC;
mqd->sdmax_rlcx_csa_addr_hi = upper_32_bits(mqd_sdma_v11->csa_va);
}
return 0;
}
@@ -300,6 +252,41 @@ static int mes_v11_0_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
userq_props->hqd_active = false;
kfree(compute_mqd);
} else if (queue->queue_type == AMDGPU_HW_IP_GFX) {
struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11;
if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || !mqd_user->mqd) {
DRM_ERROR("Invalid GFX MQD\n");
return -EINVAL;
}
mqd_gfx_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
if (IS_ERR(mqd_gfx_v11)) {
DRM_ERROR("Failed to read user MQD\n");
amdgpu_userqueue_destroy_object(uq_mgr, ctx);
return -ENOMEM;
}
userq_props->shadow_addr = mqd_gfx_v11->shadow_va;
userq_props->csa_addr = mqd_gfx_v11->csa_va;
kfree(mqd_gfx_v11);
} else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;
if (mqd_user->mqd_size != sizeof(*mqd_sdma_v11) || !mqd_user->mqd) {
DRM_ERROR("Invalid SDMA MQD\n");
return -EINVAL;
}
mqd_sdma_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
if (IS_ERR(mqd_sdma_v11)) {
DRM_ERROR("Failed to read sdma user MQD\n");
amdgpu_userqueue_destroy_object(uq_mgr, ctx);
return -ENOMEM;
}
userq_props->csa_addr = mqd_sdma_v11->csa_va;
kfree(mqd_sdma_v11);
}
queue->userq_prop = userq_props;