RDMA/irdma: Remove unused CQ registry

The CQ registry was never actually used (ceq->reg_cq was always NULL),
so remove the dead code.

Signed-off-by: Jacob Moroni <jmoroni@google.com>
Link: https://patch.msgid.link/20251105162841.31786-1-jmoroni@google.com
Acked-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
This commit is contained in:
Jacob Moroni
2025-11-05 16:28:41 +00:00
committed by Leon Romanovsky
parent 6e79e21005
commit 5dd68a5914
3 changed files with 3 additions and 122 deletions

View File

@@ -2943,8 +2943,6 @@ static int irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch,
__le64 *wqe;
struct irdma_sc_cqp *cqp;
u64 hdr;
struct irdma_sc_ceq *ceq;
int ret_code = 0;
cqp = cq->dev->cqp;
if (cq->cq_uk.cq_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt)
@@ -2953,19 +2951,9 @@ static int irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch,
if (cq->ceq_id >= cq->dev->hmc_fpm_misc.max_ceqs)
return -EINVAL;
ceq = cq->dev->ceq[cq->ceq_id];
if (ceq && ceq->reg_cq)
ret_code = irdma_sc_add_cq_ctx(ceq, cq);
if (ret_code)
return ret_code;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe) {
if (ceq && ceq->reg_cq)
irdma_sc_remove_cq_ctx(ceq, cq);
if (!wqe)
return -ENOMEM;
}
set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
@@ -3018,17 +3006,12 @@ int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq)
struct irdma_sc_cqp *cqp;
__le64 *wqe;
u64 hdr;
struct irdma_sc_ceq *ceq;
cqp = cq->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
return -ENOMEM;
ceq = cq->dev->ceq[cq->ceq_id];
if (ceq && ceq->reg_cq)
irdma_sc_remove_cq_ctx(ceq, cq);
set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
set_64bit_val(wqe, 40, cq->shadow_area_pa);
@@ -3601,71 +3584,6 @@ static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
return 0;
}
/**
 * irdma_sc_find_reg_cq - find cq ctx index
 * @ceq: ceq sc structure
 * @cq: cq sc structure
 *
 * Linear scan of the ceq's registered-CQ array looking for @cq.
 * Callers must hold ceq->req_cq_lock while the array is scanned.
 *
 * Return: index of @cq within ceq->reg_cq, or IRDMA_INVALID_CQ_IDX
 * if @cq is not registered on this ceq.
 */
static u32 irdma_sc_find_reg_cq(struct irdma_sc_ceq *ceq,
				struct irdma_sc_cq *cq)
{
	u32 i;

	for (i = 0; i < ceq->reg_cq_size; i++) {
		if (cq == ceq->reg_cq[i])
			return i;
	}

	return IRDMA_INVALID_CQ_IDX;
}
/**
 * irdma_sc_add_cq_ctx - add cq ctx tracking for ceq
 * @ceq: ceq sc structure
 * @cq: cq sc structure
 *
 * Append @cq to the ceq's registered-CQ array.  The array is
 * protected by ceq->req_cq_lock (taken with IRQs disabled, since
 * the registry is also walked from CEQ processing context).
 *
 * Return: 0 on success, -ENOMEM if the registry is already full
 * (reg_cq_size has reached elem_cnt).
 */
int irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
{
	unsigned long flags;

	spin_lock_irqsave(&ceq->req_cq_lock, flags);

	/* Registry capacity is bounded by the CEQ element count. */
	if (ceq->reg_cq_size == ceq->elem_cnt) {
		spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
		return -ENOMEM;
	}

	ceq->reg_cq[ceq->reg_cq_size++] = cq;

	spin_unlock_irqrestore(&ceq->req_cq_lock, flags);

	return 0;
}
/**
 * irdma_sc_remove_cq_ctx - remove cq ctx tracking for ceq
 * @ceq: ceq sc structure
 * @cq: cq sc structure
 *
 * Remove @cq from the ceq's registered-CQ array under
 * ceq->req_cq_lock.  Removal is swap-with-last: the final entry is
 * moved into the vacated slot so the array stays dense, and the old
 * last slot is cleared.  Silently a no-op if @cq is not registered.
 */
void irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
{
	unsigned long flags;
	u32 cq_ctx_idx;

	spin_lock_irqsave(&ceq->req_cq_lock, flags);

	cq_ctx_idx = irdma_sc_find_reg_cq(ceq, cq);
	if (cq_ctx_idx == IRDMA_INVALID_CQ_IDX)
		goto exit;

	ceq->reg_cq_size--;
	/* Fill the hole with the last entry unless we removed the last. */
	if (cq_ctx_idx != ceq->reg_cq_size)
		ceq->reg_cq[cq_ctx_idx] = ceq->reg_cq[ceq->reg_cq_size];
	ceq->reg_cq[ceq->reg_cq_size] = NULL;

exit:
	spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
}
/**
* irdma_sc_cqp_init - Initialize buffers for a control Queue Pair
* @cqp: IWARP control queue pair pointer
@@ -4387,9 +4305,6 @@ int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
ceq->ceq_elem_pa = info->ceqe_pa;
ceq->virtual_map = info->virtual_map;
ceq->itr_no_expire = info->itr_no_expire;
ceq->reg_cq = info->reg_cq;
ceq->reg_cq_size = 0;
spin_lock_init(&ceq->req_cq_lock);
ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);
@@ -4472,9 +4387,6 @@ int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq)
{
struct irdma_sc_cqp *cqp;
if (ceq->reg_cq)
irdma_sc_remove_cq_ctx(ceq, ceq->dev->ccq);
cqp = ceq->dev->cqp;
cqp->process_cqp_sds = irdma_update_sds_noccq;
@@ -4493,11 +4405,6 @@ int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch)
struct irdma_sc_dev *dev = ceq->dev;
dev->ccq->vsi_idx = ceq->vsi_idx;
if (ceq->reg_cq) {
ret_code = irdma_sc_add_cq_ctx(ceq, ceq->dev->ccq);
if (ret_code)
return ret_code;
}
ret_code = irdma_sc_ceq_create(ceq, scratch, true);
if (!ret_code)
@@ -4562,7 +4469,6 @@ void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq)
struct irdma_sc_cq *temp_cq;
u8 polarity;
u32 cq_idx;
unsigned long flags;
do {
cq_idx = 0;
@@ -4583,11 +4489,6 @@ void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq)
}
cq = temp_cq;
if (ceq->reg_cq) {
spin_lock_irqsave(&ceq->req_cq_lock, flags);
cq_idx = irdma_sc_find_reg_cq(ceq, cq);
spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
}
IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))

View File

@@ -726,7 +726,6 @@ static int irdma_puda_cq_wqe(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
struct irdma_sc_cqp *cqp;
u64 hdr;
struct irdma_ccq_cqe_info compl_info;
int status = 0;
cqp = dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
@@ -756,16 +755,8 @@ static int irdma_puda_cq_wqe(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
print_hex_dump_debug("PUDA: PUDA CREATE CQ", DUMP_PREFIX_OFFSET, 16,
8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
irdma_sc_cqp_post_sq(dev->cqp);
status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_CQ,
&compl_info);
if (!status) {
struct irdma_sc_ceq *ceq = dev->ceq[0];
if (ceq && ceq->reg_cq)
status = irdma_sc_add_cq_ctx(ceq, cq);
}
return status;
return irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_CQ,
&compl_info);
}
/**
@@ -897,23 +888,17 @@ void irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
struct irdma_puda_buf *buf = NULL;
struct irdma_puda_buf *nextbuf = NULL;
struct irdma_virt_mem *vmem;
struct irdma_sc_ceq *ceq;
ceq = vsi->dev->ceq[0];
switch (type) {
case IRDMA_PUDA_RSRC_TYPE_ILQ:
rsrc = vsi->ilq;
vmem = &vsi->ilq_mem;
vsi->ilq = NULL;
if (ceq && ceq->reg_cq)
irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
break;
case IRDMA_PUDA_RSRC_TYPE_IEQ:
rsrc = vsi->ieq;
vmem = &vsi->ieq_mem;
vsi->ieq = NULL;
if (ceq && ceq->reg_cq)
irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
break;
default:
ibdev_dbg(to_ibdev(dev), "PUDA: error resource type = 0x%x\n",

View File

@@ -492,9 +492,6 @@ struct irdma_sc_ceq {
u32 first_pm_pbl_idx;
u8 polarity;
u16 vsi_idx;
struct irdma_sc_cq **reg_cq;
u32 reg_cq_size;
spinlock_t req_cq_lock; /* protect access to reg_cq array */
bool virtual_map:1;
bool tph_en:1;
bool itr_no_expire:1;
@@ -894,8 +891,6 @@ struct irdma_ceq_init_info {
u8 tph_val;
u16 vsi_idx;
u32 first_pm_pbl_idx;
struct irdma_sc_cq **reg_cq;
u32 reg_cq_idx;
};
struct irdma_aeq_init_info {