RDMA/rxe: Remove __rxe_do_task()
The subroutine __rxe_do_task() is not thread safe and has no way to guarantee that the tasks, which are designed to be non-reentrant, are not reentered. All of its uses are non-performance critical.

This patch replaces calls to __rxe_do_task() with calls to rxe_sched_task(). It also removes irrelevant or unneeded if tests. Instead of calling the task machinery, a single call to the tasklet function (rxe_requester(), etc.) is sufficient to drain the queues if task execution has been disabled or stopped.

Together these changes allow the removal of __rxe_do_task().

Link: https://lore.kernel.org/r/20230304174533.11296-7-rpearsonhpe@gmail.com
Signed-off-by: Ian Ziemba <ian.ziemba@hpe.com>
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
commit 960ebe97e5 (parent a246aa2e8a)
committed by Jason Gunthorpe
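For readers new to the rxe task machinery, the hazard being removed is the raw drain loop itself: __rxe_do_task() invokes the task function back-to-back with nothing preventing the same function from also running in the tasklet, even though the task functions are written as non-reentrant state machines. The sketch below is not driver code; it is a stand-alone userspace illustration (C with pthreads; the names fake_task, handler and drain_raw are invented for this example) of why such a handler must only ever be entered through one serializing path.

/*
 * Stand-alone illustration only (not rxe code): a non-reentrant handler
 * driven by a raw drain loop from two contexts at once.
 *
 * Build: cc -pthread -o reentry reentry.c
 */
#include <pthread.h>
#include <stdio.h>

struct fake_task {
        int pending;            /* work items left to process */
        int in_handler;         /* set while the handler is running */
        long reentries;         /* how often the handler overlapped itself */
};

/* Written in the style of an rxe task function: returns 0 while more work
 * remains, nonzero once the queue is drained.  It assumes it is never
 * entered concurrently.
 */
static int handler(struct fake_task *t)
{
        int more;

        if (__sync_lock_test_and_set(&t->in_handler, 1))
                __sync_fetch_and_add(&t->reentries, 1);

        more = (__sync_sub_and_fetch(&t->pending, 1) > 0) ? 0 : 1;

        __sync_lock_release(&t->in_handler);
        return more;
}

/* The removed pattern: loop on the handler with no serialization at all. */
static void *drain_raw(void *arg)
{
        struct fake_task *t = arg;

        while (handler(t) == 0)
                ;
        return NULL;
}

int main(void)
{
        struct fake_task t = { .pending = 1000000 };
        pthread_t a, b;

        /* Two contexts draining the same task, as could happen when the raw
         * loop raced with the scheduled task: the handler overlaps itself.
         */
        pthread_create(&a, NULL, drain_raw, &t);
        pthread_create(&b, NULL, drain_raw, &t);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        printf("non-reentrant handler was reentered %ld times\n", t.reentries);
        return 0;
}

On a multicore machine this typically reports a nonzero reentry count. Routing every invocation through a single scheduler, as rxe_sched_task() does via the task machinery, or calling the function directly only after the task has been disabled or stopped, as the reset and cleanup paths below now do, keeps the handler from overlapping itself.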
drivers/infiniband/sw/rxe/rxe_qp.c
@@ -473,29 +473,23 @@ static void rxe_qp_reset(struct rxe_qp *qp)
 {
         /* stop tasks from running */
         rxe_disable_task(&qp->resp.task);
-
-        /* stop request/comp */
-        if (qp->sq.queue) {
-                if (qp_type(qp) == IB_QPT_RC)
-                        rxe_disable_task(&qp->comp.task);
-                rxe_disable_task(&qp->req.task);
-        }
+        rxe_disable_task(&qp->comp.task);
+        rxe_disable_task(&qp->req.task);
 
         /* move qp to the reset state */
         qp->req.state = QP_STATE_RESET;
         qp->comp.state = QP_STATE_RESET;
         qp->resp.state = QP_STATE_RESET;
 
-        /* let state machines reset themselves drain work and packet queues
-         * etc.
-         */
-        __rxe_do_task(&qp->resp.task);
+        /* drain work and packet queuesc */
+        rxe_requester(qp);
+        rxe_completer(qp);
+        rxe_responder(qp);
 
-        if (qp->sq.queue) {
-                __rxe_do_task(&qp->comp.task);
-                __rxe_do_task(&qp->req.task);
+        if (qp->rq.queue)
+                rxe_queue_reset(qp->rq.queue);
+        if (qp->sq.queue)
                 rxe_queue_reset(qp->sq.queue);
-        }
 
         /* cleanup attributes */
         atomic_set(&qp->ssn, 0);
@@ -518,13 +512,8 @@ static void rxe_qp_reset(struct rxe_qp *qp)
 
         /* reenable tasks */
         rxe_enable_task(&qp->resp.task);
-
-        if (qp->sq.queue) {
-                if (qp_type(qp) == IB_QPT_RC)
-                        rxe_enable_task(&qp->comp.task);
-
-                rxe_enable_task(&qp->req.task);
-        }
+        rxe_enable_task(&qp->comp.task);
+        rxe_enable_task(&qp->req.task);
 }
 
 /* drain the send queue */
@@ -533,10 +522,7 @@ static void rxe_qp_drain(struct rxe_qp *qp)
         if (qp->sq.queue) {
                 if (qp->req.state != QP_STATE_DRAINED) {
                         qp->req.state = QP_STATE_DRAIN;
-                        if (qp_type(qp) == IB_QPT_RC)
-                                rxe_sched_task(&qp->comp.task);
-                        else
-                                __rxe_do_task(&qp->comp.task);
+                        rxe_sched_task(&qp->comp.task);
                         rxe_sched_task(&qp->req.task);
                 }
         }
@@ -552,11 +538,7 @@ void rxe_qp_error(struct rxe_qp *qp)
 
         /* drain work and packet queues */
         rxe_sched_task(&qp->resp.task);
-
-        if (qp_type(qp) == IB_QPT_RC)
-                rxe_sched_task(&qp->comp.task);
-        else
-                __rxe_do_task(&qp->comp.task);
+        rxe_sched_task(&qp->comp.task);
         rxe_sched_task(&qp->req.task);
 }
 
@@ -773,24 +755,20 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
 
         qp->valid = 0;
         qp->qp_timeout_jiffies = 0;
-        rxe_cleanup_task(&qp->resp.task);
 
         if (qp_type(qp) == IB_QPT_RC) {
                 del_timer_sync(&qp->retrans_timer);
                 del_timer_sync(&qp->rnr_nak_timer);
         }
 
+        rxe_cleanup_task(&qp->resp.task);
         rxe_cleanup_task(&qp->req.task);
         rxe_cleanup_task(&qp->comp.task);
 
         /* flush out any receive wr's or pending requests */
-        if (qp->req.task.func)
-                __rxe_do_task(&qp->req.task);
-
-        if (qp->sq.queue) {
-                __rxe_do_task(&qp->comp.task);
-                __rxe_do_task(&qp->req.task);
-        }
+        rxe_requester(qp);
+        rxe_completer(qp);
+        rxe_responder(qp);
 
         if (qp->sq.queue)
                 rxe_queue_cleanup(qp->sq.queue);
drivers/infiniband/sw/rxe/rxe_task.c
@@ -6,19 +6,6 @@
 
 #include "rxe.h"
 
-int __rxe_do_task(struct rxe_task *task)
-
-{
-        int ret;
-
-        while ((ret = task->func(task->qp)) == 0)
-                ;
-
-        task->ret = ret;
-
-        return ret;
-}
-
 /*
  * this locking is due to a potential race where
  * a second caller finds the task already running
drivers/infiniband/sw/rxe/rxe_task.h
@@ -39,12 +39,6 @@ int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp,
 /* cleanup task */
 void rxe_cleanup_task(struct rxe_task *task);
 
-/*
- * raw call to func in loop without any checking
- * can call when tasklets are disabled
- */
-int __rxe_do_task(struct rxe_task *task);
-
 void rxe_run_task(struct rxe_task *task);
 
 void rxe_sched_task(struct rxe_task *task);