mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-06 00:47:56 -04:00
nvmet-fc: avoid deadlock on delete association path
When deleting an association the shutdown path is deadlocking because we try to flush the nvmet_wq nested. Avoid this deadlock by deferring the put work into its own work item. Reviewed-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Daniel Wagner <dwagner@suse.de> Signed-off-by: Keith Busch <kbusch@kernel.org>
This commit is contained in:
committed by
Keith Busch
parent
3146345c2e
commit
710c69dbac
@@ -111,6 +111,8 @@ struct nvmet_fc_tgtport {
|
|||||||
struct nvmet_fc_port_entry *pe;
|
struct nvmet_fc_port_entry *pe;
|
||||||
struct kref ref;
|
struct kref ref;
|
||||||
u32 max_sg_cnt;
|
u32 max_sg_cnt;
|
||||||
|
|
||||||
|
struct work_struct put_work;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct nvmet_fc_port_entry {
|
struct nvmet_fc_port_entry {
|
||||||
@@ -247,6 +249,13 @@ static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
|
|||||||
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
|
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
|
||||||
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
|
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
|
||||||
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
|
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
|
||||||
|
static void nvmet_fc_put_tgtport_work(struct work_struct *work)
|
||||||
|
{
|
||||||
|
struct nvmet_fc_tgtport *tgtport =
|
||||||
|
container_of(work, struct nvmet_fc_tgtport, put_work);
|
||||||
|
|
||||||
|
nvmet_fc_tgtport_put(tgtport);
|
||||||
|
}
|
||||||
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
|
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
|
||||||
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
|
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
|
||||||
struct nvmet_fc_fcp_iod *fod);
|
struct nvmet_fc_fcp_iod *fod);
|
||||||
@@ -358,7 +367,7 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
|
|||||||
|
|
||||||
if (!lsop->req_queued) {
|
if (!lsop->req_queued) {
|
||||||
spin_unlock_irqrestore(&tgtport->lock, flags);
|
spin_unlock_irqrestore(&tgtport->lock, flags);
|
||||||
goto out_puttgtport;
|
goto out_putwork;
|
||||||
}
|
}
|
||||||
|
|
||||||
list_del(&lsop->lsreq_list);
|
list_del(&lsop->lsreq_list);
|
||||||
@@ -371,8 +380,8 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
|
|||||||
(lsreq->rqstlen + lsreq->rsplen),
|
(lsreq->rqstlen + lsreq->rsplen),
|
||||||
DMA_BIDIRECTIONAL);
|
DMA_BIDIRECTIONAL);
|
||||||
|
|
||||||
out_puttgtport:
|
out_putwork:
|
||||||
nvmet_fc_tgtport_put(tgtport);
|
queue_work(nvmet_wq, &tgtport->put_work);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
@@ -1396,6 +1405,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
|
|||||||
kref_init(&newrec->ref);
|
kref_init(&newrec->ref);
|
||||||
ida_init(&newrec->assoc_cnt);
|
ida_init(&newrec->assoc_cnt);
|
||||||
newrec->max_sg_cnt = template->max_sgl_segments;
|
newrec->max_sg_cnt = template->max_sgl_segments;
|
||||||
|
INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work);
|
||||||
|
|
||||||
ret = nvmet_fc_alloc_ls_iodlist(newrec);
|
ret = nvmet_fc_alloc_ls_iodlist(newrec);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
|
|||||||
Reference in New Issue
Block a user