mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-15 21:21:49 -04:00
Merge tag 'nvme-7.1-2026-05-14' of git://git.infradead.org/nvme into block-7.1
Pull NVMe fixes from Keith: "- Fix memory leak on a passthrough integrity mapping failure (Keith) - Hide secrets behind debug option (Hannes) - Fix pci use-after-free for host memory buffer (Chia-Lin Kao) - Fix tcp target use-after-free for data digest (Sagi) - Revert a mistaken quirk (Alan Cui) - Fix uevent and controller state race condition (Maurizio) - Fix apple submission queue re-initialization (Nick Chan)" * tag 'nvme-7.1-2026-05-14' of git://git.infradead.org/nvme: nvme-apple: Reset q->sq_tail during queue init nvme: fix race condition between connected uevent and STARTED_ONCE flag Revert "nvme: add quirk NVME_QUIRK_IGNORE_DEV_SUBNQN for 144d:a808" nvmet-tcp: Fix potential UAF when ddgst mismatch nvme-pci: fix use-after-free in nvme_free_host_mem() nvmet-auth: Do not print DH-HMAC-CHAP secrets nvme: fix bio leak on mapping failure nvme: make prp passthrough usage less scary
This commit is contained in:
@@ -1009,6 +1009,7 @@ static void apple_nvme_init_queue(struct apple_nvme_queue *q)
|
||||
unsigned int depth = apple_nvme_queue_depth(q);
|
||||
struct apple_nvme *anv = queue_to_apple_nvme(q);
|
||||
|
||||
q->sq_tail = 0;
|
||||
q->cq_head = 0;
|
||||
q->cq_phase = 1;
|
||||
if (anv->hw->has_lsq_nvmmu)
|
||||
|
||||
@@ -3749,6 +3749,10 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended)
|
||||
ret = nvme_hwmon_init(ctrl);
|
||||
if (ret == -EINTR)
|
||||
return ret;
|
||||
|
||||
if (!nvme_ctrl_sgl_supported(ctrl))
|
||||
dev_info(ctrl->device,
|
||||
"passthrough uses implicit buffer lengths\n");
|
||||
}
|
||||
|
||||
clear_bit(NVME_CTRL_DIRTY_CAPABILITY, &ctrl->flags);
|
||||
@@ -5041,8 +5045,8 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
|
||||
nvme_mpath_update(ctrl);
|
||||
}
|
||||
|
||||
nvme_change_uevent(ctrl, "NVME_EVENT=connected");
|
||||
set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags);
|
||||
nvme_change_uevent(ctrl, "NVME_EVENT=connected");
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nvme_start_ctrl);
|
||||
|
||||
|
||||
@@ -120,21 +120,11 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
|
||||
struct nvme_ns *ns = q->queuedata;
|
||||
struct block_device *bdev = ns ? ns->disk->part0 : NULL;
|
||||
bool supports_metadata = bdev && blk_get_integrity(bdev->bd_disk);
|
||||
struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
|
||||
bool has_metadata = meta_buffer && meta_len;
|
||||
struct bio *bio = NULL;
|
||||
int ret;
|
||||
|
||||
if (!nvme_ctrl_sgl_supported(ctrl))
|
||||
dev_warn_once(ctrl->device, "using unchecked data buffer\n");
|
||||
if (has_metadata) {
|
||||
if (!supports_metadata)
|
||||
return -EINVAL;
|
||||
|
||||
if (!nvme_ctrl_meta_sgl_supported(ctrl))
|
||||
dev_warn_once(ctrl->device,
|
||||
"using unchecked metadata buffer\n");
|
||||
}
|
||||
if (has_metadata && !supports_metadata)
|
||||
return -EINVAL;
|
||||
|
||||
if (iter)
|
||||
ret = blk_rq_map_user_iov(q, req, NULL, iter, GFP_KERNEL);
|
||||
@@ -154,8 +144,8 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
|
||||
return ret;
|
||||
|
||||
out_unmap:
|
||||
if (bio)
|
||||
blk_rq_unmap_user(bio);
|
||||
if (req->bio)
|
||||
blk_rq_unmap_user(req->bio);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
@@ -2533,11 +2533,13 @@ static void nvme_free_host_mem_multi(struct nvme_dev *dev)
|
||||
|
||||
static void nvme_free_host_mem(struct nvme_dev *dev)
|
||||
{
|
||||
if (dev->hmb_sgt)
|
||||
if (dev->hmb_sgt) {
|
||||
dma_free_noncontiguous(dev->dev, dev->host_mem_size,
|
||||
dev->hmb_sgt, DMA_BIDIRECTIONAL);
|
||||
else
|
||||
dev->hmb_sgt = NULL;
|
||||
} else {
|
||||
nvme_free_host_mem_multi(dev);
|
||||
}
|
||||
|
||||
dma_free_coherent(dev->dev, dev->host_mem_descs_size,
|
||||
dev->host_mem_descs, dev->host_mem_descs_dma);
|
||||
@@ -4107,8 +4109,6 @@ static const struct pci_device_id nvme_id_table[] = {
|
||||
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
|
||||
{ PCI_DEVICE(0x1c5f, 0x0555), /* Memblaze Pblaze5 adapter */
|
||||
.driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
|
||||
{ PCI_DEVICE(0x144d, 0xa808), /* Samsung PM981/983 */
|
||||
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
|
||||
{ PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */
|
||||
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
|
||||
{ PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
|
||||
|
||||
@@ -117,6 +117,15 @@ config NVME_TARGET_AUTH
|
||||
|
||||
If unsure, say N.
|
||||
|
||||
config NVME_TARGET_AUTH_DEBUG
|
||||
bool "NVMe over Fabrics In-band Authentication debug messages"
|
||||
depends on NVME_TARGET_AUTH
|
||||
help
|
||||
This enables additional debug messages including the generated
|
||||
DH-HMAC-CHAP secrets to help debugging authentication failures.
|
||||
|
||||
If unsure, say N.
|
||||
|
||||
config NVME_TARGET_PCI_EPF
|
||||
tristate "NVMe PCI Endpoint Function target support"
|
||||
depends on NVME_TARGET && PCI_ENDPOINT
|
||||
|
||||
@@ -144,7 +144,6 @@ u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, bool reset)
|
||||
goto out_unlock;
|
||||
|
||||
list_for_each_entry(p, &ctrl->subsys->hosts, entry) {
|
||||
pr_debug("check %s\n", nvmet_host_name(p->host));
|
||||
if (strcmp(nvmet_host_name(p->host), ctrl->hostnqn))
|
||||
continue;
|
||||
host = p->host;
|
||||
@@ -189,11 +188,12 @@ u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, bool reset)
|
||||
ctrl->host_key = NULL;
|
||||
goto out_free_hash;
|
||||
}
|
||||
#ifdef CONFIG_NVME_TARGET_AUTH_DEBUG
|
||||
pr_debug("%s: using hash %s key %*ph\n", __func__,
|
||||
ctrl->host_key->hash > 0 ?
|
||||
nvme_auth_hmac_name(ctrl->host_key->hash) : "none",
|
||||
(int)ctrl->host_key->len, ctrl->host_key->key);
|
||||
|
||||
#endif
|
||||
nvme_auth_free_key(ctrl->ctrl_key);
|
||||
if (!host->dhchap_ctrl_secret) {
|
||||
ctrl->ctrl_key = NULL;
|
||||
@@ -207,11 +207,12 @@ u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, bool reset)
|
||||
ctrl->ctrl_key = NULL;
|
||||
goto out_free_hash;
|
||||
}
|
||||
#ifdef CONFIG_NVME_TARGET_AUTH_DEBUG
|
||||
pr_debug("%s: using ctrl hash %s key %*ph\n", __func__,
|
||||
ctrl->ctrl_key->hash > 0 ?
|
||||
nvme_auth_hmac_name(ctrl->ctrl_key->hash) : "none",
|
||||
(int)ctrl->ctrl_key->len, ctrl->ctrl_key->key);
|
||||
|
||||
#endif
|
||||
out_free_hash:
|
||||
if (ret) {
|
||||
if (ctrl->host_key) {
|
||||
@@ -317,7 +318,6 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
|
||||
if (ret)
|
||||
goto out_free_challenge;
|
||||
}
|
||||
|
||||
pr_debug("ctrl %d qid %d host response seq %u transaction %d\n",
|
||||
ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
|
||||
req->sq->dhchap_tid);
|
||||
@@ -434,8 +434,10 @@ int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
|
||||
ret = -EINVAL;
|
||||
} else {
|
||||
memcpy(buf, ctrl->dh_key, buf_size);
|
||||
#ifdef CONFIG_NVME_TARGET_AUTH_DEBUG
|
||||
pr_debug("%s: ctrl %d public key %*ph\n", __func__,
|
||||
ctrl->cntlid, (int)buf_size, buf);
|
||||
#endif
|
||||
}
|
||||
|
||||
return ret;
|
||||
@@ -458,11 +460,12 @@ int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
|
||||
ctrl->shash_id);
|
||||
if (ret)
|
||||
pr_debug("failed to compute session key, err %d\n", ret);
|
||||
#ifdef CONFIG_NVME_TARGET_AUTH_DEBUG
|
||||
else
|
||||
pr_debug("%s: session key %*ph\n", __func__,
|
||||
(int)req->sq->dhchap_skey_len,
|
||||
req->sq->dhchap_skey);
|
||||
|
||||
#endif
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
@@ -1321,8 +1321,10 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
|
||||
queue->idx, cmd->req.cmd->common.command_id,
|
||||
queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
|
||||
le32_to_cpu(cmd->exp_ddgst));
|
||||
if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED))
|
||||
if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED)) {
|
||||
cmd->req.cqe->status = NVME_SC_CMD_SEQ_ERROR;
|
||||
nvmet_req_uninit(&cmd->req);
|
||||
}
|
||||
nvmet_tcp_free_cmd_buffers(cmd);
|
||||
ret = -EPROTO;
|
||||
goto out;
|
||||
|
||||
Reference in New Issue
Block a user