Merge tag 'nvme-6.18-2025-09-23' of git://git.infradead.org/nvme into for-6.18/block
Pull NVMe updates from Keith:

" - FC target fixes (Daniel)
  - Authentication fixes and updates (Martin, Chris)
  - Admin controller handling (Kamaljit)
  - Target lockdep assertions (Max)
  - Keep-alive updates for discovery (Alastair)
  - Suspend quirk (Georg)"

* tag 'nvme-6.18-2025-09-23' of git://git.infradead.org/nvme:
  nvme: Use non zero KATO for persistent discovery connections
  nvmet: add safety check for subsys lock
  nvme-core: use nvme_is_io_ctrl() for I/O controller check
  nvme-core: do ioccsz/iorcsz validation only for I/O controllers
  nvme-core: add method to check for an I/O controller
  nvme-pci: Add TUXEDO IBS Gen8 to Samsung sleep quirk
  nvme-auth: use hkdf_expand_label()
  nvme-auth: add hkdf_expand_label()
  nvme-tcp: send only permitted commands for secure concat
  nvme-fc: use lock accessing port_state and rport state
  nvmet-fcloop: call done callback even when remote port is gone
  nvmet-fc: avoid scheduling association deletion twice
  nvmet-fc: move lsop put work to nvmet_fc_ls_req_op
  nvme-auth: update bi_directional flag
--- a/drivers/nvme/common/auth.c
+++ b/drivers/nvme/common/auth.c
@@ -683,6 +683,59 @@ int nvme_auth_generate_digest(u8 hmac_id, u8 *psk, size_t psk_len,
 }
 EXPORT_SYMBOL_GPL(nvme_auth_generate_digest);
 
+/**
+ * hkdf_expand_label - HKDF-Expand-Label (RFC 8446 section 7.1)
+ * @hmac_tfm: hash context keyed with pseudorandom key
+ * @label: ASCII label without "tls13 " prefix
+ * @labellen: length of @label
+ * @context: context bytes
+ * @contextlen: length of @context
+ * @okm: output keying material
+ * @okmlen: length of @okm
+ *
+ * Build the TLS 1.3 HkdfLabel structure and invoke hkdf_expand().
+ *
+ * Returns 0 on success with output keying material stored in @okm,
+ * or a negative errno value otherwise.
+ */
+static int hkdf_expand_label(struct crypto_shash *hmac_tfm,
+                             const u8 *label, unsigned int labellen,
+                             const u8 *context, unsigned int contextlen,
+                             u8 *okm, unsigned int okmlen)
+{
+        int err;
+        u8 *info;
+        unsigned int infolen;
+        const char *tls13_prefix = "tls13 ";
+        unsigned int prefixlen = strlen(tls13_prefix);
+
+        if (WARN_ON(labellen > (255 - prefixlen)))
+                return -EINVAL;
+        if (WARN_ON(contextlen > 255))
+                return -EINVAL;
+
+        infolen = 2 + (1 + prefixlen + labellen) + (1 + contextlen);
+        info = kzalloc(infolen, GFP_KERNEL);
+        if (!info)
+                return -ENOMEM;
+
+        /* HkdfLabel.Length */
+        put_unaligned_be16(okmlen, info);
+
+        /* HkdfLabel.Label */
+        info[2] = prefixlen + labellen;
+        memcpy(info + 3, tls13_prefix, prefixlen);
+        memcpy(info + 3 + prefixlen, label, labellen);
+
+        /* HkdfLabel.Context */
+        info[3 + prefixlen + labellen] = contextlen;
+        memcpy(info + 4 + prefixlen + labellen, context, contextlen);
+
+        err = hkdf_expand(hmac_tfm, info, infolen, okm, okmlen);
+        kfree_sensitive(info);
+        return err;
+}
+
 /**
  * nvme_auth_derive_tls_psk - Derive TLS PSK
  * @hmac_id: Hash function identifier
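For reference, the info buffer assembled above follows the TLS 1.3 HkdfLabel wire layout: a 2-byte big-endian output length, a 1-byte-length-prefixed label (always carrying the "tls13 " prefix), and a 1-byte-length-prefixed context. A minimal userspace sketch of the same byte layout (standalone illustration only, not the kernel implementation; the example label and context values are made up):

    /* Userspace sketch of the HkdfLabel layout built by hkdf_expand_label().
     * Illustration only; the kernel version hands the buffer to hkdf_expand().
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static size_t build_hkdf_label(const char *label, const uint8_t *ctx,
                                   size_t ctxlen, uint16_t okmlen, uint8_t *out)
    {
            const char *prefix = "tls13 ";
            size_t plen = strlen(prefix), llen = strlen(label), i = 0;

            out[i++] = okmlen >> 8;         /* HkdfLabel.Length, big endian */
            out[i++] = okmlen & 0xff;
            out[i++] = plen + llen;         /* HkdfLabel.Label, length-prefixed */
            memcpy(out + i, prefix, plen); i += plen;
            memcpy(out + i, label, llen); i += llen;
            out[i++] = ctxlen;              /* HkdfLabel.Context, length-prefixed */
            memcpy(out + i, ctx, ctxlen); i += ctxlen;
            return i;
    }

    int main(void)
    {
            uint8_t buf[512];
            const uint8_t ctx[] = "01 mydigest";    /* made-up context */
            size_t n = build_hkdf_label("nvme-tls-psk", ctx, sizeof(ctx) - 1,
                                        32, buf);

            for (size_t j = 0; j < n; j++)
                    printf("%02x ", buf[j]);
            printf("\n");
            return 0;
    }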
@@ -715,10 +768,10 @@ int nvme_auth_derive_tls_psk(int hmac_id, u8 *psk, size_t psk_len,
 {
         struct crypto_shash *hmac_tfm;
         const char *hmac_name;
-        const char *psk_prefix = "tls13 nvme-tls-psk";
+        const char *label = "nvme-tls-psk";
         static const char default_salt[HKDF_MAX_HASHLEN];
-        size_t info_len, prk_len;
-        char *info;
+        size_t prk_len;
+        const char *ctx;
         unsigned char *prk, *tls_key;
         int ret;
 
@@ -758,36 +811,29 @@ int nvme_auth_derive_tls_psk(int hmac_id, u8 *psk, size_t psk_len,
         if (ret)
                 goto out_free_prk;
 
-        /*
-         * 2 additional bytes for the length field from HDKF-Expand-Label,
-         * 2 additional bytes for the HMAC ID, and one byte for the space
-         * separator.
-         */
-        info_len = strlen(psk_digest) + strlen(psk_prefix) + 5;
-        info = kzalloc(info_len + 1, GFP_KERNEL);
-        if (!info) {
+        ctx = kasprintf(GFP_KERNEL, "%02d %s", hmac_id, psk_digest);
+        if (!ctx) {
                 ret = -ENOMEM;
                 goto out_free_prk;
         }
 
-        put_unaligned_be16(psk_len, info);
-        memcpy(info + 2, psk_prefix, strlen(psk_prefix));
-        sprintf(info + 2 + strlen(psk_prefix), "%02d %s", hmac_id, psk_digest);
-
         tls_key = kzalloc(psk_len, GFP_KERNEL);
         if (!tls_key) {
                 ret = -ENOMEM;
-                goto out_free_info;
+                goto out_free_ctx;
         }
-        ret = hkdf_expand(hmac_tfm, info, info_len, tls_key, psk_len);
+        ret = hkdf_expand_label(hmac_tfm,
+                                label, strlen(label),
+                                ctx, strlen(ctx),
+                                tls_key, psk_len);
         if (ret) {
                 kfree(tls_key);
-                goto out_free_info;
+                goto out_free_ctx;
         }
         *ret_psk = tls_key;
 
-out_free_info:
-        kfree(info);
+out_free_ctx:
+        kfree(ctx);
 out_free_prk:
         kfree(prk);
 out_free_shash:
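With this change the derivation reads as HKDF-Expand-Label(PRK, "nvme-tls-psk", context, psk_len), where the context string is simply the two-digit HMAC identifier, a space, and the PSK digest, replacing the hand-rolled buffer arithmetic. A sketch of the context string's shape (values are illustrative; the SHA-256 mapping for hmac_id 1 is an assumption about the auth hash enum):

    /* Illustrative only: shape of the context passed to hkdf_expand_label(). */
    #include <stdio.h>

    int main(void)
    {
            int hmac_id = 1;                        /* assumed: SHA-256 */
            const char *psk_digest = "AAAA...";     /* made-up digest, truncated */
            char ctx[64];

            snprintf(ctx, sizeof(ctx), "%02d %s", hmac_id, psk_digest);
            printf("context = \"%s\"\n", ctx);      /* -> "01 AAAA..." */
            return 0;
    }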
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -331,9 +331,10 @@ static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
         } else {
                 memset(chap->c2, 0, chap->hash_len);
         }
-        if (ctrl->opts->concat)
+        if (ctrl->opts->concat) {
                 chap->s2 = 0;
-        else
+                chap->bi_directional = false;
+        } else
                 chap->s2 = nvme_auth_get_seqnum();
         data->seqnum = cpu_to_le32(chap->s2);
         if (chap->host_key_len) {
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3163,6 +3163,11 @@ static inline bool nvme_admin_ctrl(struct nvme_ctrl *ctrl)
         return ctrl->cntrltype == NVME_CTRL_ADMIN;
 }
 
+static inline bool nvme_is_io_ctrl(struct nvme_ctrl *ctrl)
+{
+        return !nvme_discovery_ctrl(ctrl) && !nvme_admin_ctrl(ctrl);
+}
+
 static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
                 struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 {
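The new helper classifies a controller as an I/O controller when it is neither a discovery nor an admin controller, so the later checks no longer have to enumerate the exclusions themselves. A quick userspace model of the predicate (the enum values follow the NVMe CNTRLTYPE encoding; the names here are stand-ins, not the kernel's):

    /* Userspace model of the controller-type predicate added above. */
    #include <assert.h>
    #include <stdbool.h>

    enum ctrl_type { CTRL_IO = 1, CTRL_DISCOVERY = 2, CTRL_ADMIN = 3 };

    static bool is_discovery(enum ctrl_type t) { return t == CTRL_DISCOVERY; }
    static bool is_admin(enum ctrl_type t)     { return t == CTRL_ADMIN; }
    static bool is_io(enum ctrl_type t)        { return !is_discovery(t) && !is_admin(t); }

    int main(void)
    {
            assert(is_io(CTRL_IO));
            assert(!is_io(CTRL_DISCOVERY));
            assert(!is_io(CTRL_ADMIN));
            return 0;
    }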
@@ -3365,7 +3370,7 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
         else
                 ctrl->max_zeroes_sectors = 0;
 
-        if (ctrl->subsys->subtype != NVME_NQN_NVME ||
+        if (!nvme_is_io_ctrl(ctrl) ||
             !nvme_id_cns_ok(ctrl, NVME_ID_CNS_CS_CTRL) ||
             test_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags))
                 return 0;
@@ -3487,14 +3492,14 @@ static int nvme_check_ctrl_fabric_info(struct nvme_ctrl *ctrl, struct nvme_id_ct
                 return -EINVAL;
         }
 
-        if (!nvme_discovery_ctrl(ctrl) && ctrl->ioccsz < 4) {
+        if (nvme_is_io_ctrl(ctrl) && ctrl->ioccsz < 4) {
                 dev_err(ctrl->device,
                         "I/O queue command capsule supported size %d < 4\n",
                         ctrl->ioccsz);
                 return -EINVAL;
         }
 
-        if (!nvme_discovery_ctrl(ctrl) && ctrl->iorcsz < 1) {
+        if (nvme_is_io_ctrl(ctrl) && ctrl->iorcsz < 1) {
                 dev_err(ctrl->device,
                         "I/O queue response capsule supported size %d < 1\n",
                         ctrl->iorcsz);
@@ -4986,8 +4991,14 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
          * checking that they started once before, hence are reconnecting back.
          */
         if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
-            nvme_discovery_ctrl(ctrl))
+            nvme_discovery_ctrl(ctrl)) {
+                if (!ctrl->kato) {
+                        nvme_stop_keep_alive(ctrl);
+                        ctrl->kato = NVME_DEFAULT_KATO;
+                        nvme_start_keep_alive(ctrl);
+                }
                 nvme_change_uevent(ctrl, "NVME_EVENT=rediscover");
+        }
 
         if (ctrl->queue_count > 1) {
                 nvme_queue_scan(ctrl);
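The intent of this hunk: a persistent discovery controller that reconnects with KATO 0 would never send keep-alives, so the target could silently drop the connection; forcing NVME_DEFAULT_KATO on the rediscover path keeps it alive. A compact standalone model of the decision (the struct, helpers, and the 5-second value are stand-ins for illustration):

    /* Standalone model of the rediscover keep-alive decision above;
     * types and helpers are stand-ins, not the kernel API.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define DEFAULT_KATO 5  /* assumed stand-in for NVME_DEFAULT_KATO */

    struct ctrl { bool started_once, discovery; unsigned int kato; };

    static void start_ctrl(struct ctrl *c)
    {
            if (c->started_once && c->discovery) {
                    if (!c->kato) {
                            /* stop_keep_alive(c); */
                            c->kato = DEFAULT_KATO;
                            /* start_keep_alive(c); */
                            printf("keep-alive enabled, kato=%u\n", c->kato);
                    }
                    printf("uevent: NVME_EVENT=rediscover\n");
            }
    }

    int main(void)
    {
            struct ctrl c = { .started_once = true, .discovery = true, .kato = 0 };

            start_ctrl(&c);
            return 0;
    }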
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -3032,11 +3032,17 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 
         ++ctrl->ctrl.nr_reconnects;
 
-        if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
+        spin_lock_irqsave(&ctrl->rport->lock, flags);
+        if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) {
+                spin_unlock_irqrestore(&ctrl->rport->lock, flags);
                 return -ENODEV;
+        }
 
-        if (nvme_fc_ctlr_active_on_rport(ctrl))
+        if (nvme_fc_ctlr_active_on_rport(ctrl)) {
+                spin_unlock_irqrestore(&ctrl->rport->lock, flags);
                 return -ENOTUNIQ;
+        }
+        spin_unlock_irqrestore(&ctrl->rport->lock, flags);
 
         dev_info(ctrl->ctrl.device,
                 "NVME-FC{%d}: create association : host wwpn 0x%016llx "
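Note the locking pattern this hunk adopts: the rport lock is taken before the port_state check and every early-return path drops it before returning. A generic userspace sketch of the same pattern (a pthread mutex stands in for the spinlock; ENOTUNIQ is Linux-specific):

    /* Generic model of the check-under-lock pattern used above. */
    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int port_online = 1;
    static int already_active;

    static int create_association(void)
    {
            pthread_mutex_lock(&lock);
            if (!port_online) {
                    pthread_mutex_unlock(&lock);    /* unlock on every exit path */
                    return -ENODEV;
            }
            if (already_active) {
                    pthread_mutex_unlock(&lock);
                    return -ENOTUNIQ;
            }
            already_active = 1;                     /* claim the port under the lock */
            pthread_mutex_unlock(&lock);
            return 0;
    }

    int main(void)
    {
            int first = create_association();
            int second = create_association();

            printf("first=%d second=%d\n", first, second);
            return 0;
    }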
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3330,10 +3330,12 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
                  * Exclude Samsung 990 Evo from NVME_QUIRK_SIMPLE_SUSPEND
                  * because of high power consumption (> 2 Watt) in s2idle
                  * sleep. Only some boards with Intel CPU are affected.
                  * (Note for testing: Samsung 990 Evo Plus has same PCI ID)
                  */
                 if (dmi_match(DMI_BOARD_NAME, "DN50Z-140HC-YD") ||
                     dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
+                    dmi_match(DMI_BOARD_NAME, "GXxMRXx") ||
                     dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") ||
+                    dmi_match(DMI_BOARD_NAME, "PH4PG31") ||
                     dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") ||
                     dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71"))
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2250,6 +2250,9 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
         if (error)
                 goto out_cleanup_tagset;
 
+        if (ctrl->opts->concat && !ctrl->tls_pskid)
+                return 0;
+
         error = nvme_enable_ctrl(ctrl);
         if (error)
                 goto out_stop_queue;
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -513,9 +513,6 @@ static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
         return 0;
 }
 
-/*
- * Note: ctrl->subsys->lock should be held when calling this function
- */
 static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
                 struct nvmet_ns *ns)
 {
@@ -523,6 +520,8 @@ static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
         struct pci_dev *p2p_dev;
         int ret;
 
+        lockdep_assert_held(&ctrl->subsys->lock);
+
         if (!ctrl->p2p_client || !ns->use_p2pmem)
                 return;
 
@@ -1539,15 +1538,14 @@ bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
                 return false;
         }
 
-/*
- * Note: ctrl->subsys->lock should be held when calling this function
- */
 static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
                 struct device *p2p_client)
 {
         struct nvmet_ns *ns;
         unsigned long idx;
 
+        lockdep_assert_held(&ctrl->subsys->lock);
+
         if (!p2p_client)
                 return;
 
@@ -1557,14 +1555,13 @@ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
                 nvmet_p2pmem_ns_add_p2p(ctrl, ns);
 }
 
-/*
- * Note: ctrl->subsys->lock should be held when calling this function
- */
 static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
 {
         struct radix_tree_iter iter;
         void __rcu **slot;
 
+        lockdep_assert_held(&ctrl->subsys->lock);
+
         radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
                 pci_dev_put(radix_tree_deref_slot(slot));
 
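The three target/core.c hunks replace "caller must hold the lock" comments with lockdep_assert_held(), turning a documented requirement into a runtime check on lockdep-enabled builds (and compiling away otherwise). A userspace analog of the idea, with a plain assert standing in for lockdep's ownership tracking (the types and helpers here are toy stand-ins):

    /* Userspace analog of lockdep_assert_held(): assert the lock is
     * held by the caller instead of documenting it in a comment.
     */
    #include <assert.h>
    #include <pthread.h>
    #include <stdbool.h>

    struct subsys {
            pthread_mutex_t lock;
            bool locked;            /* stand-in for lockdep's bookkeeping */
    };

    static void subsys_lock(struct subsys *s)
    {
            pthread_mutex_lock(&s->lock);
            s->locked = true;
    }

    static void subsys_unlock(struct subsys *s)
    {
            s->locked = false;
            pthread_mutex_unlock(&s->lock);
    }

    /* Must be called with s->lock held -- now asserted, not just commented. */
    static void setup_ns_map(struct subsys *s)
    {
            assert(s->locked);      /* kernel: lockdep_assert_held(&...->lock) */
            /* ... populate the namespace map ... */
    }

    int main(void)
    {
            struct subsys s = { .lock = PTHREAD_MUTEX_INITIALIZER };

            subsys_lock(&s);
            setup_ns_map(&s);
            subsys_unlock(&s);
            return 0;
    }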
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -54,6 +54,8 @@ struct nvmet_fc_ls_req_op {     /* for an LS RQST XMT */
         int                     ls_error;
         struct list_head        lsreq_list; /* tgtport->ls_req_list */
         bool                    req_queued;
+
+        struct work_struct      put_work;
 };
 
@@ -111,8 +113,6 @@ struct nvmet_fc_tgtport {
         struct nvmet_fc_port_entry      *pe;
         struct kref                     ref;
         u32                             max_sg_cnt;
-
-        struct work_struct              put_work;
 };
 
 struct nvmet_fc_port_entry {
@@ -235,12 +235,13 @@ static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
-static void nvmet_fc_put_tgtport_work(struct work_struct *work)
+static void nvmet_fc_put_lsop_work(struct work_struct *work)
 {
-        struct nvmet_fc_tgtport *tgtport =
-                container_of(work, struct nvmet_fc_tgtport, put_work);
+        struct nvmet_fc_ls_req_op *lsop =
+                container_of(work, struct nvmet_fc_ls_req_op, put_work);
 
-        nvmet_fc_tgtport_put(tgtport);
+        nvmet_fc_tgtport_put(lsop->tgtport);
+        kfree(lsop);
 }
 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
 static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
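The reworked worker recovers its nvmet_fc_ls_req_op from the embedded work_struct via container_of(), which is why put_work had to move into the lsop: the work item must be embedded in the object whose lifetime it manages, so the worker can both drop the tgtport reference and free the lsop itself. A runnable userspace illustration of the container_of() idiom (the structs here are toy stand-ins):

    /* Runnable illustration of the container_of() idiom used by
     * nvmet_fc_put_lsop_work(); types are toy stand-ins.
     */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_struct { int pending; };

    struct ls_req_op {
            int ls_error;
            struct work_struct put_work;    /* embedded work item */
    };

    static void put_work_fn(struct work_struct *work)
    {
            /* recover the containing object from the embedded member */
            struct ls_req_op *lsop =
                    container_of(work, struct ls_req_op, put_work);

            printf("lsop->ls_error = %d\n", lsop->ls_error);
    }

    int main(void)
    {
            struct ls_req_op op = { .ls_error = -5 };

            put_work_fn(&op.put_work);
            return 0;
    }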
@@ -367,7 +368,7 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
                                   DMA_BIDIRECTIONAL);
 
 out_putwork:
-        queue_work(nvmet_wq, &tgtport->put_work);
+        queue_work(nvmet_wq, &lsop->put_work);
 }
 
 static int
@@ -388,6 +389,7 @@ __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
         lsreq->done = done;
         lsop->req_queued = false;
         INIT_LIST_HEAD(&lsop->lsreq_list);
+        INIT_WORK(&lsop->put_work, nvmet_fc_put_lsop_work);
 
         lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
                                   lsreq->rqstlen + lsreq->rsplen,
@@ -447,8 +449,6 @@ nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
         __nvmet_fc_finish_ls_req(lsop);
 
         /* fc-nvme target doesn't care about success or failure of cmd */
-
-        kfree(lsop);
 }
 
 /*
@@ -1075,6 +1075,14 @@ nvmet_fc_delete_assoc_work(struct work_struct *work)
 static void
 nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
 {
+        int terminating;
+
+        terminating = atomic_xchg(&assoc->terminating, 1);
+
+        /* if already terminating, do nothing */
+        if (terminating)
+                return;
+
         nvmet_fc_tgtport_get(assoc->tgtport);
         if (!queue_work(nvmet_wq, &assoc->del_work))
                 nvmet_fc_tgtport_put(assoc->tgtport);
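Moving the atomic_xchg() guard into nvmet_fc_schedule_delete_assoc() makes the terminating flag a schedule-once gate: whichever caller flips it first queues del_work, every later caller bails out, so association deletion can no longer be scheduled twice. The idiom as a runnable C11 sketch (requires C11 threads; glibc 2.28 or later):

    /* C11 sketch of the schedule-once gate built from atomic_xchg():
     * two threads race, exactly one schedules the deletion.
     */
    #include <stdatomic.h>
    #include <stdio.h>
    #include <threads.h>

    static atomic_int terminating;

    static int schedule_delete(void *arg)
    {
            (void)arg;
            /* old value 0 means we won the race; 1 means someone already did */
            if (atomic_exchange(&terminating, 1) == 0)
                    printf("deletion scheduled once\n");
            return 0;
    }

    int main(void)
    {
            thrd_t a, b;

            thrd_create(&a, schedule_delete, NULL);
            thrd_create(&b, schedule_delete, NULL);
            thrd_join(a, NULL);
            thrd_join(b, NULL);
            return 0;
    }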
@@ -1202,13 +1210,7 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
 {
         struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
         unsigned long flags;
-        int i, terminating;
-
-        terminating = atomic_xchg(&assoc->terminating, 1);
-
-        /* if already terminating, do nothing */
-        if (terminating)
-                return;
+        int i;
 
         spin_lock_irqsave(&tgtport->lock, flags);
         list_del_rcu(&assoc->a_list);
@@ -1410,7 +1412,6 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
         kref_init(&newrec->ref);
         ida_init(&newrec->assoc_cnt);
         newrec->max_sg_cnt = template->max_sgl_segments;
-        INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work);
 
         ret = nvmet_fc_alloc_ls_iodlist(newrec);
         if (ret) {
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -496,13 +496,15 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
         if (!targetport) {
                 /*
                  * The target port is gone. The target doesn't expect any
-                 * response anymore and the ->done call is not valid
-                 * because the resources have been freed by
-                 * nvmet_fc_free_pending_reqs.
+                 * response anymore and thus lsreq can't be accessed anymore.
+                 *
+                 * We end up here from delete association exchange:
+                 * nvmet_fc_xmt_disconnect_assoc sends an async request.
                  *
                  * Return success because this is what LLDDs do; silently
                  * drop the response.
                  */
+                lsrsp->done(lsrsp);
                 kmem_cache_free(lsreq_cache, tls_req);
                 return 0;
         }