Pull more block updates from Jens Axboe:
- NVMe pull request via Christoph:
    - TCP error handling fix (Shin'ichiro Kawasaki)
    - TCP I/O stall handling fixes (Hannes Reinecke)
    - fix command limits status code (Keith Busch)
    - support vectored buffers also for passthrough (Pavel Begunkov)
    - spelling fixes (Yi Zhang)
- MD pull request via Yu:
    - fix REQ_RAHEAD and REQ_NOWAIT IO err handling for raid1/10
    - fix max_write_behind setting for dm-raid
    - some minor cleanups
- Integrity data direction fix and cleanup
- bcache NULL pointer fix
- Fix for loop missing write start/end handling
- Decouple hardware queues and IO threads in ublk
- Slew of ublk selftests additions and updates
* tag 'block-6.16-20250606' of git://git.kernel.dk/linux: (29 commits)
nvme: spelling fixes
nvme-tcp: fix I/O stalls on congested sockets
nvme-tcp: sanitize request list handling
nvme-tcp: remove tag set when second admin queue config fails
nvme: enable vectored registered bufs for passthrough cmds
nvme: fix implicit bool to flags conversion
nvme: fix command limits status code
selftests: ublk: kublk: improve behavior on init failure
block: flip iter directions in blk_rq_integrity_map_user()
block: drop direction param from bio_integrity_copy_user()
selftests: ublk: cover PER_IO_DAEMON in more stress tests
Documentation: ublk: document UBLK_F_PER_IO_DAEMON
selftests: ublk: add stress test for per io daemons
selftests: ublk: add functional test for per io daemons
selftests: ublk: kublk: decouple ublk_queues from ublk server threads
selftests: ublk: kublk: move per-thread data out of ublk_queue
selftests: ublk: kublk: lift queue initialization out of thread
selftests: ublk: kublk: tie sqe allocation to io instead of queue
selftests: ublk: kublk: plumb q_id in io_uring user_data
ublk: have a per-io daemon instead of a per-queue daemon
...
# SPDX-License-Identifier: GPL-2.0-only
config NVME_CORE
	tristate

config BLK_DEV_NVME
	tristate "NVM Express block device"
	depends on PCI && BLOCK
	select NVME_CORE
	help
	  The NVM Express driver is for solid state drives directly
	  connected to the PCI or PCI Express bus. If you know you
	  don't have one of these, it is safe to answer N.

	  To compile this driver as a module, choose M here: the
	  module will be called nvme.

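# Illustrative usage sketch (not part of the config text above): with the
# driver loaded, attached controllers and namespaces can be listed with
# nvme-cli:
#
#   modprobe nvme    # load the driver if built as a module
#   nvme list        # enumerate /dev/nvmeXnY namespaces
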
config NVME_MULTIPATH
	bool "NVMe multipath support"
	depends on NVME_CORE
	help
	  This option controls support for multipath access to NVMe
	  subsystems. If enabled, multipath access to NVMe subsystems is
	  included in the kernel. If disabled, each controller/namespace
	  receives its own /dev/nvmeXnY device entry and NVMe multipath
	  access is not supported.

	  If unsure, say Y.

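# Illustrative note: when built in, native multipath can still be disabled
# at boot via the nvme_core.multipath module parameter (each path then
# gets its own /dev/nvmeXnY node):
#
#   nvme_core.multipath=N    # on the kernel command line
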
config NVME_VERBOSE_ERRORS
	bool "NVMe verbose error reporting"
	depends on NVME_CORE
	help
	  This option enables verbose reporting for NVMe errors. The
	  error translation table will grow the kernel image size by
	  about 4 KB.

config NVME_HWMON
	bool "NVMe hardware monitoring"
	depends on (NVME_CORE=y && HWMON=y) || (NVME_CORE=m && HWMON)
	help
	  This provides support for NVMe hardware monitoring. If enabled,
	  a hardware monitoring device will be created for each NVMe drive
	  in the system.

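# Illustrative usage sketch: the hwmon device exposes drive temperature
# through standard sysfs attributes (hwmonX is a placeholder index):
#
#   grep . /sys/class/hwmon/hwmon*/name       # locate the "nvme" entry
#   cat /sys/class/hwmon/hwmonX/temp1_input   # millidegrees Celsius
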
config NVME_FABRICS
	select NVME_CORE
	select NVME_KEYRING if NVME_TCP_TLS
	tristate

config NVME_RDMA
	tristate "NVM Express over Fabrics RDMA host driver"
	depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK
	select NVME_FABRICS
	select SG_POOL
	help
	  This provides support for the NVMe over Fabrics protocol using
	  the RDMA (InfiniBand, RoCE, iWARP) transport. This allows you
	  to use remote block devices exported using the NVMe protocol set.

	  To configure an NVMe over Fabrics controller, use the nvme-cli
	  tool from https://github.com/linux-nvme/nvme-cli.

	  If unsure, say N.

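# Illustrative usage sketch (address, port, and NQN are placeholders):
#
#   nvme discover -t rdma -a 192.168.1.10 -s 4420
#   nvme connect  -t rdma -a 192.168.1.10 -s 4420 -n <subsystem-nqn>
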
config NVME_FC
	tristate "NVM Express over Fabrics FC host driver"
	depends on BLOCK
	depends on HAS_DMA
	select NVME_FABRICS
	select SG_POOL
	help
	  This provides support for the NVMe over Fabrics protocol using
	  the FC transport. This allows you to use remote block devices
	  exported using the NVMe protocol set.

	  To configure an NVMe over Fabrics controller, use the nvme-cli
	  tool from https://github.com/linux-nvme/nvme-cli.

	  If unsure, say N.

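# Illustrative usage sketch (WWNN/WWPN values are placeholders): FC targets
# are addressed by node and port names rather than IP addresses:
#
#   nvme connect -t fc \
#       --traddr "nn-0x<wwnn>:pn-0x<wwpn>" \
#       --host-traddr "nn-0x<wwnn>:pn-0x<wwpn>" \
#       -n <subsystem-nqn>
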
config NVME_TCP
	tristate "NVM Express over Fabrics TCP host driver"
	depends on INET
	depends on BLOCK
	select CRC32
	select NET_CRC32C
	select NVME_FABRICS
	help
	  This provides support for the NVMe over Fabrics protocol using
	  the TCP transport. This allows you to use remote block devices
	  exported using the NVMe protocol set.

	  To configure an NVMe over Fabrics controller, use the nvme-cli
	  tool from https://github.com/linux-nvme/nvme-cli.

	  If unsure, say N.

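# Illustrative usage sketch (address and NQN are placeholders; 4420 is the
# IANA-assigned NVMe over Fabrics port):
#
#   nvme discover -t tcp -a 10.0.0.2 -s 4420
#   nvme connect  -t tcp -a 10.0.0.2 -s 4420 -n <subsystem-nqn>
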
config NVME_TCP_TLS
	bool "NVMe over Fabrics TCP TLS encryption support"
	depends on NVME_TCP
	select NET_HANDSHAKE
	select KEYS
	select TLS
	help
	  Enables TLS encryption for NVMe TCP using the netlink handshake API.

	  The TLS handshake daemon is available at
	  https://github.com/oracle/ktls-utils.

	  If unsure, say N.

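# Illustrative usage sketch (assumes a recent nvme-cli with --tls support):
# the tlshd daemon from ktls-utils must be running before connecting:
#
#   systemctl start tlshd
#   nvme connect -t tcp -a 10.0.0.2 -s 4420 -n <subsystem-nqn> --tls
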
config NVME_HOST_AUTH
	bool "NVMe over Fabrics In-Band Authentication on the host side"
	depends on NVME_CORE
	select NVME_AUTH
	select NVME_KEYRING
	help
	  This provides support for NVMe over Fabrics In-Band
	  Authentication on the host side.

	  If unsure, say N.

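# Illustrative usage sketch (key and NQNs are placeholders): DH-HMAC-CHAP
# secrets are generated and supplied through nvme-cli:
#
#   nvme gen-dhchap-key -n <host-nqn>
#   nvme connect -t tcp -a 10.0.0.2 -n <subsystem-nqn> \
#       --dhchap-secret "DHHC-1:00:<base64-key>:"
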
config NVME_APPLE
	tristate "Apple ANS2 NVM Express host driver"
	depends on OF && BLOCK
	depends on APPLE_RTKIT && APPLE_SART
	depends on ARCH_APPLE || COMPILE_TEST
	select NVME_CORE
	help
	  This provides support for the NVMe controller embedded in Apple
	  SoCs such as the M1.

	  To compile this driver as a module, choose M here: the
	  module will be called nvme-apple.