selftests: ublk: add single sqe allocator helper

Unify the two sqe allocator helpers into a single one, which will be
used to support more cases, such as ublk stripe, where a variable
number of sqes has to be allocated.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20250322093218.431419-3-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit f2639ed11e (parent 723977cab4)
Author:    Ming Lei <ming.lei@redhat.com>
Date:      2025-03-22 17:32:10 +08:00
Committer: Jens Axboe <axboe@kernel.dk>

 3 files changed, 44 insertions(+), 52 deletions(-)
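The call pattern after this change: the caller owns a small on-stack array,
asks for however many sqes it needs, and the helper flushes the ring first
when space is short. A minimal sketch of the single-sqe case (illustrative
only; it mirrors the converted call sites below):

	struct io_uring_sqe *sqe[1];

	ublk_queue_alloc_sqes(q, sqe, 1);
	if (!sqe[0])
		return -ENOMEM;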

[file 1 of 3]

@@ -69,44 +69,42 @@ static int loop_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_de
 {
 	int zc = ublk_queue_use_zc(q);
 	enum io_uring_op op = ublk_to_uring_op(iod, zc);
-	struct io_uring_sqe *reg;
-	struct io_uring_sqe *rw;
-	struct io_uring_sqe *ureg;
+	struct io_uring_sqe *sqe[3];
 
 	if (!zc) {
-		rw = ublk_queue_alloc_sqe(q);
-		if (!rw)
+		ublk_queue_alloc_sqes(q, sqe, 1);
+		if (!sqe[0])
 			return -ENOMEM;
 
-		io_uring_prep_rw(op, rw, 1 /*fds[1]*/,
+		io_uring_prep_rw(op, sqe[0], 1 /*fds[1]*/,
 				(void *)iod->addr,
 				iod->nr_sectors << 9,
 				iod->start_sector << 9);
-		io_uring_sqe_set_flags(rw, IOSQE_FIXED_FILE);
+		io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
 		q->io_inflight++;
 		/* bit63 marks us as tgt io */
-		rw->user_data = build_user_data(tag, op, UBLK_IO_TGT_NORMAL, 1);
+		sqe[0]->user_data = build_user_data(tag, op, UBLK_IO_TGT_NORMAL, 1);
 		return 0;
 	}
 
-	ublk_queue_alloc_sqe3(q, &reg, &rw, &ureg);
+	ublk_queue_alloc_sqes(q, sqe, 3);
 
-	io_uring_prep_buf_register(reg, 0, tag, q->q_id, tag);
-	reg->user_data = build_user_data(tag, 0xfe, 1, 1);
-	reg->flags |= IOSQE_CQE_SKIP_SUCCESS;
-	reg->flags |= IOSQE_IO_LINK;
+	io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, tag);
+	sqe[0]->user_data = build_user_data(tag, 0xfe, 1, 1);
+	sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS;
+	sqe[0]->flags |= IOSQE_IO_LINK;
 
-	io_uring_prep_rw(op, rw, 1 /*fds[1]*/, 0,
+	io_uring_prep_rw(op, sqe[1], 1 /*fds[1]*/, 0,
 		iod->nr_sectors << 9,
 		iod->start_sector << 9);
-	rw->buf_index = tag;
-	rw->flags |= IOSQE_FIXED_FILE;
-	rw->flags |= IOSQE_IO_LINK;
-	rw->user_data = build_user_data(tag, op, UBLK_IO_TGT_ZC_OP, 1);
+	sqe[1]->buf_index = tag;
+	sqe[1]->flags |= IOSQE_FIXED_FILE;
+	sqe[1]->flags |= IOSQE_IO_LINK;
+	sqe[1]->user_data = build_user_data(tag, op, UBLK_IO_TGT_ZC_OP, 1);
 	q->io_inflight++;
 
-	io_uring_prep_buf_unregister(ureg, 0, tag, q->q_id, tag);
-	ureg->user_data = build_user_data(tag, 0xff, UBLK_IO_TGT_ZC_BUF, 1);
+	io_uring_prep_buf_unregister(sqe[2], 0, tag, q->q_id, tag);
+	sqe[2]->user_data = build_user_data(tag, 0xff, UBLK_IO_TGT_ZC_BUF, 1);
 	q->io_inflight++;
 
 	return 0;
@@ -116,17 +114,17 @@ static int loop_queue_tgt_io(struct ublk_queue *q, int tag)
 {
 	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
 	unsigned ublk_op = ublksrv_get_op(iod);
-	struct io_uring_sqe *sqe;
+	struct io_uring_sqe *sqe[1];
 
 	switch (ublk_op) {
 	case UBLK_IO_OP_FLUSH:
-		sqe = ublk_queue_alloc_sqe(q);
-		if (!sqe)
+		ublk_queue_alloc_sqes(q, sqe, 1);
+		if (!sqe[0])
 			return -ENOMEM;
-		io_uring_prep_fsync(sqe, 1 /*fds[1]*/, IORING_FSYNC_DATASYNC);
-		io_uring_sqe_set_flags(sqe, IOSQE_FIXED_FILE);
+		io_uring_prep_fsync(sqe[0], 1 /*fds[1]*/, IORING_FSYNC_DATASYNC);
+		io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
 		q->io_inflight++;
-		sqe->user_data = build_user_data(tag, ublk_op, UBLK_IO_TGT_NORMAL, 1);
+		sqe[0]->user_data = build_user_data(tag, ublk_op, UBLK_IO_TGT_NORMAL, 1);
 		break;
 	case UBLK_IO_OP_WRITE_ZEROES:
 	case UBLK_IO_OP_DISCARD:

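Background on the zero-copy path above: the three sqes are chained with
IOSQE_IO_LINK so that buffer register, the actual rw, and buffer unregister
execute strictly in order, and IOSQE_CQE_SKIP_SUCCESS suppresses the CQE of
the register step when it succeeds. A generic liburing sketch of a two-sqe
link, independent of ublk (plain nop requests, for illustration only):

	struct io_uring ring;
	struct io_uring_sqe *a, *b;

	io_uring_queue_init(8, &ring, 0);
	a = io_uring_get_sqe(&ring);
	io_uring_prep_nop(a);
	a->flags |= IOSQE_IO_LINK;	/* b does not start until a completes */
	b = io_uring_get_sqe(&ring);
	io_uring_prep_nop(b);
	io_uring_submit(&ring);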
[file 2 of 3]

@@ -420,7 +420,7 @@ static void ublk_dev_unprep(struct ublk_dev *dev)
 int ublk_queue_io_cmd(struct ublk_queue *q, struct ublk_io *io, unsigned tag)
 {
 	struct ublksrv_io_cmd *cmd;
-	struct io_uring_sqe *sqe;
+	struct io_uring_sqe *sqe[1];
 	unsigned int cmd_op = 0;
 	__u64 user_data;
 
@@ -441,24 +441,24 @@ int ublk_queue_io_cmd(struct ublk_queue *q, struct ublk_io *io, unsigned tag)
 	if (io_uring_sq_space_left(&q->ring) < 1)
 		io_uring_submit(&q->ring);
 
-	sqe = ublk_queue_alloc_sqe(q);
-	if (!sqe) {
+	ublk_queue_alloc_sqes(q, sqe, 1);
+	if (!sqe[0]) {
 		ublk_err("%s: run out of sqe %d, tag %d\n",
 				__func__, q->q_id, tag);
 		return -1;
 	}
 
-	cmd = (struct ublksrv_io_cmd *)ublk_get_sqe_cmd(sqe);
+	cmd = (struct ublksrv_io_cmd *)ublk_get_sqe_cmd(sqe[0]);
 
 	if (cmd_op == UBLK_U_IO_COMMIT_AND_FETCH_REQ)
 		cmd->result = io->result;
 
 	/* These fields should be written once, never change */
-	ublk_set_sqe_cmd_op(sqe, cmd_op);
-	sqe->fd = 0; /* dev->fds[0] */
-	sqe->opcode = IORING_OP_URING_CMD;
-	sqe->flags = IOSQE_FIXED_FILE;
-	sqe->rw_flags = 0;
+	ublk_set_sqe_cmd_op(sqe[0], cmd_op);
+	sqe[0]->fd = 0; /* dev->fds[0] */
+	sqe[0]->opcode = IORING_OP_URING_CMD;
+	sqe[0]->flags = IOSQE_FIXED_FILE;
+	sqe[0]->rw_flags = 0;
 	cmd->tag = tag;
 	cmd->q_id = q->q_id;
 	if (!(q->state & UBLKSRV_NO_BUF))
@@ -467,7 +467,7 @@ int ublk_queue_io_cmd(struct ublk_queue *q, struct ublk_io *io, unsigned tag)
 		cmd->addr = 0;
 
 	user_data = build_user_data(tag, _IOC_NR(cmd_op), 0, 0);
-	io_uring_sqe_set_data64(sqe, user_data);
+	io_uring_sqe_set_data64(sqe[0], user_data);
 
 	io->flags = 0;

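For context, the URING_CMD sqe prepared here is reaped like any other
io_uring completion. A generic sketch of the reaping side (handle_cqe() is a
hypothetical handler, not a function from this patch; the real daemon's event
loop is not shown in this view):

	struct io_uring_cqe *cqe;

	io_uring_submit_and_wait(&q->ring, 1);
	while (io_uring_peek_cqe(&q->ring, &cqe) == 0) {
		handle_cqe(cqe);		/* dispatch on cqe->user_data */
		io_uring_cqe_seen(&q->ring, cqe);
	}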
[file 3 of 3]

@@ -221,28 +221,22 @@ static inline void ublk_dbg(int level, const char *fmt, ...)
 	}
 }
 
-static inline struct io_uring_sqe *ublk_queue_alloc_sqe(struct ublk_queue *q)
+static inline int ublk_queue_alloc_sqes(struct ublk_queue *q,
+		struct io_uring_sqe *sqes[], int nr_sqes)
 {
 	unsigned left = io_uring_sq_space_left(&q->ring);
+	int i;
 
-	if (left < 1)
+	if (left < nr_sqes)
 		io_uring_submit(&q->ring);
-	return io_uring_get_sqe(&q->ring);
-}
 
-static inline void ublk_queue_alloc_sqe3(struct ublk_queue *q,
-		struct io_uring_sqe **sqe1, struct io_uring_sqe **sqe2,
-		struct io_uring_sqe **sqe3)
-{
-	struct io_uring *r = &q->ring;
-	unsigned left = io_uring_sq_space_left(r);
-
-	if (left < 3)
-		io_uring_submit(r);
-
-	*sqe1 = io_uring_get_sqe(r);
-	*sqe2 = io_uring_get_sqe(r);
-	*sqe3 = io_uring_get_sqe(r);
+	for (i = 0; i < nr_sqes; i++) {
+		sqes[i] = io_uring_get_sqe(&q->ring);
+		if (!sqes[i])
+			return i;
+	}
+	return nr_sqes;
 }
 
 static inline void io_uring_prep_buf_register(struct io_uring_sqe *sqe,
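The variable-count case this refactor enables, per the commit message, is a
stripe target that needs one sqe per backing file touched by an I/O. A
hypothetical sketch (stripe_queue_tgt_io(), NR_STRIPE_MAX and the prep step
are made up for illustration; only ublk_queue_alloc_sqes() is from this
patch):

	static int stripe_queue_tgt_io(struct ublk_queue *q, int nr_sqes)
	{
		struct io_uring_sqe *sqe[NR_STRIPE_MAX];
		int i;

		/* helper returns how many sqes it actually obtained */
		if (ublk_queue_alloc_sqes(q, sqe, nr_sqes) != nr_sqes)
			return -ENOMEM;	/* ring exhausted mid-allocation */

		for (i = 0; i < nr_sqes; i++) {
			/* prep one read/write per stripe chunk on sqe[i] */
		}
		return 0;
	}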