io_uring/kbuf: move locking into io_kbuf_drop()

Move the burden of locking out of the caller into io_kbuf_drop(), which
will help with further refactoring.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/530f0cf1f06963029399f819a9a58b1a34bebef3.1738724373.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Commit: dd4fbb11e7 (parent: 9afe6847cf)
Author: Pavel Begunkov, 2025-02-05 11:36:44 +00:00
Committer: Jens Axboe
2 changed files with 3 additions and 6 deletions
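For illustration, a minimal before/after sketch of the caller-side effect described above; the _before/_after function names are hypothetical, and the bodies simply mirror the hunks below.

/* Before: the caller (e.g. io_clean_op()) wraps the drop in completion_lock. */
static void io_clean_op_before(struct io_kiocb *req)
{
	if (req->flags & REQ_F_BUFFER_SELECTED) {
		spin_lock(&req->ctx->completion_lock);
		io_kbuf_drop(req);
		spin_unlock(&req->ctx->completion_lock);
	}
}

/*
 * After: io_kbuf_drop() takes completion_lock internally, so the caller
 * only checks the flag.
 */
static void io_clean_op_after(struct io_kiocb *req)
{
	if (unlikely(req->flags & REQ_F_BUFFER_SELECTED))
		io_kbuf_drop(req);
}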

--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c

@@ -398,11 +398,8 @@ static bool req_need_defer(struct io_kiocb *req, u32 seq)
 static void io_clean_op(struct io_kiocb *req)
 {
-	if (req->flags & REQ_F_BUFFER_SELECTED) {
-		spin_lock(&req->ctx->completion_lock);
+	if (unlikely(req->flags & REQ_F_BUFFER_SELECTED))
 		io_kbuf_drop(req);
-		spin_unlock(&req->ctx->completion_lock);
-	}
 
 	if (req->flags & REQ_F_NEED_CLEANUP) {
 		const struct io_cold_def *def = &io_cold_defs[req->opcode];

--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h

@@ -174,13 +174,13 @@ static inline void __io_put_kbuf_list(struct io_kiocb *req, int len,
 static inline void io_kbuf_drop(struct io_kiocb *req)
 {
-	lockdep_assert_held(&req->ctx->completion_lock);
-
 	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
 		return;
 
+	spin_lock(&req->ctx->completion_lock);
 	/* len == 0 is fine here, non-ring will always drop all of it */
 	__io_put_kbuf_list(req, 0, &req->ctx->io_buffers_comp);
+	spin_unlock(&req->ctx->completion_lock);
 }
 
 static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int len,