mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-16 00:51:51 -04:00
io_uring/rw: add defensive hardening for negative kbuf lengths
No real bug here; this is just a bit of defensive hardening, ensuring that whatever gets passed into io_put_kbuf() is always >= 0 and not some random error value.

Reviewed-by: Gabriel Krisman Bertazi <krisman@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
@@ -580,7 +580,7 @@ void io_req_rw_complete(struct io_tw_req tw_req, io_tw_token_t tw)
|
||||
io_req_io_end(req);
|
||||
|
||||
if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
|
||||
req->cqe.flags |= io_put_kbuf(req, req->cqe.res, NULL);
|
||||
req->cqe.flags |= io_put_kbuf(req, max(req->cqe.res, 0), NULL);
|
||||
|
||||
io_req_rw_cleanup(req, 0);
|
||||
io_req_task_complete(tw_req, tw);
|
||||
@@ -1379,7 +1379,7 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
|
||||
list_del(&req->iopoll_node);
|
||||
wq_list_add_tail(&req->comp_list, &ctx->submit_state.compl_reqs);
|
||||
nr_events++;
|
||||
req->cqe.flags = io_put_kbuf(req, req->cqe.res, NULL);
|
||||
req->cqe.flags = io_put_kbuf(req, max(req->cqe.res, 0), NULL);
|
||||
if (!io_is_uring_cmd(req))
|
||||
io_req_rw_cleanup(req, 0);
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user