io_uring: fix iowq_limits data race in tctx node addition
__io_uring_add_tctx_node() reads ctx->int_flags and
ctx->iowq_limits[0..1] without holding ctx->uring_lock, while
io_register_iowq_max_workers() writes these same fields under the lock.
This is mostly an application problem if you deliberately make these race,
but let's silence KCSAN by just grabbing ->uring_lock around the operation.
This is a slow path anyway, and ->uring_lock will be grabbed by submission
right after.
Fixes: 2e480058dd ("io-wq: provide a way to limit max number of workers")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
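
For illustration only (this is not part of the commit), the application-side
pattern that can provoke the race might look like the liburing sketch below:
one thread updates the io-wq worker limits via
io_uring_register_iowq_max_workers() while a newly created thread submits for
the first time, which sets up its tctx node in the kernel. The limit values
and thread structure here are made up for the example.

/*
 * Hypothetical reproducer sketch, assuming liburing >= 2.1 and a kernel
 * supporting IORING_REGISTER_IOWQ_MAX_WORKERS. Error handling omitted;
 * build with -luring -lpthread.
 */
#include <liburing.h>
#include <pthread.h>

static struct io_uring ring;

/* The first submission from a new thread allocates its io_uring_task and
 * runs __io_uring_add_tctx_node(), which reads ctx->int_flags and
 * ctx->iowq_limits[]. */
static void *submitter(void *arg)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

	io_uring_prep_nop(sqe);
	io_uring_submit(&ring);
	return NULL;
}

int main(void)
{
	/* [0] = bounded workers, [1] = unbounded workers; values arbitrary */
	unsigned int limits[2] = { 4, 4 };
	pthread_t t;

	io_uring_queue_init(8, &ring, 0);
	pthread_create(&t, NULL, submitter, NULL);

	/* Racing writer: io_register_iowq_max_workers() updates
	 * ctx->iowq_limits[] under ->uring_lock. */
	io_uring_register_iowq_max_workers(&ring, limits);

	pthread_join(&t, NULL);
	io_uring_queue_exit(&ring);
	return 0;
}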
@@ -146,9 +146,13 @@ int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
 		if (IS_ERR(tctx))
 			return PTR_ERR(tctx);
 
-		if (ctx->int_flags & IO_RING_F_IOWQ_LIMITS_SET) {
-			unsigned int limits[2] = { ctx->iowq_limits[0],
-						   ctx->iowq_limits[1], };
+		if (data_race(ctx->int_flags) & IO_RING_F_IOWQ_LIMITS_SET) {
+			unsigned int limits[2];
+
+			mutex_lock(&ctx->uring_lock);
+			limits[0] = ctx->iowq_limits[0];
+			limits[1] = ctx->iowq_limits[1];
+			mutex_unlock(&ctx->uring_lock);
 
 			ret = io_wq_max_workers(tctx->io_wq, limits);
 			if (ret)