mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-04 01:20:01 -04:00
Merge tag 'io_uring-6.15-20250410' of git://git.kernel.dk/linux
Pull io_uring fixes from Jens Axboe:

 - Reject zero sized legacy provided buffers upfront. No ill side
   effects from this one, only really done to shut up a silly syzbot
   test case.

 - Fix for a regression in tag posting for registered files or buffers,
   where the tag would be posted even when the registration failed.

 - Two minor zcrx cleanups for code added this merge window.

* tag 'io_uring-6.15-20250410' of git://git.kernel.dk/linux:
  io_uring/kbuf: reject zero sized provided buffers
  io_uring/zcrx: separate niov number from pages
  io_uring/zcrx: put refill data into separate cache line
  io_uring: don't post tag CQEs on file/buffer registration failure
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -504,6 +504,8 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
 	p->nbufs = tmp;
 	p->addr = READ_ONCE(sqe->addr);
 	p->len = READ_ONCE(sqe->len);
+	if (!p->len)
+		return -EINVAL;
 
 	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
 			       &size))
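The explicit length check matters because a zero p->len would sail through the multiplication overflow test below it: 0 * nbufs never overflows, so the overflow check alone cannot reject a zero-sized buffer. A minimal userspace sketch of the helper's semantics (an assumption for illustration: this uses the GCC/Clang builtin that the kernel's check_mul_overflow() in <linux/overflow.h> wraps; mul_overflows and the sample values are hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's check_mul_overflow() from
 * <linux/overflow.h>; both wrap the GCC/Clang builtin. Returns true
 * when a * b overflows, otherwise stores the product in *res. */
static bool mul_overflows(unsigned long a, unsigned long b, unsigned long *res)
{
	return __builtin_mul_overflow(a, b, res);
}

int main(void)
{
	unsigned long size = 0;
	bool ovf;

	/* A zero length never overflows no matter how many buffers, so
	 * the overflow check alone cannot reject a zero-sized buffer:
	 * hence the new explicit len == 0 test in the patch. */
	ovf = mul_overflows(0, 65535, &size);
	printf("len=0, nbufs=65535: overflow=%d size=%lu\n", ovf, size);

	/* A genuinely oversized product is what the check is for. */
	ovf = mul_overflows(~0UL, 2, &size);
	printf("len=ULONG_MAX, nbufs=2: overflow=%d\n", ovf);
	return 0;
}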
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -175,6 +175,18 @@ void io_rsrc_cache_free(struct io_ring_ctx *ctx)
 	io_alloc_cache_free(&ctx->imu_cache, kfree);
 }
 
+static void io_clear_table_tags(struct io_rsrc_data *data)
+{
+	int i;
+
+	for (i = 0; i < data->nr; i++) {
+		struct io_rsrc_node *node = data->nodes[i];
+
+		if (node)
+			node->tag = 0;
+	}
+}
+
 __cold void io_rsrc_data_free(struct io_ring_ctx *ctx,
 			      struct io_rsrc_data *data)
 {
@@ -583,6 +595,7 @@ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
 	io_file_table_set_alloc_range(ctx, 0, ctx->file_table.data.nr);
 	return 0;
 fail:
+	io_clear_table_tags(&ctx->file_table.data);
 	io_sqe_files_unregister(ctx);
 	return ret;
 }
@@ -902,8 +915,10 @@ int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
 	}
 
 	ctx->buf_table = data;
-	if (ret)
+	if (ret) {
+		io_clear_table_tags(&ctx->buf_table);
 		io_sqe_buffers_unregister(ctx);
+	}
 	return ret;
 }
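Context for the tag fix: resource teardown posts a CQE for every node that still carries a non-zero user-supplied tag, and the regression was that a failed registration still walked that path with the tags intact. Clearing the tags before reusing the unregister teardown keeps the failure case silent. A standalone sketch of the pattern (the struct node/table types, post_cqe(), and the sample tags are simplified stand-ins, not the kernel structures):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for io_rsrc_data/io_rsrc_node: a registered
 * resource can carry a user-supplied tag, and teardown posts a CQE
 * for every non-zero tag it finds. */
struct node { uint64_t tag; };
struct table { struct node *nodes[4]; unsigned nr; };

static void post_cqe(uint64_t tag)
{
	printf("CQE posted, tag=%" PRIu64 "\n", tag);
}

/* Mirrors the unregister path: any tag still set produces a CQE. */
static void unregister_all(struct table *t)
{
	for (unsigned i = 0; i < t->nr; i++)
		if (t->nodes[i] && t->nodes[i]->tag)
			post_cqe(t->nodes[i]->tag);
}

/* Mirrors io_clear_table_tags(): zero the tags so the failure path
 * stays silent when it reuses the unregister teardown. */
static void clear_table_tags(struct table *t)
{
	for (unsigned i = 0; i < t->nr; i++)
		if (t->nodes[i])
			t->nodes[i]->tag = 0;
}

int main(void)
{
	struct node a = { .tag = 42 }, b = { .tag = 0 };
	struct table t = { .nodes = { &a, &b }, .nr = 2 };

	clear_table_tags(&t);	/* registration failed: drop tags first... */
	unregister_all(&t);	/* ...so teardown posts no CQEs */
	return 0;
}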
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -181,7 +181,7 @@ static void io_zcrx_free_area(struct io_zcrx_area *area)
 	kvfree(area->nia.niovs);
 	kvfree(area->user_refs);
 	if (area->pages) {
-		unpin_user_pages(area->pages, area->nia.num_niovs);
+		unpin_user_pages(area->pages, area->nr_folios);
 		kvfree(area->pages);
 	}
 	kfree(area);
@@ -192,7 +192,7 @@ static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
 			       struct io_uring_zcrx_area_reg *area_reg)
 {
 	struct io_zcrx_area *area;
-	int i, ret, nr_pages;
+	int i, ret, nr_pages, nr_iovs;
 	struct iovec iov;
 
 	if (area_reg->flags || area_reg->rq_area_token)
@@ -220,27 +220,28 @@ static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
 		area->pages = NULL;
 		goto err;
 	}
-	area->nia.num_niovs = nr_pages;
+	area->nr_folios = nr_iovs = nr_pages;
+	area->nia.num_niovs = nr_iovs;
 
-	area->nia.niovs = kvmalloc_array(nr_pages, sizeof(area->nia.niovs[0]),
+	area->nia.niovs = kvmalloc_array(nr_iovs, sizeof(area->nia.niovs[0]),
 					 GFP_KERNEL | __GFP_ZERO);
 	if (!area->nia.niovs)
 		goto err;
 
-	area->freelist = kvmalloc_array(nr_pages, sizeof(area->freelist[0]),
+	area->freelist = kvmalloc_array(nr_iovs, sizeof(area->freelist[0]),
 					GFP_KERNEL | __GFP_ZERO);
 	if (!area->freelist)
 		goto err;
 
-	for (i = 0; i < nr_pages; i++)
+	for (i = 0; i < nr_iovs; i++)
 		area->freelist[i] = i;
 
-	area->user_refs = kvmalloc_array(nr_pages, sizeof(area->user_refs[0]),
+	area->user_refs = kvmalloc_array(nr_iovs, sizeof(area->user_refs[0]),
 					GFP_KERNEL | __GFP_ZERO);
 	if (!area->user_refs)
 		goto err;
 
-	for (i = 0; i < nr_pages; i++) {
+	for (i = 0; i < nr_iovs; i++) {
 		struct net_iov *niov = &area->nia.niovs[i];
 
 		niov->owner = &area->nia;
@@ -248,7 +249,7 @@ static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
 		atomic_set(&area->user_refs[i], 0);
 	}
 
-	area->free_count = nr_pages;
+	area->free_count = nr_iovs;
 	area->ifq = ifq;
 	/* we're only supporting one area per ifq for now */
 	area->area_id = 0;
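The point of carrying nr_folios separately even though it currently equals the niov count: unpin_user_pages() must be handed the number of page pins actually taken, and a later huge-page-backed area would make one pinned folio cover many net_iovs. A back-of-the-envelope illustration (assumptions: 4 KiB pages and a hypothetical 2 MiB folio; in the code above the two counts are still equal):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

int main(void)
{
	/* Hypothetical future case the split prepares for: one pinned
	 * 2 MiB folio backing many PAGE_SIZE net_iovs. In the current
	 * code order-0 pages are pinned, so nr_folios == nr_iovs. */
	unsigned long area_len = 2UL << 20;			/* 2 MiB area */
	unsigned long folio_order = 9;				/* 2 MiB folio */
	unsigned long nr_iovs = area_len >> PAGE_SHIFT;		/* 512 niovs */
	unsigned long nr_folios = area_len >> (PAGE_SHIFT + folio_order); /* 1 pin */

	/* unpin must use the pin count, never the niov count */
	printf("niovs=%lu, pinned folios=%lu\n", nr_iovs, nr_folios);
	return 0;
}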
--- a/io_uring/zcrx.h
+++ b/io_uring/zcrx.h
@@ -15,6 +15,7 @@ struct io_zcrx_area {
 	bool is_mapped;
 	u16 area_id;
 	struct page **pages;
+	unsigned long nr_folios;
 
 	/* freelist */
 	spinlock_t freelist_lock ____cacheline_aligned_in_smp;
@@ -26,11 +27,11 @@ struct io_zcrx_ifq {
 	struct io_ring_ctx *ctx;
 	struct io_zcrx_area *area;
 
+	spinlock_t rq_lock ____cacheline_aligned_in_smp;
 	struct io_uring *rq_ring;
 	struct io_uring_zcrx_rqe *rqes;
-	u32 rq_entries;
 	u32 cached_rq_head;
-	spinlock_t rq_lock;
+	u32 rq_entries;
 
 	u32 if_rxq;
 	struct device *dev;
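On the refill cache-line change: ____cacheline_aligned_in_smp starts rq_lock on a fresh cache line, so the lock, ring pointers, cached head, and entry count that the refill path touches together no longer share a line with the unrelated fields above them. A userspace analogue using C11 alignas (struct ifq_like and the 64-byte line size are assumptions for illustration):

#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

#define CACHELINE 64	/* assumption: typical x86-64 cache-line size */

/* Userspace analogue of ____cacheline_aligned_in_smp: start the
 * refill fields on their own cache line so contention on the refill
 * lock does not bounce the line holding the unrelated fields above. */
struct ifq_like {
	void *ctx;
	void *area;

	alignas(CACHELINE) int rq_lock;	/* stand-in for spinlock_t */
	void *rq_ring;
	void *rqes;
	unsigned cached_rq_head;
	unsigned rq_entries;
};

int main(void)
{
	size_t off = offsetof(struct ifq_like, rq_lock);

	printf("rq_lock starts at offset %zu (%s cache-line aligned)\n",
	       off, off % CACHELINE == 0 ? "is" : "is NOT");
	return 0;
}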