Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'io_uring-6.15-20250410' of git://git.kernel.dk/linux

Pull io_uring fixes from Jens Axboe:

- Reject zero sized legacy provided buffers upfront. No ill side
effects from this one, only really done to shut up a silly syzbot
test case.

- Fix for a regression in tag posting for registered files or buffers,
where the tag would be posted even when the registration failed.

- Two minor zcrx cleanups for code added this merge window.

* tag 'io_uring-6.15-20250410' of git://git.kernel.dk/linux:
io_uring/kbuf: reject zero sized provided buffers
io_uring/zcrx: separate niov number from pages
io_uring/zcrx: put refill data into separate cache line
io_uring: don't post tag CQEs on file/buffer registration failure

+31 -12
+2
io_uring/kbuf.c
@@ -504,6 +504,8 @@
 	p->nbufs = tmp;
 	p->addr = READ_ONCE(sqe->addr);
 	p->len = READ_ONCE(sqe->len);
+	if (!p->len)
+		return -EINVAL;
 
 	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
 			       &size))
+16 -1
io_uring/rsrc.c
@@ -175,6 +175,18 @@
 	io_alloc_cache_free(&ctx->imu_cache, kfree);
 }
 
+static void io_clear_table_tags(struct io_rsrc_data *data)
+{
+	int i;
+
+	for (i = 0; i < data->nr; i++) {
+		struct io_rsrc_node *node = data->nodes[i];
+
+		if (node)
+			node->tag = 0;
+	}
+}
+
 __cold void io_rsrc_data_free(struct io_ring_ctx *ctx,
 			      struct io_rsrc_data *data)
 {
@@ -595,6 +583,7 @@
 	io_file_table_set_alloc_range(ctx, 0, ctx->file_table.data.nr);
 	return 0;
 fail:
+	io_clear_table_tags(&ctx->file_table.data);
 	io_sqe_files_unregister(ctx);
 	return ret;
 }
@@ -915,8 +902,10 @@
 	}
 
 	ctx->buf_table = data;
-	if (ret)
+	if (ret) {
+		io_clear_table_tags(&ctx->buf_table);
 		io_sqe_buffers_unregister(ctx);
+	}
 	return ret;
 }
+10 -9
io_uring/zcrx.c
@@ -181,7 +181,7 @@
 	kvfree(area->nia.niovs);
 	kvfree(area->user_refs);
 	if (area->pages) {
-		unpin_user_pages(area->pages, area->nia.num_niovs);
+		unpin_user_pages(area->pages, area->nr_folios);
 		kvfree(area->pages);
 	}
 	kfree(area);
@@ -192,7 +192,7 @@
 			       struct io_uring_zcrx_area_reg *area_reg)
 {
 	struct io_zcrx_area *area;
-	int i, ret, nr_pages;
+	int i, ret, nr_pages, nr_iovs;
 	struct iovec iov;
 
 	if (area_reg->flags || area_reg->rq_area_token)
@@ -220,27 +220,28 @@
 		area->pages = NULL;
 		goto err;
 	}
-	area->nia.num_niovs = nr_pages;
+	area->nr_folios = nr_iovs = nr_pages;
+	area->nia.num_niovs = nr_iovs;
 
-	area->nia.niovs = kvmalloc_array(nr_pages, sizeof(area->nia.niovs[0]),
+	area->nia.niovs = kvmalloc_array(nr_iovs, sizeof(area->nia.niovs[0]),
					 GFP_KERNEL | __GFP_ZERO);
 	if (!area->nia.niovs)
 		goto err;
 
-	area->freelist = kvmalloc_array(nr_pages, sizeof(area->freelist[0]),
+	area->freelist = kvmalloc_array(nr_iovs, sizeof(area->freelist[0]),
					GFP_KERNEL | __GFP_ZERO);
 	if (!area->freelist)
 		goto err;
 
-	for (i = 0; i < nr_pages; i++)
+	for (i = 0; i < nr_iovs; i++)
 		area->freelist[i] = i;
 
-	area->user_refs = kvmalloc_array(nr_pages, sizeof(area->user_refs[0]),
+	area->user_refs = kvmalloc_array(nr_iovs, sizeof(area->user_refs[0]),
					 GFP_KERNEL | __GFP_ZERO);
 	if (!area->user_refs)
 		goto err;
 
-	for (i = 0; i < nr_pages; i++) {
+	for (i = 0; i < nr_iovs; i++) {
 		struct net_iov *niov = &area->nia.niovs[i];
 
 		niov->owner = &area->nia;
@@ -249,7 +248,7 @@
 		atomic_set(&area->user_refs[i], 0);
 	}
 
-	area->free_count = nr_pages;
+	area->free_count = nr_iovs;
 	area->ifq = ifq;
 	/* we're only supporting one area per ifq for now */
 	area->area_id = 0;
+3 -2
io_uring/zcrx.h
@@ -15,6 +15,7 @@
 	bool is_mapped;
 	u16 area_id;
 	struct page **pages;
+	unsigned long nr_folios;
 
 	/* freelist */
 	spinlock_t freelist_lock ____cacheline_aligned_in_smp;
@@ -27,11 +26,11 @@
 	struct io_ring_ctx *ctx;
 	struct io_zcrx_area *area;
 
+	spinlock_t rq_lock ____cacheline_aligned_in_smp;
 	struct io_uring *rq_ring;
 	struct io_uring_zcrx_rqe *rqes;
-	u32 rq_entries;
 	u32 cached_rq_head;
-	spinlock_t rq_lock;
+	u32 rq_entries;
 
 	u32 if_rxq;
 	struct device *dev;