Merge tag 'io_uring-6.17-20250808' of git://git.kernel.dk/linux

Pull io_uring fixes from Jens Axboe:

- Allow vectorized payloads for send/send-zc - like sendmsg, but
  without the hassle of setting up a msghdr. A usage sketch follows
  after this list.

- Fix for an integer wrap that should go to stable, spotted by syzbot.
  Nothing alarming here, as you need to be root to hit it. Nevertheless,
  it should get fixed; a minimal illustration of the wrap also follows
  below.
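
A minimal sketch of the userspace side of the vectorized send, using the
raw SQE layout rather than liburing. The helper name and the omitted ring
setup and error handling are mine, and it assumes a uapi header that
already defines IORING_SEND_VECTORIZED:

  #include <string.h>
  #include <sys/uio.h>
  #include <linux/io_uring.h>

  /* Point the SQE at an iovec array instead of a single buffer. */
  static void prep_send_vectorized(struct io_uring_sqe *sqe, int sockfd,
                                   const struct iovec *iov, unsigned nr_vecs)
  {
          memset(sqe, 0, sizeof(*sqe));
          sqe->opcode = IORING_OP_SEND;
          sqe->fd = sockfd;
          sqe->addr = (unsigned long) iov;      /* iovec array, not a byte buffer */
          sqe->len = nr_vecs;                   /* number of vectors, not bytes */
          sqe->ioprio = IORING_SEND_VECTORIZED; /* send/recv flags live in ->ioprio */
  }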
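
And a minimal userspace illustration of the wrap class the memmap fix
addresses (the constants are mine, this is not kernel code): with a
32-bit page count, the shift is performed in 32 bits and wraps before the
result is widened, so the cast has to happen before the shift:

  #include <stdio.h>
  #include <stddef.h>

  #define PAGE_SHIFT 12

  int main(void)
  {
          unsigned int nr_pages = 0x100001;                 /* just over 2^20 pages, i.e. > 4GB */
          size_t wrapped = nr_pages << PAGE_SHIFT;          /* 32-bit shift wraps to 0x1000 */
          size_t correct = (size_t) nr_pages << PAGE_SHIFT; /* widen first: 0x100001000 */

          printf("wrapped: %#zx correct: %#zx\n", wrapped, correct);
          return 0;
  }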

FWIW, kudos to the syzbot crew for having much nicer reproducers now,
with nicely annotated source code as well. This is particularly useful
because syzbot uses the raw interface rather than liburing, and
historically it's been difficult to turn a syzbot reproducer into a
meaningful test case. With the recent changes, that's not true anymore!

* tag 'io_uring-6.17-20250808' of git://git.kernel.dk/linux:
io_uring/memmap: cast nr_pages to size_t before shifting
io_uring/net: Allow to do vectorized send

 include/uapi/linux/io_uring.h | 4 ++++
 io_uring/memmap.c             | 2 +-
 io_uring/net.c                | 9 +++++++--
 3 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -392,12 +392,16 @@
  *	the starting buffer ID in cqe->flags as per
  *	usual for provided buffer usage. The buffers
  *	will be contiguous from the starting buffer ID.
+ *
+ * IORING_SEND_VECTORIZED	If set, SEND[_ZC] will take a pointer to a io_vec
+ *				to allow vectorized send operations.
  */
 #define IORING_RECVSEND_POLL_FIRST	(1U << 0)
 #define IORING_RECV_MULTISHOT		(1U << 1)
 #define IORING_RECVSEND_FIXED_BUF	(1U << 2)
 #define IORING_SEND_ZC_REPORT_USAGE	(1U << 3)
 #define IORING_RECVSEND_BUNDLE		(1U << 4)
+#define IORING_SEND_VECTORIZED		(1U << 5)
 
 /*
  * cqe.res for IORING_CQE_F_NOTIF if

diff --git a/io_uring/memmap.c b/io_uring/memmap.c
--- a/io_uring/memmap.c
+++ b/io_uring/memmap.c
@@ -156,7 +156,7 @@
 			       unsigned long mmap_offset)
 {
 	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN;
-	unsigned long size = mr->nr_pages << PAGE_SHIFT;
+	size_t size = (size_t) mr->nr_pages << PAGE_SHIFT;
 	unsigned long nr_allocated;
 	struct page **pages;
 	void *p;

diff --git a/io_uring/net.c b/io_uring/net.c
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -382,6 +382,10 @@
 	}
 	if (req->flags & REQ_F_BUFFER_SELECT)
 		return 0;
+
+	if (sr->flags & IORING_SEND_VECTORIZED)
+		return io_net_import_vec(req, kmsg, sr->buf, sr->len, ITER_SOURCE);
+
 	return import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
 }
 
@@ -413,7 +417,7 @@
 	return io_net_import_vec(req, kmsg, msg.msg_iov, msg.msg_iovlen, ITER_SOURCE);
 }
 
-#define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE)
+#define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE | IORING_SEND_VECTORIZED)
 
 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
@@ -1322,7 +1326,8 @@
 }
 
 #define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
-#define IO_ZC_FLAGS_VALID (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
+#define IO_ZC_FLAGS_VALID (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE | \
+			   IORING_SEND_VECTORIZED)
 
 int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
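
For completeness, a sketch of the zero-copy variant under the same
assumptions as the earlier example. IO_ZC_FLAGS_VALID now accepts
IORING_SEND_VECTORIZED, so the only differences from the plain send are
the opcode and SEND_ZC's usual completion model (a CQE for the send plus
a later notification CQE flagged IORING_CQE_F_NOTIF):

  #include <string.h>
  #include <sys/uio.h>
  #include <linux/io_uring.h>

  static void prep_send_zc_vectorized(struct io_uring_sqe *sqe, int sockfd,
                                      const struct iovec *iov, unsigned nr_vecs)
  {
          memset(sqe, 0, sizeof(*sqe));
          sqe->opcode = IORING_OP_SEND_ZC;
          sqe->fd = sockfd;
          sqe->addr = (unsigned long) iov; /* iovec array, as for plain send */
          sqe->len = nr_vecs;              /* number of vectors */
          sqe->ioprio = IORING_SEND_VECTORIZED;
  }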