// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "memmap.h"

/* BIDs are addressed by a 16-bit field in a CQE */
#define MAX_BIDS_PER_BGID (1 << 16)
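
/*
 * Illustrative note (not part of the original file): on completion, a
 * selected buffer is reported back to userspace via IORING_CQE_F_BUFFER in
 * cqe->flags, with the buffer ID stored in the upper 16 bits
 * (IORING_CQE_BUFFER_SHIFT), which is why a group cannot address more than
 * 64K buffer IDs.
 */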

struct kmem_cache *io_buf_cachep;

struct io_provide_buf {
	struct file	*file;
	__u64		addr;
	__u32		len;
	__u32		bgid;
	__u32		nbufs;
	__u16		bid;
};
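
/*
 * The fields above are the parsed form of the provide/remove buffers SQE:
 * as set up in the prep helpers below, sqe->fd carries the number of
 * buffers, sqe->off the starting buffer ID, and sqe->buf_group the buffer
 * group ID.
 */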

static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
							unsigned int bgid)
{
	lockdep_assert_held(&ctx->uring_lock);

	return xa_load(&ctx->io_bl_xa, bgid);
}

static int io_buffer_add_list(struct io_ring_ctx *ctx,
			      struct io_buffer_list *bl, unsigned int bgid)
{
	/*
	 * Store buffer group ID and finally mark the list as visible.
	 * The normal lookup doesn't care about the visibility as we're
	 * always under the ->uring_lock, but lookups from mmap do.
	 */
	bl->bgid = bgid;
	guard(mutex)(&ctx->mmap_lock);
	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}

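/*
 * Return a legacy (non-ring) provided buffer to its group list so it can be
 * selected again, and clear the request's buffer-selected state.
 */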
bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	struct io_buffer *buf;

	io_ring_submit_lock(ctx, issue_flags);

	buf = req->kbuf;
	bl = io_buffer_get_list(ctx, buf->bgid);
	list_add(&buf->list, &bl->buf_list);
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	req->buf_index = buf->bgid;

	io_ring_submit_unlock(ctx, issue_flags);
	return true;
}

void __io_put_kbuf(struct io_kiocb *req, int len, unsigned issue_flags)
{
	/*
	 * We can add this buffer back to two lists:
	 *
	 * 1) The io_buffers_cache list. This one is protected by the
	 *    ctx->uring_lock. If we already hold this lock, add back to this
	 *    list as we can grab it from issue as well.
	 * 2) The io_buffers_comp list. This one is protected by the
	 *    ctx->completion_lock.
	 *
	 * We migrate buffers from the comp_list to the issue cache list
	 * when we need one.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock(&ctx->completion_lock);
		__io_put_kbuf_list(req, len, &ctx->io_buffers_comp);
		spin_unlock(&ctx->completion_lock);
	} else {
		lockdep_assert_held(&req->ctx->uring_lock);

		__io_put_kbuf_list(req, len, &req->ctx->io_buffers_cache);
	}
}

static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
					      struct io_buffer_list *bl)
{
	if (!list_empty(&bl->buf_list)) {
		struct io_buffer *kbuf;

		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&kbuf->list);
		if (*len == 0 || *len > kbuf->len)
			*len = kbuf->len;
		if (list_empty(&bl->buf_list))
			req->flags |= REQ_F_BL_EMPTY;
		req->flags |= REQ_F_BUFFER_SELECTED;
		req->kbuf = kbuf;
		req->buf_index = kbuf->bid;
		return u64_to_user_ptr(kbuf->addr);
	}
	return NULL;
}

static int io_provided_buffers_select(struct io_kiocb *req, size_t *len,
				      struct io_buffer_list *bl,
				      struct iovec *iov)
{
	void __user *buf;

	buf = io_provided_buffer_select(req, len, bl);
	if (unlikely(!buf))
		return -ENOBUFS;

	iov[0].iov_base = buf;
	iov[0].iov_len = *len;
	return 1;
}

static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
					  struct io_buffer_list *bl,
					  unsigned int issue_flags)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	__u16 tail, head = bl->head;
	struct io_uring_buf *buf;
	void __user *ret;

	tail = smp_load_acquire(&br->tail);
	if (unlikely(tail == head))
		return NULL;

	if (head + 1 == tail)
		req->flags |= REQ_F_BL_EMPTY;

	buf = io_ring_head_to_buf(br, head, bl->mask);
	if (*len == 0 || *len > buf->len)
		*len = buf->len;
	req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT;
	req->buf_list = bl;
	req->buf_index = buf->bid;
	ret = u64_to_user_ptr(buf->addr);

	if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
		/*
		 * If we came in unlocked, we have no choice but to consume the
		 * buffer here, otherwise nothing ensures that the buffer won't
		 * get used by others. This does mean it'll be pinned until the
		 * IO completes; coming in unlocked means we're being called from
		 * io-wq context and there may be further retries in async hybrid
		 * mode. For the locked case, the caller must call commit when
		 * the transfer completes (or if we get -EAGAIN and must poll or
		 * retry).
		 */
		io_kbuf_commit(req, bl, *len, 1);
		req->buf_list = NULL;
	}
	return ret;
}
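
/*
 * Illustrative sketch (not part of this file): after consuming a buffer from
 * a provided buffer ring, userspace typically republishes it and bumps the
 * ring tail, e.g. with liburing:
 *
 *	io_uring_buf_ring_add(br, addr, len, bid,
 *			      io_uring_buf_ring_mask(ring_entries), 0);
 *	io_uring_buf_ring_advance(br, 1);
 *
 * The smp_load_acquire() of br->tail above pairs with the release-ordered
 * tail update performed on the userspace side.
 */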

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	void __user *ret = NULL;

	io_ring_submit_lock(req->ctx, issue_flags);

	bl = io_buffer_get_list(ctx, req->buf_index);
	if (likely(bl)) {
		if (bl->flags & IOBL_BUF_RING)
			ret = io_ring_buffer_select(req, len, bl, issue_flags);
		else
			ret = io_provided_buffer_select(req, len, bl);
	}
	io_ring_submit_unlock(req->ctx, issue_flags);
	return ret;
}

/* cap it at a reasonable 256, will be one page even for 4K */
#define PEEK_MAX_IMPORT		256

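/*
 * Peek up to nr_iovs buffers from the head of a buffer ring into arg->iovs
 * without advancing the ring head; the caller decides whether and how much
 * to commit afterwards.
 */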
static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
				struct io_buffer_list *bl)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	struct iovec *iov = arg->iovs;
	int nr_iovs = arg->nr_iovs;
	__u16 nr_avail, tail, head;
	struct io_uring_buf *buf;

	tail = smp_load_acquire(&br->tail);
	head = bl->head;
	nr_avail = min_t(__u16, tail - head, UIO_MAXIOV);
	if (unlikely(!nr_avail))
		return -ENOBUFS;

	buf = io_ring_head_to_buf(br, head, bl->mask);
	if (arg->max_len) {
		u32 len = READ_ONCE(buf->len);

		if (unlikely(!len))
			return -ENOBUFS;
		/*
		 * Limit incremental buffers to 1 segment. No point trying
		 * to peek ahead and map more than we need, when the buffers
		 * themselves should be large when set up with
		 * IOU_PBUF_RING_INC.
		 */
		if (bl->flags & IOBL_INC) {
			nr_avail = 1;
		} else {
			size_t needed;

			needed = (arg->max_len + len - 1) / len;
			needed = min_not_zero(needed, (size_t) PEEK_MAX_IMPORT);
			if (nr_avail > needed)
				nr_avail = needed;
		}
	}

	/*
	 * only alloc a bigger array if we know we have data to map, e.g. not
	 * a speculative peek operation.
	 */
	if (arg->mode & KBUF_MODE_EXPAND && nr_avail > nr_iovs && arg->max_len) {
		iov = kmalloc_array(nr_avail, sizeof(struct iovec), GFP_KERNEL);
		if (unlikely(!iov))
			return -ENOMEM;
		if (arg->mode & KBUF_MODE_FREE)
			kfree(arg->iovs);
		arg->iovs = iov;
		nr_iovs = nr_avail;
	} else if (nr_avail < nr_iovs) {
		nr_iovs = nr_avail;
	}

	/* set it to max, if not set, so we can use it unconditionally */
	if (!arg->max_len)
		arg->max_len = INT_MAX;

	req->buf_index = buf->bid;
	do {
		u32 len = buf->len;

		/* truncate end piece, if needed, for non-partial buffers */
		if (len > arg->max_len) {
			len = arg->max_len;
			if (!(bl->flags & IOBL_INC))
				buf->len = len;
		}

		iov->iov_base = u64_to_user_ptr(buf->addr);
		iov->iov_len = len;
		iov++;

		arg->out_len += len;
		arg->max_len -= len;
		if (!arg->max_len)
			break;

		buf = io_ring_head_to_buf(br, ++head, bl->mask);
	} while (--nr_iovs);

	if (head == tail)
		req->flags |= REQ_F_BL_EMPTY;

	req->flags |= REQ_F_BUFFER_RING;
	req->buf_list = bl;
	return iov - arg->iovs;
}

int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
		      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = -ENOENT;

	io_ring_submit_lock(ctx, issue_flags);
	bl = io_buffer_get_list(ctx, req->buf_index);
	if (unlikely(!bl))
		goto out_unlock;

	if (bl->flags & IOBL_BUF_RING) {
		ret = io_ring_buffers_peek(req, arg, bl);
		/*
		 * Don't recycle these buffers if we need to go through poll.
		 * Nobody else can use them anyway, and holding on to provided
		 * buffers for a send/write operation would happen on the app
		 * side anyway with normal buffers. Besides, we already
		 * committed them, they cannot be put back in the queue.
		 */
		if (ret > 0) {
			req->flags |= REQ_F_BUFFERS_COMMIT | REQ_F_BL_NO_RECYCLE;
			io_kbuf_commit(req, bl, arg->out_len, ret);
		}
	} else {
		ret = io_provided_buffers_select(req, &arg->out_len, bl, arg->iovs);
	}
out_unlock:
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

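/*
 * Like io_buffers_select(), but the caller already holds the uring_lock and
 * ring buffers peeked here are not committed yet; REQ_F_BUFFERS_COMMIT marks
 * them for a later commit.
 */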
int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret;

	lockdep_assert_held(&ctx->uring_lock);

	bl = io_buffer_get_list(ctx, req->buf_index);
	if (unlikely(!bl))
		return -ENOENT;

	if (bl->flags & IOBL_BUF_RING) {
		ret = io_ring_buffers_peek(req, arg, bl);
		if (ret > 0)
			req->flags |= REQ_F_BUFFERS_COMMIT;
		return ret;
	}

	/* don't support multiple buffer selections for legacy */
	return io_provided_buffers_select(req, &arg->max_len, bl, arg->iovs);
}

static int __io_remove_buffers(struct io_ring_ctx *ctx,
			       struct io_buffer_list *bl, unsigned nbufs)
{
	unsigned i = 0;

	/* shouldn't happen */
	if (!nbufs)
		return 0;

	if (bl->flags & IOBL_BUF_RING) {
		i = bl->buf_ring->tail - bl->head;
		io_free_region(ctx, &bl->region);
		/* make sure it's seen as empty */
		INIT_LIST_HEAD(&bl->buf_list);
		bl->flags &= ~IOBL_BUF_RING;
		return i;
	}

	/* protects io_buffers_cache */
	lockdep_assert_held(&ctx->uring_lock);

	while (!list_empty(&bl->buf_list)) {
		struct io_buffer *nxt;

		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_move(&nxt->list, &ctx->io_buffers_cache);
		if (++i == nbufs)
			return i;
		cond_resched();
	}

	return i;
}

static void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
	__io_remove_buffers(ctx, bl, -1U);
	kfree(bl);
}

void io_destroy_buffers(struct io_ring_ctx *ctx)
{
	struct io_buffer_list *bl;
	struct list_head *item, *tmp;
	struct io_buffer *buf;

	while (1) {
		unsigned long index = 0;

		scoped_guard(mutex, &ctx->mmap_lock) {
			bl = xa_find(&ctx->io_bl_xa, &index, ULONG_MAX, XA_PRESENT);
			if (bl)
				xa_erase(&ctx->io_bl_xa, bl->bgid);
		}
		if (!bl)
			break;
		io_put_bl(ctx, bl);
	}

	/*
	 * Move deferred locked entries to cache before pruning
	 */
	spin_lock(&ctx->completion_lock);
	if (!list_empty(&ctx->io_buffers_comp))
		list_splice_init(&ctx->io_buffers_comp, &ctx->io_buffers_cache);
	spin_unlock(&ctx->completion_lock);

	list_for_each_safe(item, tmp, &ctx->io_buffers_cache) {
		buf = list_entry(item, struct io_buffer, list);
		kmem_cache_free(io_buf_cachep, buf);
	}
}

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
	    sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}

int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	ret = -ENOENT;
	bl = io_buffer_get_list(ctx, p->bgid);
	if (bl) {
		ret = -EINVAL;
		/* can't use provide/remove buffers command on mapped buffers */
		if (!(bl->flags & IOBL_BUF_RING))
			ret = __io_remove_buffers(ctx, bl, p->nbufs);
	}
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	unsigned long size, tmp_check;
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);

	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
			       &size))
		return -EOVERFLOW;
	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
		return -EOVERFLOW;

	size = (unsigned long)p->len * p->nbufs;
	if (!access_ok(u64_to_user_ptr(p->addr), size))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
		return -EINVAL;
	p->bid = tmp;
	return 0;
}
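
/*
 * Illustrative sketch (not part of this file): an application typically
 * queues this opcode through liburing, e.g.
 *
 *	io_uring_prep_provide_buffers(sqe, base, buf_len, nr_bufs, bgid, 0);
 *
 * which provides nr_bufs buffers of buf_len bytes each, starting at base,
 * to group bgid with buffer IDs starting at 0.
 */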

#define IO_BUFFER_ALLOC_BATCH 64

static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
{
	struct io_buffer *bufs[IO_BUFFER_ALLOC_BATCH];
	int allocated;

	/*
	 * Completions that don't happen inline (eg not under uring_lock) will
	 * add to ->io_buffers_comp. If we don't have any free buffers, check
	 * the completion list and splice those entries first.
	 */
	if (!list_empty_careful(&ctx->io_buffers_comp)) {
		spin_lock(&ctx->completion_lock);
		if (!list_empty(&ctx->io_buffers_comp)) {
			list_splice_init(&ctx->io_buffers_comp,
					 &ctx->io_buffers_cache);
			spin_unlock(&ctx->completion_lock);
			return 0;
		}
		spin_unlock(&ctx->completion_lock);
	}

	/*
	 * No free buffers and no completion entries either. Allocate a new
	 * batch of buffer entries and add those to our freelist.
	 */

	allocated = kmem_cache_alloc_bulk(io_buf_cachep, GFP_KERNEL_ACCOUNT,
					  ARRAY_SIZE(bufs), (void **) bufs);
	if (unlikely(!allocated)) {
		/*
		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
		 * retry single alloc to be on the safe side.
		 */
		bufs[0] = kmem_cache_alloc(io_buf_cachep, GFP_KERNEL);
		if (!bufs[0])
			return -ENOMEM;
		allocated = 1;
	}

	while (allocated)
		list_add_tail(&bufs[--allocated]->list, &ctx->io_buffers_cache);

	return 0;
}

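/*
 * Link nbufs entries from the io_buffers_cache into the target group list,
 * describing the user memory at pbuf->addr in pbuf->len sized chunks. A
 * partial add is treated as success as long as at least one buffer was added.
 */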
static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
			  struct io_buffer_list *bl)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		if (list_empty(&ctx->io_buffers_cache) &&
		    io_refill_buffer_cache(ctx))
			break;
		buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
				       list);
		list_move_tail(&buf->list, &bl->buf_list);
		buf->addr = addr;
		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
		buf->bid = bid;
		buf->bgid = pbuf->bgid;
		addr += pbuf->len;
		bid++;
		cond_resched();
	}

	return i ? 0 : -ENOMEM;
}

int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	bl = io_buffer_get_list(ctx, p->bgid);
	if (unlikely(!bl)) {
		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
		if (!bl) {
			ret = -ENOMEM;
			goto err;
		}
		INIT_LIST_HEAD(&bl->buf_list);
		ret = io_buffer_add_list(ctx, bl, p->bgid);
		if (ret) {
			kfree(bl);
			goto err;
		}
	}
	/* can't add buffers via this command for a mapped buffer ring */
	if (bl->flags & IOBL_BUF_RING) {
		ret = -EINVAL;
		goto err;
	}

	ret = io_add_buffers(ctx, p, bl);
err:
	io_ring_submit_unlock(ctx, issue_flags);

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl, *free_bl = NULL;
	struct io_uring_region_desc rd;
	struct io_uring_buf_ring *br;
	unsigned long mmap_offset;
	unsigned long ring_size;
	int ret;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;

	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags & ~(IOU_PBUF_RING_MMAP | IOU_PBUF_RING_INC))
		return -EINVAL;
	if (!is_power_of_2(reg.ring_entries))
		return -EINVAL;
	/* cannot disambiguate full vs empty due to head/tail size */
	if (reg.ring_entries >= 65536)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (bl) {
		/* if mapped buffer ring OR classic exists, don't allow */
		if (bl->flags & IOBL_BUF_RING || !list_empty(&bl->buf_list))
			return -EEXIST;
	} else {
		free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
		if (!bl)
			return -ENOMEM;
	}

	mmap_offset = (unsigned long)reg.bgid << IORING_OFF_PBUF_SHIFT;
	ring_size = flex_array_size(br, bufs, reg.ring_entries);

	memset(&rd, 0, sizeof(rd));
	rd.size = PAGE_ALIGN(ring_size);
	if (!(reg.flags & IOU_PBUF_RING_MMAP)) {
		rd.user_addr = reg.ring_addr;
		rd.flags |= IORING_MEM_REGION_TYPE_USER;
	}
	ret = io_create_region_mmap_safe(ctx, &bl->region, &rd, mmap_offset);
	if (ret)
		goto fail;
	br = io_region_get_ptr(&bl->region);

#ifdef SHM_COLOUR
	/*
	 * On platforms that have specific aliasing requirements, SHM_COLOUR
	 * is set and we must guarantee that the kernel and user side align
	 * nicely. We cannot do that if IOU_PBUF_RING_MMAP isn't set and
	 * the application mmap's the provided ring buffer. Fail the request
	 * if we, by chance, don't end up with aligned addresses. The app
	 * should use IOU_PBUF_RING_MMAP instead, and liburing will handle
	 * this transparently.
	 */
	if (!(reg.flags & IOU_PBUF_RING_MMAP) &&
	    ((reg.ring_addr | (unsigned long)br) & (SHM_COLOUR - 1))) {
		ret = -EINVAL;
		goto fail;
	}
#endif

	bl->nr_entries = reg.ring_entries;
	bl->mask = reg.ring_entries - 1;
	bl->flags |= IOBL_BUF_RING;
	bl->buf_ring = br;
	if (reg.flags & IOU_PBUF_RING_INC)
		bl->flags |= IOBL_INC;
	io_buffer_add_list(ctx, bl, reg.bgid);
	return 0;
fail:
	io_free_region(ctx, &bl->region);
	kfree(free_bl);
	return ret;
}
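
/*
 * Illustrative sketch (not part of this file): with liburing, an application
 * can allocate and register a provided buffer ring in one step, e.g.
 *
 *	int err;
 *	struct io_uring_buf_ring *br;
 *
 *	br = io_uring_setup_buf_ring(&ring, 4096, bgid, 0, &err);
 *
 * or register caller-managed memory with io_uring_register_buf_ring().
 */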

int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (!bl)
		return -ENOENT;
	if (!(bl->flags & IOBL_BUF_RING))
		return -EINVAL;

	scoped_guard(mutex, &ctx->mmap_lock)
		xa_erase(&ctx->io_bl_xa, bl->bgid);

	io_put_bl(ctx, bl);
	return 0;
}

int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_status buf_status;
	struct io_buffer_list *bl;
	int i;

	if (copy_from_user(&buf_status, arg, sizeof(buf_status)))
		return -EFAULT;

	for (i = 0; i < ARRAY_SIZE(buf_status.resv); i++)
		if (buf_status.resv[i])
			return -EINVAL;

	bl = io_buffer_get_list(ctx, buf_status.buf_group);
	if (!bl)
		return -ENOENT;
	if (!(bl->flags & IOBL_BUF_RING))
		return -EINVAL;

	buf_status.head = bl->head;
	if (copy_to_user(arg, &buf_status, sizeof(buf_status)))
		return -EFAULT;

	return 0;
}

struct io_mapped_region *io_pbuf_get_region(struct io_ring_ctx *ctx,
					    unsigned int bgid)
{
	struct io_buffer_list *bl;

	lockdep_assert_held(&ctx->mmap_lock);

	bl = xa_load(&ctx->io_bl_xa, bgid);
	if (!bl || !(bl->flags & IOBL_BUF_RING))
		return NULL;
	return &bl->region;
}