GitHub Repository: torvalds/linux
Path: blob/master/io_uring/kbuf.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "memmap.h"

/* BIDs are addressed by a 16-bit field in a CQE */
#define MAX_BIDS_PER_BGID (1 << 16)

/* Mapped buffer ring, return io_uring_buf from head */
#define io_ring_head_to_buf(br, head, mask)	&(br)->bufs[(head) & (mask)]

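/*
 * Prep-time state for IORING_OP_PROVIDE_BUFFERS / IORING_OP_REMOVE_BUFFERS,
 * filled in from the SQE by the prep handlers below.
 */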
struct io_provide_buf {
        struct file                     *file;
        __u64                           addr;
        __u32                           len;
        __u32                           bgid;
        __u32                           nbufs;
        __u16                           bid;
};

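/*
 * Commit "len" bytes for an incremental (IOBL_INC) buffer ring. Fully
 * consumed ring entries are retired by advancing the head; a partially
 * used entry has its addr/len adjusted in place instead. Returns true if
 * the last entry touched was fully consumed, false if it still has room
 * left for further transfers.
 */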
static bool io_kbuf_inc_commit(struct io_buffer_list *bl, int len)
{
        while (len) {
                struct io_uring_buf *buf;
                u32 buf_len, this_len;

                buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
                buf_len = READ_ONCE(buf->len);
                this_len = min_t(u32, len, buf_len);
                buf_len -= this_len;
                /* Stop looping for invalid buffer length of 0 */
                if (buf_len || !this_len) {
                        buf->addr += this_len;
                        buf->len = buf_len;
                        return false;
                }
                buf->len = 0;
                bl->head++;
                len -= this_len;
        }
        return true;
}

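/*
 * Commit ring-provided buffers once a request has consumed them. For
 * incremental rings this defers to io_kbuf_inc_commit(), otherwise the
 * head is bumped by the number of buffers used. Only does anything if
 * REQ_F_BUFFERS_COMMIT is still set, so a commit happens at most once
 * per selection.
 */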
bool io_kbuf_commit(struct io_kiocb *req,
                    struct io_buffer_list *bl, int len, int nr)
{
        if (unlikely(!(req->flags & REQ_F_BUFFERS_COMMIT)))
                return true;

        req->flags &= ~REQ_F_BUFFERS_COMMIT;

        if (unlikely(len < 0))
                return true;
        if (bl->flags & IOBL_INC)
                return io_kbuf_inc_commit(bl, len);
        bl->head += nr;
        return true;
}

static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
                                                        unsigned int bgid)
{
        lockdep_assert_held(&ctx->uring_lock);

        return xa_load(&ctx->io_bl_xa, bgid);
}

static int io_buffer_add_list(struct io_ring_ctx *ctx,
                              struct io_buffer_list *bl, unsigned int bgid)
{
        /*
         * Store buffer group ID and finally mark the list as visible.
         * The normal lookup doesn't care about the visibility as we're
         * always under the ->uring_lock, but lookups from mmap do.
         */
        bl->bgid = bgid;
        guard(mutex)(&ctx->mmap_lock);
        return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}

void io_kbuf_drop_legacy(struct io_kiocb *req)
{
        if (WARN_ON_ONCE(!(req->flags & REQ_F_BUFFER_SELECTED)))
                return;
        req->flags &= ~REQ_F_BUFFER_SELECTED;
        kfree(req->kbuf);
        req->kbuf = NULL;
}

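/*
 * Return a selected legacy (classic provided) buffer to its buffer list,
 * making it available to other requests again.
 */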
bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
{
        struct io_ring_ctx *ctx = req->ctx;
        struct io_buffer_list *bl;
        struct io_buffer *buf;

        io_ring_submit_lock(ctx, issue_flags);

        buf = req->kbuf;
        bl = io_buffer_get_list(ctx, buf->bgid);
        list_add(&buf->list, &bl->buf_list);
        bl->nbufs++;
        req->flags &= ~REQ_F_BUFFER_SELECTED;

        io_ring_submit_unlock(ctx, issue_flags);
        return true;
}

static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
                                              struct io_buffer_list *bl)
{
        if (!list_empty(&bl->buf_list)) {
                struct io_buffer *kbuf;

                kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
                list_del(&kbuf->list);
                bl->nbufs--;
                if (*len == 0 || *len > kbuf->len)
                        *len = kbuf->len;
                if (list_empty(&bl->buf_list))
                        req->flags |= REQ_F_BL_EMPTY;
                req->flags |= REQ_F_BUFFER_SELECTED;
                req->kbuf = kbuf;
                req->buf_index = kbuf->bid;
                return u64_to_user_ptr(kbuf->addr);
        }
        return NULL;
}

static int io_provided_buffers_select(struct io_kiocb *req, size_t *len,
                                      struct io_buffer_list *bl,
                                      struct iovec *iov)
{
        void __user *buf;

        buf = io_provided_buffer_select(req, len, bl);
        if (unlikely(!buf))
                return -ENOBUFS;

        iov[0].iov_base = buf;
        iov[0].iov_len = *len;
        return 1;
}

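/*
 * Pick the next buffer from a mapped provided buffer ring. The returned
 * io_br_sel carries the user address and, when the commit can be deferred,
 * the buffer list so the caller can commit once it knows how much data was
 * actually transferred.
 */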
static struct io_br_sel io_ring_buffer_select(struct io_kiocb *req, size_t *len,
                                              struct io_buffer_list *bl,
                                              unsigned int issue_flags)
{
        struct io_uring_buf_ring *br = bl->buf_ring;
        __u16 tail, head = bl->head;
        struct io_br_sel sel = { };
        struct io_uring_buf *buf;
        u32 buf_len;

        tail = smp_load_acquire(&br->tail);
        if (unlikely(tail == head))
                return sel;

        if (head + 1 == tail)
                req->flags |= REQ_F_BL_EMPTY;

        buf = io_ring_head_to_buf(br, head, bl->mask);
        buf_len = READ_ONCE(buf->len);
        if (*len == 0 || *len > buf_len)
                *len = buf_len;
        req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT;
        req->buf_index = buf->bid;
        sel.buf_list = bl;
        sel.addr = u64_to_user_ptr(buf->addr);

        if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
                /*
                 * If we came in unlocked, we have no choice but to consume the
                 * buffer here, otherwise nothing ensures that the buffer won't
                 * get used by others. This does mean it'll be pinned until the
                 * IO completes, coming in unlocked means we're being called from
                 * io-wq context and there may be further retries in async hybrid
                 * mode. For the locked case, the caller must call commit when
                 * the transfer completes (or if we get -EAGAIN and must poll or
                 * retry).
                 */
                io_kbuf_commit(req, sel.buf_list, *len, 1);
                sel.buf_list = NULL;
        }
        return sel;
}

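/*
 * Select a single buffer from the given buffer group, using either the
 * mapped ring or the legacy list, whichever the group was registered with.
 */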
struct io_br_sel io_buffer_select(struct io_kiocb *req, size_t *len,
                                  unsigned buf_group, unsigned int issue_flags)
{
        struct io_ring_ctx *ctx = req->ctx;
        struct io_br_sel sel = { };
        struct io_buffer_list *bl;

        io_ring_submit_lock(req->ctx, issue_flags);

        bl = io_buffer_get_list(ctx, buf_group);
        if (likely(bl)) {
                if (bl->flags & IOBL_BUF_RING)
                        sel = io_ring_buffer_select(req, len, bl, issue_flags);
                else
                        sel.addr = io_provided_buffer_select(req, len, bl);
        }
        io_ring_submit_unlock(req->ctx, issue_flags);
        return sel;
}

/* cap it at a reasonable 256, will be one page even for 4K */
#define PEEK_MAX_IMPORT         256

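/*
 * Map as many ring buffers as needed to satisfy arg->max_len into the
 * iovec array, without committing them yet; the callers commit separately
 * once the outcome of the operation is known. Returns the number of iovecs
 * filled, or a negative error.
 */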
static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
                                struct io_buffer_list *bl)
{
        struct io_uring_buf_ring *br = bl->buf_ring;
        struct iovec *iov = arg->iovs;
        int nr_iovs = arg->nr_iovs;
        __u16 nr_avail, tail, head;
        struct io_uring_buf *buf;

        tail = smp_load_acquire(&br->tail);
        head = bl->head;
        nr_avail = min_t(__u16, tail - head, UIO_MAXIOV);
        if (unlikely(!nr_avail))
                return -ENOBUFS;

        buf = io_ring_head_to_buf(br, head, bl->mask);
        if (arg->max_len) {
                u32 len = READ_ONCE(buf->len);
                size_t needed;

                if (unlikely(!len))
                        return -ENOBUFS;
                needed = (arg->max_len + len - 1) / len;
                needed = min_not_zero(needed, (size_t) PEEK_MAX_IMPORT);
                if (nr_avail > needed)
                        nr_avail = needed;
        }

        /*
         * only alloc a bigger array if we know we have data to map, eg not
         * a speculative peek operation.
         */
        if (arg->mode & KBUF_MODE_EXPAND && nr_avail > nr_iovs && arg->max_len) {
                iov = kmalloc_array(nr_avail, sizeof(struct iovec), GFP_KERNEL);
                if (unlikely(!iov))
                        return -ENOMEM;
                if (arg->mode & KBUF_MODE_FREE)
                        kfree(arg->iovs);
                arg->iovs = iov;
                nr_iovs = nr_avail;
        } else if (nr_avail < nr_iovs) {
                nr_iovs = nr_avail;
        }

        /* set it to max, if not set, so we can use it unconditionally */
        if (!arg->max_len)
                arg->max_len = INT_MAX;

        req->buf_index = buf->bid;
        do {
                u32 len = READ_ONCE(buf->len);

                /* truncate end piece, if needed, for non partial buffers */
                if (len > arg->max_len) {
                        len = arg->max_len;
                        if (!(bl->flags & IOBL_INC)) {
                                arg->partial_map = 1;
                                if (iov != arg->iovs)
                                        break;
                                buf->len = len;
                        }
                }

                iov->iov_base = u64_to_user_ptr(buf->addr);
                iov->iov_len = len;
                iov++;

                arg->out_len += len;
                arg->max_len -= len;
                if (!arg->max_len)
                        break;

                buf = io_ring_head_to_buf(br, ++head, bl->mask);
        } while (--nr_iovs);

        if (head == tail)
                req->flags |= REQ_F_BL_EMPTY;

        req->flags |= REQ_F_BUFFER_RING;
        return iov - arg->iovs;
}

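/*
 * Select (and, for ring-provided buffers, immediately commit) one or more
 * buffers for a request, e.g. for bundled send/recv operations that can
 * consume several buffers in one go.
 */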
int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
                      struct io_br_sel *sel, unsigned int issue_flags)
{
        struct io_ring_ctx *ctx = req->ctx;
        int ret = -ENOENT;

        io_ring_submit_lock(ctx, issue_flags);
        sel->buf_list = io_buffer_get_list(ctx, arg->buf_group);
        if (unlikely(!sel->buf_list))
                goto out_unlock;

        if (sel->buf_list->flags & IOBL_BUF_RING) {
                ret = io_ring_buffers_peek(req, arg, sel->buf_list);
                /*
                 * Don't recycle these buffers if we need to go through poll.
                 * Nobody else can use them anyway, and holding on to provided
                 * buffers for a send/write operation would happen on the app
                 * side anyway with normal buffers. Besides, we already
                 * committed them, they cannot be put back in the queue.
                 */
                if (ret > 0) {
                        req->flags |= REQ_F_BUFFERS_COMMIT | REQ_F_BL_NO_RECYCLE;
                        io_kbuf_commit(req, sel->buf_list, arg->out_len, ret);
                }
        } else {
                ret = io_provided_buffers_select(req, &arg->out_len, sel->buf_list, arg->iovs);
        }
out_unlock:
        if (issue_flags & IO_URING_F_UNLOCKED) {
                sel->buf_list = NULL;
                mutex_unlock(&ctx->uring_lock);
        }
        return ret;
}

int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
                    struct io_br_sel *sel)
{
        struct io_ring_ctx *ctx = req->ctx;
        struct io_buffer_list *bl;
        int ret;

        lockdep_assert_held(&ctx->uring_lock);

        bl = io_buffer_get_list(ctx, arg->buf_group);
        if (unlikely(!bl))
                return -ENOENT;

        if (bl->flags & IOBL_BUF_RING) {
                ret = io_ring_buffers_peek(req, arg, bl);
                if (ret > 0)
                        req->flags |= REQ_F_BUFFERS_COMMIT;
                sel->buf_list = bl;
                return ret;
        }

        /* don't support multiple buffer selections for legacy */
        sel->buf_list = NULL;
        return io_provided_buffers_select(req, &arg->max_len, bl, arg->iovs);
}

static inline bool __io_put_kbuf_ring(struct io_kiocb *req,
                                      struct io_buffer_list *bl, int len, int nr)
{
        bool ret = true;

        if (bl)
                ret = io_kbuf_commit(req, bl, len, nr);

        req->flags &= ~REQ_F_BUFFER_RING;
        return ret;
}

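/*
 * Build the CQE flags that describe the buffer(s) used by a request: the
 * buffer ID goes in the upper bits, tagged with IORING_CQE_F_BUFFER. If an
 * incremental buffer was only partially consumed, IORING_CQE_F_BUF_MORE is
 * set as well. Userspace can recover the buffer ID roughly like this:
 *
 *	bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 */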
unsigned int __io_put_kbufs(struct io_kiocb *req, struct io_buffer_list *bl,
                            int len, int nbufs)
{
        unsigned int ret;

        ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);

        if (unlikely(!(req->flags & REQ_F_BUFFER_RING))) {
                io_kbuf_drop_legacy(req);
                return ret;
        }

        if (!__io_put_kbuf_ring(req, bl, len, nbufs))
                ret |= IORING_CQE_F_BUF_MORE;
        return ret;
}

static int io_remove_buffers_legacy(struct io_ring_ctx *ctx,
                                    struct io_buffer_list *bl,
                                    unsigned long nbufs)
{
        unsigned long i = 0;
        struct io_buffer *nxt;

        /* protects io_buffers_cache */
        lockdep_assert_held(&ctx->uring_lock);
        WARN_ON_ONCE(bl->flags & IOBL_BUF_RING);

        for (i = 0; i < nbufs && !list_empty(&bl->buf_list); i++) {
                nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
                list_del(&nxt->list);
                bl->nbufs--;
                kfree(nxt);
                cond_resched();
        }
        return i;
}

static void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
        if (bl->flags & IOBL_BUF_RING)
                io_free_region(ctx, &bl->region);
        else
                io_remove_buffers_legacy(ctx, bl, -1U);

        kfree(bl);
}

void io_destroy_buffers(struct io_ring_ctx *ctx)
{
        struct io_buffer_list *bl;

        while (1) {
                unsigned long index = 0;

                scoped_guard(mutex, &ctx->mmap_lock) {
                        bl = xa_find(&ctx->io_bl_xa, &index, ULONG_MAX, XA_PRESENT);
                        if (bl)
                                xa_erase(&ctx->io_bl_xa, bl->bgid);
                }
                if (!bl)
                        break;
                io_put_bl(ctx, bl);
        }
}

static void io_destroy_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
        scoped_guard(mutex, &ctx->mmap_lock)
                WARN_ON_ONCE(xa_erase(&ctx->io_bl_xa, bl->bgid) != bl);
        io_put_bl(ctx, bl);
}

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
        u64 tmp;

        if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
            sqe->splice_fd_in)
                return -EINVAL;

        tmp = READ_ONCE(sqe->fd);
        if (!tmp || tmp > MAX_BIDS_PER_BGID)
                return -EINVAL;

        memset(p, 0, sizeof(*p));
        p->nbufs = tmp;
        p->bgid = READ_ONCE(sqe->buf_group);
        return 0;
}

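/*
 * IORING_OP_PROVIDE_BUFFERS repurposes the SQE fields as follows: fd holds
 * the number of buffers, addr the start of the (contiguous) buffer area,
 * len the per-buffer length, buf_group the group ID and off the first
 * buffer ID. The range is validated for overflow and access here; the
 * buffers themselves are added at issue time.
 */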
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        unsigned long size, tmp_check;
        struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
        u64 tmp;

        if (sqe->rw_flags || sqe->splice_fd_in)
                return -EINVAL;

        tmp = READ_ONCE(sqe->fd);
        if (!tmp || tmp > MAX_BIDS_PER_BGID)
                return -E2BIG;
        p->nbufs = tmp;
        p->addr = READ_ONCE(sqe->addr);
        p->len = READ_ONCE(sqe->len);
        if (!p->len)
                return -EINVAL;

        if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
                               &size))
                return -EOVERFLOW;
        if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
                return -EOVERFLOW;
        if (!access_ok(u64_to_user_ptr(p->addr), size))
                return -EFAULT;

        p->bgid = READ_ONCE(sqe->buf_group);
        tmp = READ_ONCE(sqe->off);
        if (tmp > USHRT_MAX)
                return -E2BIG;
        if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
                return -EINVAL;
        p->bid = tmp;
        return 0;
}

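/*
 * Carve pbuf->nbufs buffers of pbuf->len bytes each out of the provided
 * address range and append them to the legacy buffer list, assigning
 * consecutive buffer IDs starting at pbuf->bid.
 */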
static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
                          struct io_buffer_list *bl)
{
        struct io_buffer *buf;
        u64 addr = pbuf->addr;
        int ret = -ENOMEM, i, bid = pbuf->bid;

        for (i = 0; i < pbuf->nbufs; i++) {
                /*
                 * Nonsensical to have more than sizeof(bid) buffers in a
                 * buffer list, as the application then has no way of knowing
                 * which duplicate bid refers to what buffer.
                 */
                if (bl->nbufs == USHRT_MAX) {
                        ret = -EOVERFLOW;
                        break;
                }
                buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
                if (!buf)
                        break;

                list_add_tail(&buf->list, &bl->buf_list);
                bl->nbufs++;
                buf->addr = addr;
                buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
                buf->bid = bid;
                buf->bgid = pbuf->bgid;
                addr += pbuf->len;
                bid++;
                cond_resched();
        }

        return i ? 0 : ret;
}

static int __io_manage_buffers_legacy(struct io_kiocb *req,
                                      struct io_buffer_list *bl)
{
        struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
        int ret;

        if (!bl) {
                if (req->opcode != IORING_OP_PROVIDE_BUFFERS)
                        return -ENOENT;
                bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
                if (!bl)
                        return -ENOMEM;

                INIT_LIST_HEAD(&bl->buf_list);
                ret = io_buffer_add_list(req->ctx, bl, p->bgid);
                if (ret) {
                        kfree(bl);
                        return ret;
                }
        }
        /* can't use provide/remove buffers command on mapped buffers */
        if (bl->flags & IOBL_BUF_RING)
                return -EINVAL;
        if (req->opcode == IORING_OP_PROVIDE_BUFFERS)
                return io_add_buffers(req->ctx, p, bl);
        return io_remove_buffers_legacy(req->ctx, bl, p->nbufs);
}

int io_manage_buffers_legacy(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
        struct io_ring_ctx *ctx = req->ctx;
        struct io_buffer_list *bl;
        int ret;

        io_ring_submit_lock(ctx, issue_flags);
        bl = io_buffer_get_list(ctx, p->bgid);
        ret = __io_manage_buffers_legacy(req, bl);
        io_ring_submit_unlock(ctx, issue_flags);

        if (ret < 0)
                req_set_fail(req);
        io_req_set_res(req, ret, 0);
        return IOU_COMPLETE;
}

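/*
 * Register a provided buffer ring (IORING_REGISTER_PBUF_RING). The ring
 * memory either comes from the application via reg.ring_addr, or is
 * allocated by the kernel and mmap'ed by the application afterwards when
 * IOU_PBUF_RING_MMAP is set. reg.ring_entries must be a power of two and
 * smaller than 65536, and reg.bgid picks the buffer group.
 */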
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
        struct io_uring_buf_reg reg;
        struct io_buffer_list *bl;
        struct io_uring_region_desc rd;
        struct io_uring_buf_ring *br;
        unsigned long mmap_offset;
        unsigned long ring_size;
        int ret;

        lockdep_assert_held(&ctx->uring_lock);

        if (copy_from_user(&reg, arg, sizeof(reg)))
                return -EFAULT;
        if (!mem_is_zero(reg.resv, sizeof(reg.resv)))
                return -EINVAL;
        if (reg.flags & ~(IOU_PBUF_RING_MMAP | IOU_PBUF_RING_INC))
                return -EINVAL;
        if (!is_power_of_2(reg.ring_entries))
                return -EINVAL;
        /* cannot disambiguate full vs empty due to head/tail size */
        if (reg.ring_entries >= 65536)
                return -EINVAL;

        bl = io_buffer_get_list(ctx, reg.bgid);
        if (bl) {
                /* if mapped buffer ring OR classic exists, don't allow */
                if (bl->flags & IOBL_BUF_RING || !list_empty(&bl->buf_list))
                        return -EEXIST;
                io_destroy_bl(ctx, bl);
        }

        bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
        if (!bl)
                return -ENOMEM;

        mmap_offset = (unsigned long)reg.bgid << IORING_OFF_PBUF_SHIFT;
        ring_size = flex_array_size(br, bufs, reg.ring_entries);

        memset(&rd, 0, sizeof(rd));
        rd.size = PAGE_ALIGN(ring_size);
        if (!(reg.flags & IOU_PBUF_RING_MMAP)) {
                rd.user_addr = reg.ring_addr;
                rd.flags |= IORING_MEM_REGION_TYPE_USER;
        }
        ret = io_create_region_mmap_safe(ctx, &bl->region, &rd, mmap_offset);
        if (ret)
                goto fail;
        br = io_region_get_ptr(&bl->region);

#ifdef SHM_COLOUR
        /*
         * On platforms that have specific aliasing requirements, SHM_COLOUR
         * is set and we must guarantee that the kernel and user side align
         * nicely. We cannot do that if IOU_PBUF_RING_MMAP isn't set and
         * the application mmap's the provided ring buffer. Fail the request
         * if we, by chance, don't end up with aligned addresses. The app
         * should use IOU_PBUF_RING_MMAP instead, and liburing will handle
         * this transparently.
         */
        if (!(reg.flags & IOU_PBUF_RING_MMAP) &&
            ((reg.ring_addr | (unsigned long)br) & (SHM_COLOUR - 1))) {
                ret = -EINVAL;
                goto fail;
        }
#endif

        bl->nr_entries = reg.ring_entries;
        bl->mask = reg.ring_entries - 1;
        bl->flags |= IOBL_BUF_RING;
        bl->buf_ring = br;
        if (reg.flags & IOU_PBUF_RING_INC)
                bl->flags |= IOBL_INC;
        io_buffer_add_list(ctx, bl, reg.bgid);
        return 0;
fail:
        io_free_region(ctx, &bl->region);
        kfree(bl);
        return ret;
}

int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
        struct io_uring_buf_reg reg;
        struct io_buffer_list *bl;

        lockdep_assert_held(&ctx->uring_lock);

        if (copy_from_user(&reg, arg, sizeof(reg)))
                return -EFAULT;
        if (!mem_is_zero(reg.resv, sizeof(reg.resv)) || reg.flags)
                return -EINVAL;

        bl = io_buffer_get_list(ctx, reg.bgid);
        if (!bl)
                return -ENOENT;
        if (!(bl->flags & IOBL_BUF_RING))
                return -EINVAL;

        scoped_guard(mutex, &ctx->mmap_lock)
                xa_erase(&ctx->io_bl_xa, bl->bgid);

        io_put_bl(ctx, bl);
        return 0;
}

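/*
 * IORING_REGISTER_PBUF_STATUS: report the current head of a registered
 * buffer ring, so the application can tell how far the kernel has consumed
 * its provided buffers.
 */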
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg)
{
        struct io_uring_buf_status buf_status;
        struct io_buffer_list *bl;

        if (copy_from_user(&buf_status, arg, sizeof(buf_status)))
                return -EFAULT;
        if (!mem_is_zero(buf_status.resv, sizeof(buf_status.resv)))
                return -EINVAL;

        bl = io_buffer_get_list(ctx, buf_status.buf_group);
        if (!bl)
                return -ENOENT;
        if (!(bl->flags & IOBL_BUF_RING))
                return -EINVAL;

        buf_status.head = bl->head;
        if (copy_to_user(arg, &buf_status, sizeof(buf_status)))
                return -EFAULT;

        return 0;
}

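/*
 * Look up the mapped region backing a buffer ring, for the mmap path.
 * Called with the mmap_lock held rather than the uring_lock.
 */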
struct io_mapped_region *io_pbuf_get_region(struct io_ring_ctx *ctx,
                                            unsigned int bgid)
{
        struct io_buffer_list *bl;

        lockdep_assert_held(&ctx->mmap_lock);

        bl = xa_load(&ctx->io_bl_xa, bgid);
        if (!bl || !(bl->flags & IOBL_BUF_RING))
                return NULL;
        return &bl->region;
}