// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"

#define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))

#define BGID_ARRAY	64

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__u32				len;
	__u32				bgid;
	__u16				nbufs;
	__u16				bid;
};

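/*
 * Look up the buffer list for a given buffer group ID. Low group IDs live in
 * the flat ->io_bl array, higher ones are stored in the ->io_bl_xa xarray.
 */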
static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
							unsigned int bgid)
{
	if (ctx->io_bl && bgid < BGID_ARRAY)
		return &ctx->io_bl[bgid];

	return xa_load(&ctx->io_bl_xa, bgid);
}

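/*
 * Associate a buffer list with its group ID. Lists for low group IDs already
 * sit in the ->io_bl array; only higher IDs need an xarray insertion.
 */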
static int io_buffer_add_list(struct io_ring_ctx *ctx,
			      struct io_buffer_list *bl, unsigned int bgid)
{
	bl->bgid = bgid;
	if (bgid < BGID_ARRAY)
		return 0;

	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}

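/*
 * Return a legacy (non ring-mapped) provided buffer to its group so it can be
 * selected again, unless the request already did partial IO to it.
 */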
void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	struct io_buffer *buf;

	/*
	 * For legacy provided buffer mode, don't recycle if we already did
	 * IO to this buffer. For ring-mapped provided buffer mode, we should
	 * increment ring->head to explicitly monopolize the buffer to avoid
	 * multiple use.
	 */
	if (req->flags & REQ_F_PARTIAL_IO)
		return;

	io_ring_submit_lock(ctx, issue_flags);

	buf = req->kbuf;
	bl = io_buffer_get_list(ctx, buf->bgid);
	list_add(&buf->list, &bl->buf_list);
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	req->buf_index = buf->bgid;

	io_ring_submit_unlock(ctx, issue_flags);
	return;
}

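/*
 * Release the buffer attached to @req and return the CQE flags describing
 * which buffer was used.
 */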
unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
{
	unsigned int cflags;

	/*
	 * We can add this buffer back to two lists:
	 *
	 * 1) The io_buffers_cache list. This one is protected by the
	 *    ctx->uring_lock. If we already hold this lock, add back to this
	 *    list as we can grab it from issue as well.
	 * 2) The io_buffers_comp list. This one is protected by the
	 *    ctx->completion_lock.
	 *
	 * We migrate buffers from the comp_list to the issue cache list
	 * when we need one.
	 */
	if (req->flags & REQ_F_BUFFER_RING) {
		/* no buffers to recycle for this case */
		cflags = __io_put_kbuf_list(req, NULL);
	} else if (issue_flags & IO_URING_F_UNLOCKED) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock(&ctx->completion_lock);
		cflags = __io_put_kbuf_list(req, &ctx->io_buffers_comp);
		spin_unlock(&ctx->completion_lock);
	} else {
		lockdep_assert_held(&req->ctx->uring_lock);

		cflags = __io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
	}
	return cflags;
}

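/* Pick the first available buffer from a legacy provided-buffer group. */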
static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
					      struct io_buffer_list *bl)
{
	if (!list_empty(&bl->buf_list)) {
		struct io_buffer *kbuf;

		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&kbuf->list);
		if (*len == 0 || *len > kbuf->len)
			*len = kbuf->len;
		req->flags |= REQ_F_BUFFER_SELECTED;
		req->kbuf = kbuf;
		req->buf_index = kbuf->bid;
		return u64_to_user_ptr(kbuf->addr);
	}
	return NULL;
}

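/*
 * Pick the next buffer from a ring-mapped provided-buffer group. The tail is
 * written by userspace, hence the acquire load before reading the entry.
 */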
static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
					  struct io_buffer_list *bl,
					  unsigned int issue_flags)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	struct io_uring_buf *buf;
	__u16 head = bl->head;

	if (unlikely(smp_load_acquire(&br->tail) == head))
		return NULL;

	head &= bl->mask;
	if (head < IO_BUFFER_LIST_BUF_PER_PAGE) {
		buf = &br->bufs[head];
	} else {
		int off = head & (IO_BUFFER_LIST_BUF_PER_PAGE - 1);
		int index = head / IO_BUFFER_LIST_BUF_PER_PAGE;
		buf = page_address(bl->buf_pages[index]);
		buf += off;
	}
	if (*len == 0 || *len > buf->len)
		*len = buf->len;
	req->flags |= REQ_F_BUFFER_RING;
	req->buf_list = bl;
	req->buf_index = buf->bid;

	if (issue_flags & IO_URING_F_UNLOCKED || !file_can_poll(req->file)) {
		/*
		 * If we came in unlocked, we have no choice but to consume the
		 * buffer here, otherwise nothing ensures that the buffer won't
		 * get used by others. This does mean it'll be pinned until the
		 * IO completes. Coming in unlocked means we're being called
		 * from io-wq context and there may be further retries in async
		 * hybrid mode. For the locked case, the caller must call commit
		 * when the transfer completes (or if we get -EAGAIN and must
		 * poll or retry).
		 */
		req->buf_list = NULL;
		bl->head++;
	}
	return u64_to_user_ptr(buf->addr);
}

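/*
 * Select a buffer for the request from its buffer group, using either the
 * ring-mapped or the legacy provided-buffer path.
 */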
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	void __user *ret = NULL;

	io_ring_submit_lock(req->ctx, issue_flags);

	bl = io_buffer_get_list(ctx, req->buf_index);
	if (likely(bl)) {
		if (bl->buf_nr_pages)
			ret = io_ring_buffer_select(req, len, bl, issue_flags);
		else
			ret = io_provided_buffer_select(req, len, bl);
	}
	io_ring_submit_unlock(req->ctx, issue_flags);
	return ret;
}

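/* Allocate and initialise the flat array of buffer lists for low group IDs. */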
static __cold int io_init_bl_list(struct io_ring_ctx *ctx)
{
	int i;

	ctx->io_bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list),
			     GFP_KERNEL);
	if (!ctx->io_bl)
		return -ENOMEM;

	for (i = 0; i < BGID_ARRAY; i++) {
		INIT_LIST_HEAD(&ctx->io_bl[i].buf_list);
		ctx->io_bl[i].bgid = i;
	}

	return 0;
}

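/*
 * Remove up to @nbufs buffers from a group, or unpin the pages of a
 * ring-mapped group entirely. Returns how many buffers were removed.
 */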
static int __io_remove_buffers(struct io_ring_ctx *ctx,
			       struct io_buffer_list *bl, unsigned nbufs)
{
	unsigned i = 0;

	/* shouldn't happen */
	if (!nbufs)
		return 0;

	if (bl->buf_nr_pages) {
		int j;

		i = bl->buf_ring->tail - bl->head;
		for (j = 0; j < bl->buf_nr_pages; j++)
			unpin_user_page(bl->buf_pages[j]);
		kvfree(bl->buf_pages);
		bl->buf_pages = NULL;
		bl->buf_nr_pages = 0;
		/* make sure it's seen as empty */
		INIT_LIST_HEAD(&bl->buf_list);
		return i;
	}

	/* the head kbuf is the list itself */
	while (!list_empty(&bl->buf_list)) {
		struct io_buffer *nxt;

		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&nxt->list);
		if (++i == nbufs)
			return i;
		cond_resched();
	}
	i++;

	return i;
}

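/* Tear down all buffer groups and free the backing pages at ring exit. */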
void io_destroy_buffers(struct io_ring_ctx *ctx)
{
	struct io_buffer_list *bl;
	unsigned long index;
	int i;

	for (i = 0; i < BGID_ARRAY; i++) {
		if (!ctx->io_bl)
			break;
		__io_remove_buffers(ctx, &ctx->io_bl[i], -1U);
	}

	xa_for_each(&ctx->io_bl_xa, index, bl) {
		xa_erase(&ctx->io_bl_xa, bl->bgid);
		__io_remove_buffers(ctx, bl, -1U);
		kfree(bl);
	}

	while (!list_empty(&ctx->io_buffers_pages)) {
		struct page *page;

		page = list_first_entry(&ctx->io_buffers_pages, struct page, lru);
		list_del_init(&page->lru);
		__free_page(page);
	}
}

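/* Parse an IORING_OP_REMOVE_BUFFERS SQE: buffer count and group ID. */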
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
	    sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > USHRT_MAX)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}

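/* Remove up to the requested number of buffers from a legacy buffer group. */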
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	ret = -ENOENT;
	bl = io_buffer_get_list(ctx, p->bgid);
	if (bl) {
		ret = -EINVAL;
		/* can't use provide/remove buffers command on mapped buffers */
		if (!bl->buf_nr_pages)
			ret = __io_remove_buffers(ctx, bl, p->nbufs);
	}
	if (ret < 0)
		req_set_fail(req);

	/* complete before unlock, IOPOLL may need the lock */
	io_req_set_res(req, ret, 0);
	__io_req_complete(req, issue_flags);
	io_ring_submit_unlock(ctx, issue_flags);
	return IOU_ISSUE_SKIP_COMPLETE;
}

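/*
 * Parse and validate an IORING_OP_PROVIDE_BUFFERS SQE: buffer address, length,
 * count, group ID and starting buffer ID.
 */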
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	unsigned long size, tmp_check;
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > USHRT_MAX)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);

	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
			       &size))
		return -EOVERFLOW;
	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
		return -EOVERFLOW;

	size = (unsigned long)p->len * p->nbufs;
	if (!access_ok(u64_to_user_ptr(p->addr), size))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	if (tmp + p->nbufs >= USHRT_MAX)
		return -EINVAL;
	p->bid = tmp;
	return 0;
}

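/*
 * Refill ->io_buffers_cache, first by reclaiming entries from the completion
 * side list and otherwise by allocating a fresh page of io_buffer structs.
 */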
static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
{
	struct io_buffer *buf;
	struct page *page;
	int bufs_in_page;

	/*
	 * Completions that don't happen inline (eg not under uring_lock) will
	 * add to ->io_buffers_comp. If we don't have any free buffers, check
	 * the completion list and splice those entries first.
	 */
	if (!list_empty_careful(&ctx->io_buffers_comp)) {
		spin_lock(&ctx->completion_lock);
		if (!list_empty(&ctx->io_buffers_comp)) {
			list_splice_init(&ctx->io_buffers_comp,
					 &ctx->io_buffers_cache);
			spin_unlock(&ctx->completion_lock);
			return 0;
		}
		spin_unlock(&ctx->completion_lock);
	}

	/*
	 * No free buffers and no completion entries either. Allocate a new
	 * page worth of buffer entries and add those to our freelist.
	 */
	page = alloc_page(GFP_KERNEL_ACCOUNT);
	if (!page)
		return -ENOMEM;

	list_add(&page->lru, &ctx->io_buffers_pages);

	buf = page_address(page);
	bufs_in_page = PAGE_SIZE / sizeof(*buf);
	while (bufs_in_page) {
		list_add_tail(&buf->list, &ctx->io_buffers_cache);
		buf++;
		bufs_in_page--;
	}

	return 0;
}

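/* Add the requested buffers to a group, pulling io_buffer entries from the cache. */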
static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
			  struct io_buffer_list *bl)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		if (list_empty(&ctx->io_buffers_cache) &&
		    io_refill_buffer_cache(ctx))
			break;
		buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
				       list);
		list_move_tail(&buf->list, &bl->buf_list);
		buf->addr = addr;
		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
		buf->bid = bid;
		buf->bgid = pbuf->bgid;
		addr += pbuf->len;
		bid++;
		cond_resched();
	}

	return i ? 0 : -ENOMEM;
}

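/*
 * Handle IORING_OP_PROVIDE_BUFFERS: add buffers to an existing group, creating
 * the group first if needed.
 */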
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	if (unlikely(p->bgid < BGID_ARRAY && !ctx->io_bl)) {
		ret = io_init_bl_list(ctx);
		if (ret)
			goto err;
	}

	bl = io_buffer_get_list(ctx, p->bgid);
	if (unlikely(!bl)) {
		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
		if (!bl) {
			ret = -ENOMEM;
			goto err;
		}
		INIT_LIST_HEAD(&bl->buf_list);
		ret = io_buffer_add_list(ctx, bl, p->bgid);
		if (ret) {
			kfree(bl);
			goto err;
		}
	}
	/* can't add buffers via this command for a mapped buffer ring */
	if (bl->buf_nr_pages) {
		ret = -EINVAL;
		goto err;
	}

	ret = io_add_buffers(ctx, p, bl);
err:
	if (ret < 0)
		req_set_fail(req);
	/* complete before unlock, IOPOLL may need the lock */
	io_req_set_res(req, ret, 0);
	__io_req_complete(req, issue_flags);
	io_ring_submit_unlock(ctx, issue_flags);
	return IOU_ISSUE_SKIP_COMPLETE;
}

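/*
 * Register a ring-mapped provided buffer ring (IORING_REGISTER_PBUF_RING):
 * validate the registration, pin the userspace pages and hook the ring up to
 * the requested buffer group.
 */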
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_ring *br;
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl, *free_bl = NULL;
	struct page **pages;
	int nr_pages;

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;

	if (reg.pad || reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (!reg.ring_addr)
		return -EFAULT;
	if (reg.ring_addr & ~PAGE_MASK)
		return -EINVAL;
	if (!is_power_of_2(reg.ring_entries))
		return -EINVAL;

	/* cannot disambiguate full vs empty due to head/tail size */
	if (reg.ring_entries >= 65536)
		return -EINVAL;

	if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) {
		int ret = io_init_bl_list(ctx);
		if (ret)
			return ret;
	}

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (bl) {
		/* if mapped buffer ring OR classic exists, don't allow */
		if (bl->buf_nr_pages || !list_empty(&bl->buf_list))
			return -EEXIST;
	} else {
		free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
		if (!bl)
			return -ENOMEM;
	}

	pages = io_pin_pages(reg.ring_addr,
			     struct_size(br, bufs, reg.ring_entries),
			     &nr_pages);
	if (IS_ERR(pages)) {
		kfree(free_bl);
		return PTR_ERR(pages);
	}

	br = page_address(pages[0]);
	bl->buf_pages = pages;
	bl->buf_nr_pages = nr_pages;
	bl->nr_entries = reg.ring_entries;
	bl->buf_ring = br;
	bl->mask = reg.ring_entries - 1;
	io_buffer_add_list(ctx, bl, reg.bgid);
	return 0;
}

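/* Unregister a ring-mapped buffer ring (IORING_UNREGISTER_PBUF_RING) and unpin its pages. */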
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl;

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (reg.pad || reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (!bl)
		return -ENOENT;
	if (!bl->buf_nr_pages)
		return -EINVAL;

	__io_remove_buffers(ctx, bl, -1U);
	if (bl->bgid >= BGID_ARRAY) {
		xa_erase(&ctx->io_bl_xa, bl->bgid);
		kfree(bl);
	}
	return 0;
}
