1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/fs.h>
5 #include <linux/file.h>
6 #include <linux/blk-mq.h>
7 #include <linux/mm.h>
8 #include <linux/slab.h>
9 #include <linux/fsnotify.h>
10 #include <linux/poll.h>
11 #include <linux/nospec.h>
12 #include <linux/compat.h>
13 #include <linux/io_uring.h>
14
15 #include <uapi/linux/io_uring.h>
16
17 #include "io_uring.h"
18 #include "opdef.h"
19 #include "kbuf.h"
20 #include "rsrc.h"
21 #include "rw.h"
22
23 struct io_rw {
24 /* NOTE: kiocb has the file as the first member, so don't do it here */
25 struct kiocb kiocb;
26 u64 addr;
27 u32 len;
28 rwf_t flags;
29 };
30
31 static inline bool io_file_supports_nowait(struct io_kiocb *req)
32 {
33 return req->flags & REQ_F_SUPPORT_NOWAIT;
34 }
35
36 #ifdef CONFIG_COMPAT
37 static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
38 {
39 struct compat_iovec __user *uiov;
40 compat_ssize_t clen;
41
42 uiov = u64_to_user_ptr(rw->addr);
43 if (!access_ok(uiov, sizeof(*uiov)))
44 return -EFAULT;
45 if (__get_user(clen, &uiov->iov_len))
46 return -EFAULT;
47 if (clen < 0)
48 return -EINVAL;
49
50 rw->len = clen;
51 return 0;
52 }
53 #endif
54
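/*
 * READV with buffer selection only allows a single iovec: pull the length
 * out of the user iovec here so the selected buffer can later be imported
 * with the right size.
 */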
55 static int io_iov_buffer_select_prep(struct io_kiocb *req)
56 {
57 struct iovec __user *uiov;
58 struct iovec iov;
59 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
60
61 if (rw->len != 1)
62 return -EINVAL;
63
64 #ifdef CONFIG_COMPAT
65 if (req->ctx->compat)
66 return io_iov_compat_buffer_select_prep(rw);
67 #endif
68
69 uiov = u64_to_user_ptr(rw->addr);
70 if (copy_from_user(&iov, uiov, sizeof(*uiov)))
71 return -EFAULT;
72 rw->len = iov.iov_len;
73 return 0;
74 }
75
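/*
 * Common SQE prep for the read/write opcodes: read the offset, buffer index
 * (resolving the registered buffer for READ_FIXED/WRITE_FIXED), ioprio,
 * address, length and rw_flags from the SQE. For READV with buffer
 * selection, the single-iovec length is validated here as well.
 */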
76 int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
77 {
78 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
79 unsigned ioprio;
80 int ret;
81
82 rw->kiocb.ki_pos = READ_ONCE(sqe->off);
83 /* used for fixed read/write too - just read unconditionally */
84 req->buf_index = READ_ONCE(sqe->buf_index);
85
86 if (req->opcode == IORING_OP_READ_FIXED ||
87 req->opcode == IORING_OP_WRITE_FIXED) {
88 struct io_ring_ctx *ctx = req->ctx;
89 u16 index;
90
91 if (unlikely(req->buf_index >= ctx->nr_user_bufs))
92 return -EFAULT;
93 index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
94 req->imu = ctx->user_bufs[index];
95 io_req_set_rsrc_node(req, ctx, 0);
96 }
97
98 ioprio = READ_ONCE(sqe->ioprio);
99 if (ioprio) {
100 ret = ioprio_check_cap(ioprio);
101 if (ret)
102 return ret;
103
104 rw->kiocb.ki_ioprio = ioprio;
105 } else {
106 rw->kiocb.ki_ioprio = get_current_ioprio();
107 }
108
109 rw->addr = READ_ONCE(sqe->addr);
110 rw->len = READ_ONCE(sqe->len);
111 rw->flags = READ_ONCE(sqe->rw_flags);
112
113 /* Have to do this validation here, as by the time we get to io_read(),
114 * rw->len might have changed due to buffer selection
115 */
116 if (req->opcode == IORING_OP_READV && req->flags & REQ_F_BUFFER_SELECT) {
117 ret = io_iov_buffer_select_prep(req);
118 if (ret)
119 return ret;
120 }
121
122 return 0;
123 }
124
125 void io_readv_writev_cleanup(struct io_kiocb *req)
126 {
127 struct io_async_rw *io = req->async_data;
128
129 kfree(io->free_iovec);
130 }
131
132 static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
133 {
134 switch (ret) {
135 case -EIOCBQUEUED:
136 break;
137 case -ERESTARTSYS:
138 case -ERESTARTNOINTR:
139 case -ERESTARTNOHAND:
140 case -ERESTART_RESTARTBLOCK:
141 /*
142 * We can't just restart the syscall, since previously
143 * submitted sqes may already be in progress. Just fail this
144 * IO with EINTR.
145 */
146 ret = -EINTR;
147 fallthrough;
148 default:
149 kiocb->ki_complete(kiocb, ret);
150 }
151 }
152
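/*
 * Work out which position to use for this IO: an explicit offset if one was
 * given, the file's current position for non-stream files (flagged with
 * REQ_F_CUR_POS so f_pos gets written back on completion), or NULL for
 * stream-like files.
 */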
153 static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
154 {
155 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
156
157 if (rw->kiocb.ki_pos != -1)
158 return &rw->kiocb.ki_pos;
159
160 if (!(req->file->f_mode & FMODE_STREAM)) {
161 req->flags |= REQ_F_CUR_POS;
162 rw->kiocb.ki_pos = req->file->f_pos;
163 return &rw->kiocb.ki_pos;
164 }
165
166 rw->kiocb.ki_pos = 0;
167 return NULL;
168 }
169
170 static void io_req_task_queue_reissue(struct io_kiocb *req)
171 {
172 req->io_task_work.func = io_queue_iowq;
173 io_req_task_work_add(req);
174 }
175
176 #ifdef CONFIG_BLOCK
177 static bool io_resubmit_prep(struct io_kiocb *req)
178 {
179 struct io_async_rw *io = req->async_data;
180
181 if (!req_has_async_data(req))
182 return !io_req_prep_async(req);
183 iov_iter_restore(&io->s.iter, &io->s.iter_state);
184 return true;
185 }
186
187 static bool io_rw_should_reissue(struct io_kiocb *req)
188 {
189 umode_t mode = file_inode(req->file)->i_mode;
190 struct io_ring_ctx *ctx = req->ctx;
191
192 if (!S_ISBLK(mode) && !S_ISREG(mode))
193 return false;
194 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
195 !(ctx->flags & IORING_SETUP_IOPOLL)))
196 return false;
197 /*
198 * If ref is dying, we might be running poll reap from the exit work.
199 * Don't attempt to reissue from that path, just let it fail with
200 * -EAGAIN.
201 */
202 if (percpu_ref_is_dying(&ctx->refs))
203 return false;
204 /*
205 * Play it safe and assume not safe to re-import and reissue if we're
206 * not in the original thread group (or not in task context).
207 */
208 if (!same_thread_group(req->task, current) || !in_task())
209 return false;
210 return true;
211 }
212 #else
213 static bool io_resubmit_prep(struct io_kiocb *req)
214 {
215 return false;
216 }
217 static bool io_rw_should_reissue(struct io_kiocb *req)
218 {
219 return false;
220 }
221 #endif
222
223 static void kiocb_end_write(struct io_kiocb *req)
224 {
225 /*
226 * Tell lockdep we inherited freeze protection from submission
227 * thread.
228 */
229 if (req->flags & REQ_F_ISREG) {
230 struct super_block *sb = file_inode(req->file)->i_sb;
231
232 __sb_writers_acquired(sb, SB_FREEZE_WRITE);
233 sb_end_write(sb);
234 }
235 }
236
237 /*
238 * Trigger the notifications after having done some IO, and finish the write
239 * accounting, if any.
240 */
241 static void io_req_io_end(struct io_kiocb *req)
242 {
243 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
244
245 if (rw->kiocb.ki_flags & IOCB_WRITE) {
246 kiocb_end_write(req);
247 fsnotify_modify(req->file);
248 } else {
249 fsnotify_access(req->file);
250 }
251 }
252
253 static bool __io_complete_rw_common(struct io_kiocb *req, long res)
254 {
255 if (unlikely(res != req->cqe.res)) {
256 if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
257 io_rw_should_reissue(req)) {
258 /*
259 * Reissue will start accounting again, finish the
260 * current cycle.
261 */
262 io_req_io_end(req);
263 req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
264 return true;
265 }
266 req_set_fail(req);
267 req->cqe.res = res;
268 }
269 return false;
270 }
271
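/*
 * Fold in bytes completed by earlier partial attempts (io->bytes_done), so
 * the final result reflects everything transferred for this request.
 */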
272 static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
273 {
274 struct io_async_rw *io = req->async_data;
275
276 /* add previously done IO, if any */
277 if (req_has_async_data(req) && io->bytes_done > 0) {
278 if (res < 0)
279 res = io->bytes_done;
280 else
281 res += io->bytes_done;
282 }
283 return res;
284 }
285
286 static void io_req_rw_complete(struct io_kiocb *req, bool *locked)
287 {
288 io_req_io_end(req);
289 io_req_task_complete(req, locked);
290 }
291
292 static void io_complete_rw(struct kiocb *kiocb, long res)
293 {
294 struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
295 struct io_kiocb *req = cmd_to_io_kiocb(rw);
296
297 if (__io_complete_rw_common(req, res))
298 return;
299 io_req_set_res(req, io_fixup_rw_res(req, res), 0);
300 req->io_task_work.func = io_req_rw_complete;
301 io_req_task_work_add(req);
302 }
303
304 static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
305 {
306 struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
307 struct io_kiocb *req = cmd_to_io_kiocb(rw);
308
309 if (kiocb->ki_flags & IOCB_WRITE)
310 kiocb_end_write(req);
311 if (unlikely(res != req->cqe.res)) {
312 if (res == -EAGAIN && io_rw_should_reissue(req)) {
313 req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
314 return;
315 }
316 req->cqe.res = res;
317 }
318
319 /* order with io_iopoll_complete() checking ->iopoll_completed */
320 smp_store_release(&req->iopoll_completed, 1);
321 }
322
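/*
 * Finish a request that wasn't queued asynchronously: write back f_pos if
 * needed, complete inline where possible, otherwise hand the result to the
 * kiocb completion handler. A flagged REQ_F_REISSUE is turned into a
 * task_work based resubmission (or a failure if re-prep isn't possible).
 */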
323 static int kiocb_done(struct io_kiocb *req, ssize_t ret,
324 unsigned int issue_flags)
325 {
326 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
327 unsigned final_ret = io_fixup_rw_res(req, ret);
328
329 if (req->flags & REQ_F_CUR_POS)
330 req->file->f_pos = rw->kiocb.ki_pos;
331 if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
332 if (!__io_complete_rw_common(req, ret)) {
333 /*
334 * Safe to call io_end from here as we're inline
335 * from the submission path.
336 */
337 io_req_io_end(req);
338 io_req_set_res(req, final_ret,
339 io_put_kbuf(req, issue_flags));
340 return IOU_OK;
341 }
342 } else {
343 io_rw_done(&rw->kiocb, ret);
344 }
345
346 if (req->flags & REQ_F_REISSUE) {
347 req->flags &= ~REQ_F_REISSUE;
348 if (io_resubmit_prep(req))
349 io_req_task_queue_reissue(req);
350 else
351 io_req_task_queue_fail(req, final_ret);
352 }
353 return IOU_ISSUE_SKIP_COMPLETE;
354 }
355
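/*
 * Build the iov_iter for this request. Three cases: registered (fixed)
 * buffers, plain READ/WRITE or provided buffers imported as a single range,
 * and readv/writev imported via __import_iovec(). Only the last case may
 * return a heap-allocated iovec, which the caller is responsible for freeing.
 */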
356 static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
357 struct io_rw_state *s,
358 unsigned int issue_flags)
359 {
360 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
361 struct iov_iter *iter = &s->iter;
362 u8 opcode = req->opcode;
363 struct iovec *iovec;
364 void __user *buf;
365 size_t sqe_len;
366 ssize_t ret;
367
368 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
369 ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);
370 if (ret)
371 return ERR_PTR(ret);
372 return NULL;
373 }
374
375 buf = u64_to_user_ptr(rw->addr);
376 sqe_len = rw->len;
377
378 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE ||
379 (req->flags & REQ_F_BUFFER_SELECT)) {
380 if (io_do_buffer_select(req)) {
381 buf = io_buffer_select(req, &sqe_len, issue_flags);
382 if (!buf)
383 return ERR_PTR(-ENOBUFS);
384 rw->addr = (unsigned long) buf;
385 rw->len = sqe_len;
386 }
387
388 ret = import_single_range(ddir, buf, sqe_len, s->fast_iov, iter);
389 if (ret)
390 return ERR_PTR(ret);
391 return NULL;
392 }
393
394 iovec = s->fast_iov;
395 ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
396 req->ctx->compat);
397 if (unlikely(ret < 0))
398 return ERR_PTR(ret);
399 return iovec;
400 }
401
402 static inline int io_import_iovec(int rw, struct io_kiocb *req,
403 struct iovec **iovec, struct io_rw_state *s,
404 unsigned int issue_flags)
405 {
406 *iovec = __io_import_iovec(rw, req, s, issue_flags);
407 if (unlikely(IS_ERR(*iovec)))
408 return PTR_ERR(*iovec);
409
410 iov_iter_save_state(&s->iter, &s->iter_state);
411 return 0;
412 }
413
414 static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
415 {
416 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
417 }
418
419 /*
420 * For files that don't have ->read_iter() and ->write_iter(), handle them
421 * by looping over ->read() or ->write() manually.
422 */
423 static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
424 {
425 struct kiocb *kiocb = &rw->kiocb;
426 struct file *file = kiocb->ki_filp;
427 ssize_t ret = 0;
428 loff_t *ppos;
429
430 /*
431 * Don't support polled IO through this interface, and we can't
432 * support non-blocking either. For the latter, this just causes
433 * the kiocb to be handled from an async context.
434 */
435 if (kiocb->ki_flags & IOCB_HIPRI)
436 return -EOPNOTSUPP;
437 if ((kiocb->ki_flags & IOCB_NOWAIT) &&
438 !(kiocb->ki_filp->f_flags & O_NONBLOCK))
439 return -EAGAIN;
440
441 ppos = io_kiocb_ppos(kiocb);
442
443 while (iov_iter_count(iter)) {
444 struct iovec iovec;
445 ssize_t nr;
446
447 if (!iov_iter_is_bvec(iter)) {
448 iovec = iov_iter_iovec(iter);
449 } else {
450 iovec.iov_base = u64_to_user_ptr(rw->addr);
451 iovec.iov_len = rw->len;
452 }
453
454 if (ddir == READ) {
455 nr = file->f_op->read(file, iovec.iov_base,
456 iovec.iov_len, ppos);
457 } else {
458 nr = file->f_op->write(file, iovec.iov_base,
459 iovec.iov_len, ppos);
460 }
461
462 if (nr < 0) {
463 if (!ret)
464 ret = nr;
465 break;
466 }
467 ret += nr;
468 if (!iov_iter_is_bvec(iter)) {
469 iov_iter_advance(iter, nr);
470 } else {
471 rw->addr += nr;
472 rw->len -= nr;
473 if (!rw->len)
474 break;
475 }
476 if (nr != iovec.iov_len)
477 break;
478 }
479
480 return ret;
481 }
482
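/*
 * Copy the iterator (and, for vectored IO, the iovec) into the request's
 * async data so the IO can be retried later from a different context. If a
 * separately allocated iovec is handed in, ownership moves to the request
 * and REQ_F_NEED_CLEANUP ensures it gets freed.
 */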
483 static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
484 const struct iovec *fast_iov, struct iov_iter *iter)
485 {
486 struct io_async_rw *io = req->async_data;
487
488 memcpy(&io->s.iter, iter, sizeof(*iter));
489 io->free_iovec = iovec;
490 io->bytes_done = 0;
491 /* can only be fixed buffers, no need to do anything */
492 if (iov_iter_is_bvec(iter))
493 return;
494 if (!iovec) {
495 unsigned iov_off = 0;
496
497 io->s.iter.iov = io->s.fast_iov;
498 if (iter->iov != fast_iov) {
499 iov_off = iter->iov - fast_iov;
500 io->s.iter.iov += iov_off;
501 }
502 if (io->s.fast_iov != fast_iov)
503 memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off,
504 sizeof(struct iovec) * iter->nr_segs);
505 } else {
506 req->flags |= REQ_F_NEED_CLEANUP;
507 }
508 }
509
510 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
511 struct io_rw_state *s, bool force)
512 {
513 if (!force && !io_op_defs[req->opcode].prep_async)
514 return 0;
515 if (!req_has_async_data(req)) {
516 struct io_async_rw *iorw;
517
518 if (io_alloc_async_data(req)) {
519 kfree(iovec);
520 return -ENOMEM;
521 }
522
523 io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
524 iorw = req->async_data;
525 /* we've copied and mapped the iter, ensure state is saved */
526 iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
527 }
528 return 0;
529 }
530
531 static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
532 {
533 struct io_async_rw *iorw = req->async_data;
534 struct iovec *iov;
535 int ret;
536
537 /* submission path, ->uring_lock should already be taken */
538 ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
539 if (unlikely(ret < 0))
540 return ret;
541
542 iorw->bytes_done = 0;
543 iorw->free_iovec = iov;
544 if (iov)
545 req->flags |= REQ_F_NEED_CLEANUP;
546 return 0;
547 }
548
549 int io_readv_prep_async(struct io_kiocb *req)
550 {
551 return io_rw_prep_async(req, READ);
552 }
553
554 int io_writev_prep_async(struct io_kiocb *req)
555 {
556 return io_rw_prep_async(req, WRITE);
557 }
558
559 /*
560 * This is our waitqueue callback handler, registered through __folio_lock_async()
561 * when we initially tried to do the IO with the iocb and armed our waitqueue.
562 * This gets called when the page is unlocked, and we generally expect that to
563 * happen when the page IO is completed and the page is now uptodate. This will
564 * queue a task_work based retry of the operation, attempting to copy the data
565 * again. If the latter fails because the page was NOT uptodate, then we will
566 * do a thread based blocking retry of the operation. That's the unexpected
567 * slow path.
568 */
569 static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
570 int sync, void *arg)
571 {
572 struct wait_page_queue *wpq;
573 struct io_kiocb *req = wait->private;
574 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
575 struct wait_page_key *key = arg;
576
577 wpq = container_of(wait, struct wait_page_queue, wait);
578
579 if (!wake_page_match(wpq, key))
580 return 0;
581
582 rw->kiocb.ki_flags &= ~IOCB_WAITQ;
583 list_del_init(&wait->entry);
584 io_req_task_queue(req);
585 return 1;
586 }
587
588 /*
589 * This controls whether a given IO request should be armed for async page
590 * based retry. If we return false here, the request is handed to the async
591 * worker threads for retry. If we're doing buffered reads on a regular file,
592 * we prepare a private wait_page_queue entry and retry the operation. This
593 * will either succeed because the page is now uptodate and unlocked, or it
594 * will register a callback when the page is unlocked at IO completion. Through
595 * that callback, io_uring uses task_work to setup a retry of the operation.
596 * That retry will attempt the buffered read again. The retry will generally
597 * succeed, or in rare cases where it fails, we then fall back to using the
598 * async worker threads for a blocking retry.
599 */
600 static bool io_rw_should_retry(struct io_kiocb *req)
601 {
602 struct io_async_rw *io = req->async_data;
603 struct wait_page_queue *wait = &io->wpq;
604 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
605 struct kiocb *kiocb = &rw->kiocb;
606
607 /* never retry for NOWAIT, we just complete with -EAGAIN */
608 if (req->flags & REQ_F_NOWAIT)
609 return false;
610
611 /* Only for buffered IO */
612 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
613 return false;
614
615 /*
616 * just use poll if we can, and don't attempt if the fs doesn't
617 * support callback based unlocks
618 */
619 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
620 return false;
621
622 wait->wait.func = io_async_buf_func;
623 wait->wait.private = req;
624 wait->wait.flags = 0;
625 INIT_LIST_HEAD(&wait->wait.entry);
626 kiocb->ki_flags |= IOCB_WAITQ;
627 kiocb->ki_flags &= ~IOCB_NOWAIT;
628 kiocb->ki_waitq = wait;
629 return true;
630 }
631
632 static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
633 {
634 struct file *file = rw->kiocb.ki_filp;
635
636 if (likely(file->f_op->read_iter))
637 return call_read_iter(file, &rw->kiocb, iter);
638 else if (file->f_op->read)
639 return loop_rw_iter(READ, rw, iter);
640 else
641 return -EINVAL;
642 }
643
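/*
 * Regular files and block devices are expected to transfer the full amount:
 * a short read or write on them is retried (or finished from io-wq) rather
 * than completed short.
 */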
644 static bool need_complete_io(struct io_kiocb *req)
645 {
646 return req->flags & REQ_F_ISREG ||
647 S_ISBLK(file_inode(req->file)->i_mode);
648 }
649
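/*
 * Per-issue kiocb setup: validate the file mode, apply the rw_flags and
 * NOWAIT handling, and pick the completion callback depending on whether
 * this ring uses IOPOLL.
 */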
650 static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
651 {
652 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
653 struct kiocb *kiocb = &rw->kiocb;
654 struct io_ring_ctx *ctx = req->ctx;
655 struct file *file = req->file;
656 int ret;
657
658 if (unlikely(!file || !(file->f_mode & mode)))
659 return -EBADF;
660
661 if (!io_req_ffs_set(req))
662 req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
663
664 kiocb->ki_flags = file->f_iocb_flags;
665 ret = kiocb_set_rw_flags(kiocb, rw->flags);
666 if (unlikely(ret))
667 return ret;
668
669 /*
670 * If the file is marked O_NONBLOCK, still allow retry for it if it
671 * supports async. Otherwise it's impossible to use O_NONBLOCK files
672 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
673 */
674 if ((kiocb->ki_flags & IOCB_NOWAIT) ||
675 ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
676 req->flags |= REQ_F_NOWAIT;
677
678 if (ctx->flags & IORING_SETUP_IOPOLL) {
679 if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
680 return -EOPNOTSUPP;
681
682 kiocb->private = NULL;
683 kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
684 kiocb->ki_complete = io_complete_rw_iopoll;
685 req->iopoll_completed = 0;
686 } else {
687 if (kiocb->ki_flags & IOCB_HIPRI)
688 return -EINVAL;
689 kiocb->ki_complete = io_complete_rw;
690 }
691
692 return 0;
693 }
694
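/*
 * Issue a read. On the nonblocking path this imports (or restores) the
 * iterator, attempts the IO with IOCB_NOWAIT, and on -EAGAIN either punts
 * to io-wq with saved state or, for buffered reads that support it, retries
 * via IOCB_WAITQ once the page becomes available. Partial reads are
 * accounted in bytes_done and resumed from the new offset.
 */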
695 int io_read(struct io_kiocb *req, unsigned int issue_flags)
696 {
697 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
698 struct io_rw_state __s, *s = &__s;
699 struct iovec *iovec;
700 struct kiocb *kiocb = &rw->kiocb;
701 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
702 struct io_async_rw *io;
703 ssize_t ret, ret2;
704 loff_t *ppos;
705
706 if (!req_has_async_data(req)) {
707 ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
708 if (unlikely(ret < 0))
709 return ret;
710 } else {
711 io = req->async_data;
712 s = &io->s;
713
714 /*
715 * Safe and required to re-import if we're using provided
716 * buffers, as we dropped the selected one before retry.
717 */
718 if (io_do_buffer_select(req)) {
719 ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
720 if (unlikely(ret < 0))
721 return ret;
722 }
723
724 /*
725 * We come here from an earlier attempt, restore our state to
726 * match in case it doesn't. It's cheap enough that we don't
727 * need to make this conditional.
728 */
729 iov_iter_restore(&s->iter, &s->iter_state);
730 iovec = NULL;
731 }
732 ret = io_rw_init_file(req, FMODE_READ);
733 if (unlikely(ret)) {
734 kfree(iovec);
735 return ret;
736 }
737 req->cqe.res = iov_iter_count(&s->iter);
738
739 if (force_nonblock) {
740 /* If the file doesn't support async, just async punt */
741 if (unlikely(!io_file_supports_nowait(req))) {
742 ret = io_setup_async_rw(req, iovec, s, true);
743 return ret ?: -EAGAIN;
744 }
745 kiocb->ki_flags |= IOCB_NOWAIT;
746 } else {
747 /* Ensure we clear previously set non-block flag */
748 kiocb->ki_flags &= ~IOCB_NOWAIT;
749 }
750
751 ppos = io_kiocb_update_pos(req);
752
753 ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
754 if (unlikely(ret)) {
755 kfree(iovec);
756 return ret;
757 }
758
759 ret = io_iter_do_read(rw, &s->iter);
760
761 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
762 req->flags &= ~REQ_F_REISSUE;
763 /* if we can poll, just do that */
764 if (req->opcode == IORING_OP_READ && file_can_poll(req->file))
765 return -EAGAIN;
766 /* IOPOLL retry should happen for io-wq threads */
767 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
768 goto done;
769 /* no retry on NONBLOCK nor RWF_NOWAIT */
770 if (req->flags & REQ_F_NOWAIT)
771 goto done;
772 ret = 0;
773 } else if (ret == -EIOCBQUEUED) {
774 if (iovec)
775 kfree(iovec);
776 return IOU_ISSUE_SKIP_COMPLETE;
777 } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
778 (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
779 /* read all, failed, already did sync or don't want to retry */
780 goto done;
781 }
782
783 /*
784 * Don't depend on the iter state matching what was consumed, or being
785 * untouched in case of error. Restore it and we'll advance it
786 * manually if we need to.
787 */
788 iov_iter_restore(&s->iter, &s->iter_state);
789
790 ret2 = io_setup_async_rw(req, iovec, s, true);
791 iovec = NULL;
792 if (ret2) {
793 ret = ret > 0 ? ret : ret2;
794 goto done;
795 }
796
797 io = req->async_data;
798 s = &io->s;
799 /*
800 * Now use our persistent iterator and state, if we aren't already.
801 * We've restored and mapped the iter to match.
802 */
803
804 do {
805 /*
806 * We end up here because of a partial read, either from
807 * above or inside this loop. Advance the iter by the bytes
808 * that were consumed.
809 */
810 iov_iter_advance(&s->iter, ret);
811 if (!iov_iter_count(&s->iter))
812 break;
813 io->bytes_done += ret;
814 iov_iter_save_state(&s->iter, &s->iter_state);
815
816 /* if we can retry, do so with the callbacks armed */
817 if (!io_rw_should_retry(req)) {
818 kiocb->ki_flags &= ~IOCB_WAITQ;
819 return -EAGAIN;
820 }
821
822 req->cqe.res = iov_iter_count(&s->iter);
823 /*
824 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
825 * we get -EIOCBQUEUED, then we'll get a notification when the
826 * desired page gets unlocked. We can also get a partial read
827 * here, and if we do, then just retry at the new offset.
828 */
829 ret = io_iter_do_read(rw, &s->iter);
830 if (ret == -EIOCBQUEUED)
831 return IOU_ISSUE_SKIP_COMPLETE;
832 /* we got some bytes, but not all. retry. */
833 kiocb->ki_flags &= ~IOCB_WAITQ;
834 iov_iter_restore(&s->iter, &s->iter_state);
835 } while (ret > 0);
836 done:
837 /* it's faster to check here than to delegate to kfree */
838 if (iovec)
839 kfree(iovec);
840 return kiocb_done(req, ret, issue_flags);
841 }
842
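/*
 * Issue a write. Freeze protection for regular files is taken open-coded
 * here and dropped in kiocb_end_write() once the IO completes. -EAGAIN on
 * the nonblocking path punts to io-wq via the copy_iov path; partial writes
 * are recorded in bytes_done and finished from a worker.
 */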
843 int io_write(struct io_kiocb *req, unsigned int issue_flags)
844 {
845 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
846 struct io_rw_state __s, *s = &__s;
847 struct iovec *iovec;
848 struct kiocb *kiocb = &rw->kiocb;
849 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
850 ssize_t ret, ret2;
851 loff_t *ppos;
852
853 if (!req_has_async_data(req)) {
854 ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags);
855 if (unlikely(ret < 0))
856 return ret;
857 } else {
858 struct io_async_rw *io = req->async_data;
859
860 s = &io->s;
861 iov_iter_restore(&s->iter, &s->iter_state);
862 iovec = NULL;
863 }
864 ret = io_rw_init_file(req, FMODE_WRITE);
865 if (unlikely(ret)) {
866 kfree(iovec);
867 return ret;
868 }
869 req->cqe.res = iov_iter_count(&s->iter);
870
871 if (force_nonblock) {
872 /* If the file doesn't support async, just async punt */
873 if (unlikely(!io_file_supports_nowait(req)))
874 goto copy_iov;
875
876 /* For non-direct IO, the file path only supports NOWAIT on block devices or FMODE_BUF_WASYNC files. */
877 if (!(kiocb->ki_flags & IOCB_DIRECT) &&
878 !(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) &&
879 (req->flags & REQ_F_ISREG))
880 goto copy_iov;
881
882 kiocb->ki_flags |= IOCB_NOWAIT;
883 } else {
884 /* Ensure we clear previously set non-block flag */
885 kiocb->ki_flags &= ~IOCB_NOWAIT;
886 }
887
888 ppos = io_kiocb_update_pos(req);
889
890 ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
891 if (unlikely(ret)) {
892 kfree(iovec);
893 return ret;
894 }
895
896 /*
897 * Open-code file_start_write here to grab freeze protection,
898 * which will be released by another thread in
899 * io_complete_rw(). Fool lockdep by telling it the lock got
900 * released so that it doesn't complain about the held lock when
901 * we return to userspace.
902 */
903 if (req->flags & REQ_F_ISREG) {
904 sb_start_write(file_inode(req->file)->i_sb);
905 __sb_writers_release(file_inode(req->file)->i_sb,
906 SB_FREEZE_WRITE);
907 }
908 kiocb->ki_flags |= IOCB_WRITE;
909
910 if (likely(req->file->f_op->write_iter))
911 ret2 = call_write_iter(req->file, kiocb, &s->iter);
912 else if (req->file->f_op->write)
913 ret2 = loop_rw_iter(WRITE, rw, &s->iter);
914 else
915 ret2 = -EINVAL;
916
917 if (req->flags & REQ_F_REISSUE) {
918 req->flags &= ~REQ_F_REISSUE;
919 ret2 = -EAGAIN;
920 }
921
922 /*
923 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
924 * retry them without IOCB_NOWAIT.
925 */
926 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
927 ret2 = -EAGAIN;
928 /* no retry on NONBLOCK nor RWF_NOWAIT */
929 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
930 goto done;
931 if (!force_nonblock || ret2 != -EAGAIN) {
932 /* IOPOLL retry should happen for io-wq threads */
933 if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
934 goto copy_iov;
935
936 if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
937 struct io_async_rw *io;
938
939 trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
940 req->cqe.res, ret2);
941
942 /* This is a partial write. The file pos has already been
943 * updated, so set up the async struct to complete the request
944 * in the worker. Also update bytes_done to account for
945 * the bytes already written.
946 */
947 iov_iter_save_state(&s->iter, &s->iter_state);
948 ret = io_setup_async_rw(req, iovec, s, true);
949
950 io = req->async_data;
951 if (io)
952 io->bytes_done += ret2;
953
954 if (kiocb->ki_flags & IOCB_WRITE)
955 kiocb_end_write(req);
956 return ret ? ret : -EAGAIN;
957 }
958 done:
959 ret = kiocb_done(req, ret2, issue_flags);
960 } else {
961 copy_iov:
962 iov_iter_restore(&s->iter, &s->iter_state);
963 ret = io_setup_async_rw(req, iovec, s, false);
964 if (!ret) {
965 if (kiocb->ki_flags & IOCB_WRITE)
966 kiocb_end_write(req);
967 return -EAGAIN;
968 }
969 return ret;
970 }
971 /* it's reportedly faster than delegating the null check to kfree() */
972 if (iovec)
973 kfree(iovec);
974 return ret;
975 }
976
977 static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
978 {
979 io_commit_cqring_flush(ctx);
980 if (ctx->flags & IORING_SETUP_SQPOLL)
981 io_cqring_wake(ctx);
982 }
983
984 void io_rw_fail(struct io_kiocb *req)
985 {
986 int res;
987
988 res = io_fixup_rw_res(req, req->cqe.res);
989 io_req_set_res(req, res, req->cqe.flags);
990 }
991
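/*
 * Reap completions for an IOPOLL ring: walk the iopoll_list, poll each
 * file, and post CQEs for requests whose iopoll_completed is set. Returns
 * the number of events found, or an error from the poll callback.
 */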
992 int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
993 {
994 struct io_wq_work_node *pos, *start, *prev;
995 unsigned int poll_flags = BLK_POLL_NOSLEEP;
996 DEFINE_IO_COMP_BATCH(iob);
997 int nr_events = 0;
998
999 /*
1000 * Only spin for completions if we don't have multiple devices hanging
1001 * off our complete list.
1002 */
1003 if (ctx->poll_multi_queue || force_nonspin)
1004 poll_flags |= BLK_POLL_ONESHOT;
1005
1006 wq_list_for_each(pos, start, &ctx->iopoll_list) {
1007 struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
1008 struct file *file = req->file;
1009 int ret;
1010
1011 /*
1012 * Move completed and retryable entries to our local lists.
1013 * If we find a request that requires polling, break out
1014 * and complete those lists first, if we have entries there.
1015 */
1016 if (READ_ONCE(req->iopoll_completed))
1017 break;
1018
1019 if (req->opcode == IORING_OP_URING_CMD) {
1020 struct io_uring_cmd *ioucmd;
1021
1022 ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
1023 ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
1024 poll_flags);
1025 } else {
1026 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
1027
1028 ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
1029 }
1030 if (unlikely(ret < 0))
1031 return ret;
1032 else if (ret)
1033 poll_flags |= BLK_POLL_ONESHOT;
1034
1035 /* iopoll may have completed current req */
1036 if (!rq_list_empty(iob.req_list) ||
1037 READ_ONCE(req->iopoll_completed))
1038 break;
1039 }
1040
1041 if (!rq_list_empty(iob.req_list))
1042 iob.complete(&iob);
1043 else if (!pos)
1044 return 0;
1045
1046 prev = start;
1047 wq_list_for_each_resume(pos, prev) {
1048 struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
1049
1050 /* order with io_complete_rw_iopoll(), e.g. ->result updates */
1051 if (!smp_load_acquire(&req->iopoll_completed))
1052 break;
1053 nr_events++;
1054 if (unlikely(req->flags & REQ_F_CQE_SKIP))
1055 continue;
1056
1057 req->cqe.flags = io_put_kbuf(req, 0);
1058 __io_fill_cqe_req(req->ctx, req);
1059 }
1060
1061 if (unlikely(!nr_events))
1062 return 0;
1063
1064 io_commit_cqring(ctx);
1065 io_cqring_ev_posted_iopoll(ctx);
1066 pos = start ? start->next : ctx->iopoll_list.first;
1067 wq_list_cut(&ctx->iopoll_list, prev, start);
1068 io_free_batch_list(ctx, pos);
1069 return nr_events;
1070 }
1071