Lines matching full:req — identifier cross-reference for the io_uring request pointer (struct io_kiocb *req) in the kernel's fs/io_uring.c. Each entry shows the source line number, the matching code, and the enclosing function; annotations such as "member", "argument" and "local" mark the kind of reference.
415 struct io_kiocb *req; member
707 struct io_kiocb *req; member
743 /* needs req->file assigned */
944 static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
946 static void io_cqring_fill_event(struct io_kiocb *req, long res);
947 static void io_put_req(struct io_kiocb *req);
948 static void io_put_req_deferred(struct io_kiocb *req, int nr);
949 static void io_double_put_req(struct io_kiocb *req);
950 static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
951 static void __io_queue_linked_timeout(struct io_kiocb *req);
952 static void io_queue_linked_timeout(struct io_kiocb *req);
956 static void __io_clean_op(struct io_kiocb *req);
958 struct io_kiocb *req, int fd, bool fixed);
959 static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs);
962 static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
965 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
986 static inline void io_clean_op(struct io_kiocb *req) in io_clean_op() argument
988 if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED | in io_clean_op()
990 __io_clean_op(req); in io_clean_op()
1030 struct io_kiocb *req) in io_sq_thread_acquire_mm() argument
1032 if (!(io_op_defs[req->opcode].work_flags & IO_WQ_WORK_MM)) in io_sq_thread_acquire_mm()
1057 static inline void req_set_fail_links(struct io_kiocb *req) in req_set_fail_links() argument
1059 if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK) in req_set_fail_links()
1060 req->flags |= REQ_F_FAIL_LINK; in req_set_fail_links()
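
The req_set_fail_links() hits above are the kernel half of SQE linking: once one member of a REQ_F_LINK chain fails, REQ_F_FAIL_LINK makes io_fail_links() complete the remaining members with -ECANCELED. A hedged userspace sketch of that behaviour, assuming a reasonably recent liburing is installed (the path below is deliberately nonexistent):

#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    struct io_uring ring;
    struct io_uring_cqe *cqe;

    if (io_uring_queue_init(4, &ring, 0) < 0)
        return 1;

    /* head of the link: an open that fails at execution time (-ENOENT) */
    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
    io_uring_prep_openat(sqe, AT_FDCWD, "/no/such/file", O_RDONLY, 0);
    io_uring_sqe_set_flags(sqe, IOSQE_IO_LINK);

    /* tail of the link: harmless on its own, but req_set_fail_links() on
     * the head makes io_fail_links() finish it with -ECANCELED instead */
    sqe = io_uring_get_sqe(&ring);
    io_uring_prep_nop(sqe);

    io_uring_submit(&ring);
    for (int i = 0; i < 2; i++) {
        io_uring_wait_cqe(&ring, &cqe);
        printf("cqe %d: res=%d (%s)\n", i, cqe->res, strerror(-cqe->res));
        io_uring_cqe_seen(&ring, cqe);
    }
    io_uring_queue_exit(&ring);
    return 0;
}

Expected results are -ENOENT for the openat and -ECANCELED for the nop.
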
1088 static inline void __io_req_init_async(struct io_kiocb *req) in __io_req_init_async() argument
1090 memset(&req->work, 0, sizeof(req->work)); in __io_req_init_async()
1091 req->flags |= REQ_F_WORK_INITIALIZED; in __io_req_init_async()
1098 static inline void io_req_init_async(struct io_kiocb *req) in io_req_init_async() argument
1102 if (req->flags & REQ_F_WORK_INITIALIZED) in io_req_init_async()
1105 __io_req_init_async(req); in io_req_init_async()
1108 req->work.identity = tctx->identity; in io_req_init_async()
1110 refcount_inc(&req->work.identity->count); in io_req_init_async()
1125 static inline bool io_is_timeout_noseq(struct io_kiocb *req) in io_is_timeout_noseq() argument
1127 return !req->timeout.off; in io_is_timeout_noseq()
1191 static bool req_need_defer(struct io_kiocb *req, u32 seq) in req_need_defer() argument
1193 if (unlikely(req->flags & REQ_F_IO_DRAIN)) { in req_need_defer()
1194 struct io_ring_ctx *ctx = req->ctx; in req_need_defer()
1216 static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req) in io_put_identity() argument
1218 if (req->work.identity == &tctx->__identity) in io_put_identity()
1220 if (refcount_dec_and_test(&req->work.identity->count)) in io_put_identity()
1221 kfree(req->work.identity); in io_put_identity()
1224 static void io_req_clean_work(struct io_kiocb *req) in io_req_clean_work() argument
1226 if (!(req->flags & REQ_F_WORK_INITIALIZED)) in io_req_clean_work()
1229 req->flags &= ~REQ_F_WORK_INITIALIZED; in io_req_clean_work()
1231 if (req->work.flags & IO_WQ_WORK_MM) { in io_req_clean_work()
1232 mmdrop(req->work.identity->mm); in io_req_clean_work()
1233 req->work.flags &= ~IO_WQ_WORK_MM; in io_req_clean_work()
1236 if (req->work.flags & IO_WQ_WORK_BLKCG) { in io_req_clean_work()
1237 css_put(req->work.identity->blkcg_css); in io_req_clean_work()
1238 req->work.flags &= ~IO_WQ_WORK_BLKCG; in io_req_clean_work()
1241 if (req->work.flags & IO_WQ_WORK_CREDS) { in io_req_clean_work()
1242 put_cred(req->work.identity->creds); in io_req_clean_work()
1243 req->work.flags &= ~IO_WQ_WORK_CREDS; in io_req_clean_work()
1245 if (req->work.flags & IO_WQ_WORK_FS) { in io_req_clean_work()
1246 struct fs_struct *fs = req->work.identity->fs; in io_req_clean_work()
1248 spin_lock(&req->work.identity->fs->lock); in io_req_clean_work()
1251 spin_unlock(&req->work.identity->fs->lock); in io_req_clean_work()
1254 req->work.flags &= ~IO_WQ_WORK_FS; in io_req_clean_work()
1257 io_put_identity(req->task->io_uring, req); in io_req_clean_work()
1264 static bool io_identity_cow(struct io_kiocb *req) in io_identity_cow() argument
1270 if (req->work.flags & IO_WQ_WORK_CREDS) in io_identity_cow()
1271 creds = req->work.identity->creds; in io_identity_cow()
1273 id = kmemdup(req->work.identity, sizeof(*id), GFP_KERNEL); in io_identity_cow()
1275 req->work.flags |= IO_WQ_WORK_CANCEL; in io_identity_cow()
1292 /* drop tctx and req identity references, if needed */ in io_identity_cow()
1296 if (req->work.identity != &tctx->__identity && in io_identity_cow()
1297 refcount_dec_and_test(&req->work.identity->count)) in io_identity_cow()
1298 kfree(req->work.identity); in io_identity_cow()
1300 req->work.identity = id; in io_identity_cow()
1305 static bool io_grab_identity(struct io_kiocb *req) in io_grab_identity() argument
1307 const struct io_op_def *def = &io_op_defs[req->opcode]; in io_grab_identity()
1308 struct io_identity *id = req->work.identity; in io_grab_identity()
1309 struct io_ring_ctx *ctx = req->ctx; in io_grab_identity()
1314 req->work.flags |= IO_WQ_WORK_FSIZE; in io_grab_identity()
1317 if (!(req->work.flags & IO_WQ_WORK_BLKCG) && in io_grab_identity()
1329 req->work.flags |= IO_WQ_WORK_BLKCG; in io_grab_identity()
1333 if (!(req->work.flags & IO_WQ_WORK_CREDS)) { in io_grab_identity()
1337 req->work.flags |= IO_WQ_WORK_CREDS; in io_grab_identity()
1344 if (!(req->work.flags & IO_WQ_WORK_FS) && in io_grab_identity()
1351 req->work.flags |= IO_WQ_WORK_FS; in io_grab_identity()
1353 req->work.flags |= IO_WQ_WORK_CANCEL; in io_grab_identity()
1357 if (!(req->work.flags & IO_WQ_WORK_FILES) && in io_grab_identity()
1359 !(req->flags & REQ_F_NO_FILE_TABLE)) { in io_grab_identity()
1365 req->flags |= REQ_F_INFLIGHT; in io_grab_identity()
1368 list_add(&req->inflight_entry, &ctx->inflight_list); in io_grab_identity()
1370 req->work.flags |= IO_WQ_WORK_FILES; in io_grab_identity()
1376 static void io_prep_async_work(struct io_kiocb *req) in io_prep_async_work() argument
1378 const struct io_op_def *def = &io_op_defs[req->opcode]; in io_prep_async_work()
1379 struct io_ring_ctx *ctx = req->ctx; in io_prep_async_work()
1382 io_req_init_async(req); in io_prep_async_work()
1383 id = req->work.identity; in io_prep_async_work()
1385 if (req->flags & REQ_F_FORCE_ASYNC) in io_prep_async_work()
1386 req->work.flags |= IO_WQ_WORK_CONCURRENT; in io_prep_async_work()
1388 if (req->flags & REQ_F_ISREG) { in io_prep_async_work()
1390 io_wq_hash_work(&req->work, file_inode(req->file)); in io_prep_async_work()
1393 req->work.flags |= IO_WQ_WORK_UNBOUND; in io_prep_async_work()
1397 if (!(req->work.flags & IO_WQ_WORK_MM) && in io_prep_async_work()
1400 req->work.flags |= IO_WQ_WORK_MM; in io_prep_async_work()
1404 if (io_grab_identity(req)) in io_prep_async_work()
1407 if (!io_identity_cow(req)) in io_prep_async_work()
1411 if (!io_grab_identity(req)) in io_prep_async_work()
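
io_prep_async_work() and io_grab_identity() above run when a request is handed to the io-wq thread pool: they snapshot the submitter's credentials, mm, open files and so on into req->work.identity so the worker thread can act on its behalf. From userspace the usual triggers are an operation that would block or an explicit IOSQE_ASYNC, as in this hedged liburing sketch (any readable file works; /etc/hostname is only an example):

#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    struct io_uring ring;
    struct io_uring_cqe *cqe;
    static char buf[4096];
    int fd = open("/etc/hostname", O_RDONLY);

    if (fd < 0 || io_uring_queue_init(4, &ring, 0) < 0)
        return 1;

    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
    io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
    /* IOSQE_ASYNC maps to REQ_F_FORCE_ASYNC: skip the inline non-blocking
     * attempt and queue straight to io-wq, the path on which
     * io_prep_async_work()/io_grab_identity() capture the identity */
    io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);

    io_uring_submit(&ring);
    io_uring_wait_cqe(&ring, &cqe);
    printf("read returned %d\n", cqe->res);
    io_uring_cqe_seen(&ring, cqe);

    close(fd);
    io_uring_queue_exit(&ring);
    return 0;
}
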
1415 static void io_prep_async_link(struct io_kiocb *req) in io_prep_async_link() argument
1419 io_prep_async_work(req); in io_prep_async_link()
1420 if (req->flags & REQ_F_LINK_HEAD) in io_prep_async_link()
1421 list_for_each_entry(cur, &req->link_list, link_list) in io_prep_async_link()
1425 static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req) in __io_queue_async_work() argument
1427 struct io_ring_ctx *ctx = req->ctx; in __io_queue_async_work()
1428 struct io_kiocb *link = io_prep_linked_timeout(req); in __io_queue_async_work()
1430 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req, in __io_queue_async_work()
1431 &req->work, req->flags); in __io_queue_async_work()
1432 io_wq_enqueue(ctx->io_wq, &req->work); in __io_queue_async_work()
1436 static void io_queue_async_work(struct io_kiocb *req) in io_queue_async_work() argument
1441 io_prep_async_link(req); in io_queue_async_work()
1442 link = __io_queue_async_work(req); in io_queue_async_work()
1448 static void io_kill_timeout(struct io_kiocb *req) in io_kill_timeout() argument
1450 struct io_timeout_data *io = req->async_data; in io_kill_timeout()
1455 atomic_set(&req->ctx->cq_timeouts, in io_kill_timeout()
1456 atomic_read(&req->ctx->cq_timeouts) + 1); in io_kill_timeout()
1457 list_del_init(&req->timeout.list); in io_kill_timeout()
1458 io_cqring_fill_event(req, 0); in io_kill_timeout()
1459 io_put_req_deferred(req, 1); in io_kill_timeout()
1463 static bool io_task_match(struct io_kiocb *req, struct task_struct *tsk) in io_task_match() argument
1465 struct io_ring_ctx *ctx = req->ctx; in io_task_match()
1467 if (!tsk || req->task == tsk) in io_task_match()
1470 if (ctx->sq_data && req->task == ctx->sq_data->thread) in io_task_match()
1481 struct io_kiocb *req, *tmp; in io_kill_timeouts() local
1485 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) { in io_kill_timeouts()
1486 if (io_task_match(req, tsk)) { in io_kill_timeouts()
1487 io_kill_timeout(req); in io_kill_timeouts()
1502 if (req_need_defer(de->req, de->seq)) in __io_queue_deferred()
1506 link = __io_queue_async_work(de->req); in __io_queue_deferred()
1519 struct io_kiocb *req = list_first_entry(&ctx->timeout_list, in io_flush_timeouts() local
1522 if (io_is_timeout_noseq(req)) in io_flush_timeouts()
1524 if (req->timeout.target_seq != ctx->cached_cq_tail in io_flush_timeouts()
1528 list_del_init(&req->timeout.list); in io_flush_timeouts()
1529 io_kill_timeout(req); in io_flush_timeouts()
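
io_kill_timeout() and io_flush_timeouts() above manage IORING_OP_TIMEOUT requests, which complete either when their timer fires or once enough other completions have been posted (the timeout.target_seq bookkeeping). A small sketch of the pure-timer case, assuming liburing:

#include <liburing.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    struct io_uring ring;
    struct io_uring_cqe *cqe;
    struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

    if (io_uring_queue_init(4, &ring, 0) < 0)
        return 1;

    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
    /* count == 0: a pure timer that completes with -ETIME after one second;
     * a non-zero count completes with 0 once that many CQEs have been
     * posted, which is what the target_seq check above tests */
    io_uring_prep_timeout(sqe, &ts, 0, 0);

    io_uring_submit(&ring);
    io_uring_wait_cqe(&ring, &cqe);
    printf("timeout completed: res=%d (%s)\n", cqe->res, strerror(-cqe->res));
    io_uring_cqe_seen(&ring, cqe);
    io_uring_queue_exit(&ring);
    return 0;
}
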
1597 static inline bool __io_match_files(struct io_kiocb *req, in __io_match_files() argument
1600 return ((req->flags & REQ_F_WORK_INITIALIZED) && in __io_match_files()
1601 (req->work.flags & IO_WQ_WORK_FILES)) && in __io_match_files()
1602 req->work.identity->files == files; in __io_match_files()
1605 static bool io_match_files(struct io_kiocb *req, in io_match_files() argument
1612 if (__io_match_files(req, files)) in io_match_files()
1614 if (req->flags & REQ_F_LINK_HEAD) { in io_match_files()
1615 list_for_each_entry(link, &req->link_list, link_list) { in io_match_files()
1629 struct io_kiocb *req, *tmp; in io_cqring_overflow_flush() local
1649 list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) { in io_cqring_overflow_flush()
1650 if (tsk && req->task != tsk) in io_cqring_overflow_flush()
1652 if (!io_match_files(req, files)) in io_cqring_overflow_flush()
1659 list_move(&req->compl.list, &list); in io_cqring_overflow_flush()
1661 WRITE_ONCE(cqe->user_data, req->user_data); in io_cqring_overflow_flush()
1662 WRITE_ONCE(cqe->res, req->result); in io_cqring_overflow_flush()
1663 WRITE_ONCE(cqe->flags, req->compl.cflags); in io_cqring_overflow_flush()
1678 req = list_first_entry(&list, struct io_kiocb, compl.list); in io_cqring_overflow_flush()
1679 list_del(&req->compl.list); in io_cqring_overflow_flush()
1680 io_put_req(req); in io_cqring_overflow_flush()
1686 static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags) in __io_cqring_fill_event() argument
1688 struct io_ring_ctx *ctx = req->ctx; in __io_cqring_fill_event()
1691 trace_io_uring_complete(ctx, req->user_data, res); in __io_cqring_fill_event()
1700 WRITE_ONCE(cqe->user_data, req->user_data); in __io_cqring_fill_event()
1704 atomic_read(&req->task->io_uring->in_idle)) { in __io_cqring_fill_event()
1718 io_clean_op(req); in __io_cqring_fill_event()
1719 req->result = res; in __io_cqring_fill_event()
1720 req->compl.cflags = cflags; in __io_cqring_fill_event()
1721 refcount_inc(&req->refs); in __io_cqring_fill_event()
1722 list_add_tail(&req->compl.list, &ctx->cq_overflow_list); in __io_cqring_fill_event()
1726 static void io_cqring_fill_event(struct io_kiocb *req, long res) in io_cqring_fill_event() argument
1728 __io_cqring_fill_event(req, res, 0); in io_cqring_fill_event()
1731 static void io_cqring_add_event(struct io_kiocb *req, long res, long cflags) in io_cqring_add_event() argument
1733 struct io_ring_ctx *ctx = req->ctx; in io_cqring_add_event()
1737 __io_cqring_fill_event(req, res, cflags); in io_cqring_add_event()
1750 struct io_kiocb *req; in io_submit_flush_completions() local
1752 req = list_first_entry(&cs->list, struct io_kiocb, compl.list); in io_submit_flush_completions()
1753 list_del(&req->compl.list); in io_submit_flush_completions()
1754 __io_cqring_fill_event(req, req->result, req->compl.cflags); in io_submit_flush_completions()
1759 * because of a potential deadlock with req->work.fs->lock in io_submit_flush_completions()
1761 if (req->flags & (REQ_F_FAIL_LINK|REQ_F_LINK_TIMEOUT in io_submit_flush_completions()
1764 io_put_req(req); in io_submit_flush_completions()
1767 io_put_req(req); in io_submit_flush_completions()
1777 static void __io_req_complete(struct io_kiocb *req, long res, unsigned cflags, in __io_req_complete() argument
1781 io_cqring_add_event(req, res, cflags); in __io_req_complete()
1782 io_put_req(req); in __io_req_complete()
1784 io_clean_op(req); in __io_req_complete()
1785 req->result = res; in __io_req_complete()
1786 req->compl.cflags = cflags; in __io_req_complete()
1787 list_add_tail(&req->compl.list, &cs->list); in __io_req_complete()
1793 static void io_req_complete(struct io_kiocb *req, long res) in io_req_complete() argument
1795 __io_req_complete(req, res, 0, NULL); in io_req_complete()
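
__io_cqring_fill_event() and io_cqring_add_event() above are where a request's result becomes a CQE: user_data is echoed back, res carries the return value, and flags carries extras such as a selected buffer id. When the CQ ring is full the entry is parked on ctx->cq_overflow_list and flushed later rather than dropped. The consumer side, sketched with liburing:

#include <liburing.h>
#include <stdio.h>

int main(void)
{
    struct io_uring ring;
    struct io_uring_cqe *cqe;

    if (io_uring_queue_init(8, &ring, 0) < 0)
        return 1;

    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
    io_uring_prep_nop(sqe);
    io_uring_sqe_set_data(sqe, (void *) 0x1234);    /* echoed as cqe->user_data */

    io_uring_submit(&ring);
    io_uring_wait_cqe(&ring, &cqe);
    /* the three fields the kernel filled in for this request */
    printf("user_data=0x%llx res=%d flags=0x%x\n",
           (unsigned long long) cqe->user_data, cqe->res, cqe->flags);
    io_uring_cqe_seen(&ring, cqe);
    io_uring_queue_exit(&ring);
    return 0;
}
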
1798 static inline bool io_is_fallback_req(struct io_kiocb *req) in io_is_fallback_req() argument
1800 return req == (struct io_kiocb *) in io_is_fallback_req()
1801 ((unsigned long) req->ctx->fallback_req & ~1UL); in io_is_fallback_req()
1806 struct io_kiocb *req; in io_get_fallback_req() local
1808 req = ctx->fallback_req; in io_get_fallback_req()
1810 return req; in io_get_fallback_req()
1845 static inline void io_put_file(struct io_kiocb *req, struct file *file, in io_put_file() argument
1849 percpu_ref_put(req->fixed_file_refs); in io_put_file()
1854 static void io_dismantle_req(struct io_kiocb *req) in io_dismantle_req() argument
1856 io_clean_op(req); in io_dismantle_req()
1858 if (req->async_data) in io_dismantle_req()
1859 kfree(req->async_data); in io_dismantle_req()
1860 if (req->file) in io_dismantle_req()
1861 io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE)); in io_dismantle_req()
1863 io_req_clean_work(req); in io_dismantle_req()
1866 static void __io_free_req(struct io_kiocb *req) in __io_free_req() argument
1868 struct io_uring_task *tctx = req->task->io_uring; in __io_free_req()
1869 struct io_ring_ctx *ctx = req->ctx; in __io_free_req()
1871 io_dismantle_req(req); in __io_free_req()
1876 put_task_struct(req->task); in __io_free_req()
1878 if (likely(!io_is_fallback_req(req))) in __io_free_req()
1879 kmem_cache_free(req_cachep, req); in __io_free_req()
1885 static void io_kill_linked_timeout(struct io_kiocb *req) in io_kill_linked_timeout() argument
1887 struct io_ring_ctx *ctx = req->ctx; in io_kill_linked_timeout()
1893 link = list_first_entry_or_null(&req->link_list, struct io_kiocb, in io_kill_linked_timeout()
1897 * req -> link t-out -> link t-out [-> ...] in io_kill_linked_timeout()
1911 req->flags &= ~REQ_F_LINK_TIMEOUT; in io_kill_linked_timeout()
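
io_prep_linked_timeout() and io_kill_linked_timeout() above implement IORING_OP_LINK_TIMEOUT, a timeout bound to the request it is linked behind (the "req -> link t-out" chain in the comment). A hedged sketch that bounds a read which never completes; exact error codes can vary, but typically the read is cancelled and the timer reports -ETIME:

#include <liburing.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    struct io_uring ring;
    struct io_uring_cqe *cqe;
    struct __kernel_timespec ts = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };
    char buf[64];
    int pipefd[2];

    if (pipe(pipefd) < 0 || io_uring_queue_init(4, &ring, 0) < 0)
        return 1;

    /* the guarded operation: a read that cannot complete because nothing
     * is ever written to the pipe */
    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
    io_uring_prep_read(sqe, pipefd[0], buf, sizeof(buf), 0);
    io_uring_sqe_set_flags(sqe, IOSQE_IO_LINK);

    /* the linked timeout attaches to the SQE it follows in the link */
    sqe = io_uring_get_sqe(&ring);
    io_uring_prep_link_timeout(sqe, &ts, 0);

    io_uring_submit(&ring);
    for (int i = 0; i < 2; i++) {
        io_uring_wait_cqe(&ring, &cqe);
        printf("res=%d (%s)\n", cqe->res, strerror(-cqe->res));
        io_uring_cqe_seen(&ring, cqe);
    }
    io_uring_queue_exit(&ring);
    return 0;
}
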
1920 static struct io_kiocb *io_req_link_next(struct io_kiocb *req) in io_req_link_next() argument
1929 if (unlikely(list_empty(&req->link_list))) in io_req_link_next()
1932 nxt = list_first_entry(&req->link_list, struct io_kiocb, link_list); in io_req_link_next()
1933 list_del_init(&req->link_list); in io_req_link_next()
1942 static void io_fail_links(struct io_kiocb *req) in io_fail_links() argument
1944 struct io_ring_ctx *ctx = req->ctx; in io_fail_links()
1948 while (!list_empty(&req->link_list)) { in io_fail_links()
1949 struct io_kiocb *link = list_first_entry(&req->link_list, in io_fail_links()
1953 trace_io_uring_fail_link(req, link); in io_fail_links()
1974 static struct io_kiocb *__io_req_find_next(struct io_kiocb *req) in __io_req_find_next() argument
1976 req->flags &= ~REQ_F_LINK_HEAD; in __io_req_find_next()
1977 if (req->flags & REQ_F_LINK_TIMEOUT) in __io_req_find_next()
1978 io_kill_linked_timeout(req); in __io_req_find_next()
1986 if (likely(!(req->flags & REQ_F_FAIL_LINK))) in __io_req_find_next()
1987 return io_req_link_next(req); in __io_req_find_next()
1988 io_fail_links(req); in __io_req_find_next()
1992 static struct io_kiocb *io_req_find_next(struct io_kiocb *req) in io_req_find_next() argument
1994 if (likely(!(req->flags & REQ_F_LINK_HEAD))) in io_req_find_next()
1996 return __io_req_find_next(req); in io_req_find_next()
1999 static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok) in io_req_task_work_add() argument
2001 struct task_struct *tsk = req->task; in io_req_task_work_add()
2002 struct io_ring_ctx *ctx = req->ctx; in io_req_task_work_add()
2019 ret = task_work_add(tsk, &req->task_work, notify); in io_req_task_work_add()
2026 static void __io_req_task_cancel(struct io_kiocb *req, int error) in __io_req_task_cancel() argument
2028 struct io_ring_ctx *ctx = req->ctx; in __io_req_task_cancel()
2031 io_cqring_fill_event(req, error); in __io_req_task_cancel()
2036 req_set_fail_links(req); in __io_req_task_cancel()
2037 io_double_put_req(req); in __io_req_task_cancel()
2042 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); in io_req_task_cancel() local
2043 struct io_ring_ctx *ctx = req->ctx; in io_req_task_cancel()
2045 __io_req_task_cancel(req, -ECANCELED); in io_req_task_cancel()
2049 static void __io_req_task_submit(struct io_kiocb *req) in __io_req_task_submit() argument
2051 struct io_ring_ctx *ctx = req->ctx; in __io_req_task_submit()
2055 __io_queue_sqe(req, NULL); in __io_req_task_submit()
2058 __io_req_task_cancel(req, -EFAULT); in __io_req_task_submit()
2064 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); in io_req_task_submit() local
2065 struct io_ring_ctx *ctx = req->ctx; in io_req_task_submit()
2067 __io_req_task_submit(req); in io_req_task_submit()
2071 static void io_req_task_queue(struct io_kiocb *req) in io_req_task_queue() argument
2075 init_task_work(&req->task_work, io_req_task_submit); in io_req_task_queue()
2076 percpu_ref_get(&req->ctx->refs); in io_req_task_queue()
2078 ret = io_req_task_work_add(req, true); in io_req_task_queue()
2082 init_task_work(&req->task_work, io_req_task_cancel); in io_req_task_queue()
2083 tsk = io_wq_get_task(req->ctx->io_wq); in io_req_task_queue()
2084 task_work_add(tsk, &req->task_work, TWA_NONE); in io_req_task_queue()
2089 static void io_queue_next(struct io_kiocb *req) in io_queue_next() argument
2091 struct io_kiocb *nxt = io_req_find_next(req); in io_queue_next()
2097 static void io_free_req(struct io_kiocb *req) in io_free_req() argument
2099 io_queue_next(req); in io_free_req()
2100 __io_free_req(req); in io_free_req()
2140 static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req) in io_req_free_batch() argument
2142 if (unlikely(io_is_fallback_req(req))) { in io_req_free_batch()
2143 io_free_req(req); in io_req_free_batch()
2146 if (req->flags & REQ_F_LINK_HEAD) in io_req_free_batch()
2147 io_queue_next(req); in io_req_free_batch()
2149 if (req->task != rb->task) { in io_req_free_batch()
2156 rb->task = req->task; in io_req_free_batch()
2161 io_dismantle_req(req); in io_req_free_batch()
2162 rb->reqs[rb->to_free++] = req; in io_req_free_batch()
2164 __io_req_free_batch_flush(req->ctx, rb); in io_req_free_batch()
2171 static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req) in io_put_req_find_next() argument
2175 if (refcount_dec_and_test(&req->refs)) { in io_put_req_find_next()
2176 nxt = io_req_find_next(req); in io_put_req_find_next()
2177 __io_free_req(req); in io_put_req_find_next()
2182 static void io_put_req(struct io_kiocb *req) in io_put_req() argument
2184 if (refcount_dec_and_test(&req->refs)) in io_put_req()
2185 io_free_req(req); in io_put_req()
2190 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); in io_put_req_deferred_cb() local
2192 io_free_req(req); in io_put_req_deferred_cb()
2195 static void io_free_req_deferred(struct io_kiocb *req) in io_free_req_deferred() argument
2199 init_task_work(&req->task_work, io_put_req_deferred_cb); in io_free_req_deferred()
2200 ret = io_req_task_work_add(req, true); in io_free_req_deferred()
2204 tsk = io_wq_get_task(req->ctx->io_wq); in io_free_req_deferred()
2205 task_work_add(tsk, &req->task_work, TWA_NONE); in io_free_req_deferred()
2210 static inline void io_put_req_deferred(struct io_kiocb *req, int refs) in io_put_req_deferred() argument
2212 if (refcount_sub_and_test(refs, &req->refs)) in io_put_req_deferred()
2213 io_free_req_deferred(req); in io_put_req_deferred()
2216 static struct io_wq_work *io_steal_work(struct io_kiocb *req) in io_steal_work() argument
2225 if (refcount_read(&req->refs) != 1) in io_steal_work()
2228 nxt = io_req_find_next(req); in io_steal_work()
2232 static void io_double_put_req(struct io_kiocb *req) in io_double_put_req() argument
2235 if (refcount_sub_and_test(2, &req->refs)) in io_double_put_req()
2236 io_free_req(req); in io_double_put_req()
2268 static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf) in io_put_kbuf() argument
2274 req->flags &= ~REQ_F_BUFFER_SELECTED; in io_put_kbuf()
2279 static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req) in io_put_rw_kbuf() argument
2283 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; in io_put_rw_kbuf()
2284 return io_put_kbuf(req, kbuf); in io_put_rw_kbuf()
2306 struct io_kiocb *req; in io_iopoll_queue() local
2309 req = list_first_entry(again, struct io_kiocb, inflight_entry); in io_iopoll_queue()
2310 list_del(&req->inflight_entry); in io_iopoll_queue()
2311 __io_complete_rw(req, -EAGAIN, 0, NULL); in io_iopoll_queue()
2322 struct io_kiocb *req; in io_iopoll_complete() local
2332 req = list_first_entry(done, struct io_kiocb, inflight_entry); in io_iopoll_complete()
2333 if (READ_ONCE(req->result) == -EAGAIN) { in io_iopoll_complete()
2334 req->result = 0; in io_iopoll_complete()
2335 req->iopoll_completed = 0; in io_iopoll_complete()
2336 list_move_tail(&req->inflight_entry, &again); in io_iopoll_complete()
2339 list_del(&req->inflight_entry); in io_iopoll_complete()
2341 if (req->flags & REQ_F_BUFFER_SELECTED) in io_iopoll_complete()
2342 cflags = io_put_rw_kbuf(req); in io_iopoll_complete()
2344 __io_cqring_fill_event(req, req->result, cflags); in io_iopoll_complete()
2347 if (refcount_dec_and_test(&req->refs)) in io_iopoll_complete()
2348 io_req_free_batch(&rb, req); in io_iopoll_complete()
2363 struct io_kiocb *req, *tmp; in io_do_iopoll() local
2375 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) { in io_do_iopoll()
2376 struct kiocb *kiocb = &req->rw.kiocb; in io_do_iopoll()
2383 if (READ_ONCE(req->iopoll_completed)) { in io_do_iopoll()
2384 list_move_tail(&req->inflight_entry, &done); in io_do_iopoll()
2394 /* iopoll may have completed current req */ in io_do_iopoll()
2395 if (READ_ONCE(req->iopoll_completed)) in io_do_iopoll()
2396 list_move_tail(&req->inflight_entry, &done); in io_do_iopoll()
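
io_do_iopoll() and io_iopoll_req_issued() above implement IORING_SETUP_IOPOLL: completions are harvested by actively polling the driver's ->iopoll() hook instead of waiting for an interrupt, which is why the ring keeps its own ctx->iopoll_list of in-flight requests. A sketch of the userspace side; the device path is only a placeholder, and the file must be opened O_DIRECT on a driver that supports polled I/O:

#define _GNU_SOURCE
#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
    struct io_uring ring;
    struct io_uring_cqe *cqe;
    void *buf;
    int fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);   /* placeholder device */

    if (fd < 0)
        return 1;
    if (io_uring_queue_init(4, &ring, IORING_SETUP_IOPOLL) < 0)
        return 1;
    if (posix_memalign(&buf, 4096, 4096))
        return 1;

    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
    io_uring_prep_read(sqe, fd, buf, 4096, 0);

    io_uring_submit(&ring);
    /* on an IOPOLL ring, waiting for a CQE busy-polls the device */
    io_uring_wait_cqe(&ring, &cqe);
    printf("polled read: res=%d\n", cqe->res);
    io_uring_cqe_seen(&ring, cqe);

    io_uring_queue_exit(&ring);
    free(buf);
    close(fd);
    return 0;
}
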
2508 static void kiocb_end_write(struct io_kiocb *req) in kiocb_end_write() argument
2514 if (req->flags & REQ_F_ISREG) { in kiocb_end_write()
2515 struct inode *inode = file_inode(req->file); in kiocb_end_write()
2519 file_end_write(req->file); in kiocb_end_write()
2525 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); in io_complete_rw_common() local
2529 kiocb_end_write(req); in io_complete_rw_common()
2531 if (res != req->result) in io_complete_rw_common()
2532 req_set_fail_links(req); in io_complete_rw_common()
2533 if (req->flags & REQ_F_BUFFER_SELECTED) in io_complete_rw_common()
2534 cflags = io_put_rw_kbuf(req); in io_complete_rw_common()
2535 __io_req_complete(req, res, cflags, cs); in io_complete_rw_common()
2539 static bool io_resubmit_prep(struct io_kiocb *req, int error) in io_resubmit_prep() argument
2551 switch (req->opcode) { in io_resubmit_prep()
2564 req->opcode); in io_resubmit_prep()
2568 if (!req->async_data) { in io_resubmit_prep()
2569 ret = io_import_iovec(rw, req, &iovec, &iter, false); in io_resubmit_prep()
2572 ret = io_setup_async_rw(req, iovec, inline_vecs, &iter, false); in io_resubmit_prep()
2580 req_set_fail_links(req); in io_resubmit_prep()
2585 static bool io_rw_reissue(struct io_kiocb *req, long res) in io_rw_reissue() argument
2588 umode_t mode = file_inode(req->file)->i_mode; in io_rw_reissue()
2596 ret = io_sq_thread_acquire_mm(req->ctx, req); in io_rw_reissue()
2598 if (io_resubmit_prep(req, ret)) { in io_rw_reissue()
2599 refcount_inc(&req->refs); in io_rw_reissue()
2600 io_queue_async_work(req); in io_rw_reissue()
2608 static void __io_complete_rw(struct io_kiocb *req, long res, long res2, in __io_complete_rw() argument
2611 if (!io_rw_reissue(req, res)) in __io_complete_rw()
2612 io_complete_rw_common(&req->rw.kiocb, res, cs); in __io_complete_rw()
2617 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); in io_complete_rw() local
2619 __io_complete_rw(req, res, res2, NULL); in io_complete_rw()
2624 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); in io_complete_rw_iopoll() local
2627 kiocb_end_write(req); in io_complete_rw_iopoll()
2629 if (res != -EAGAIN && res != req->result) in io_complete_rw_iopoll()
2630 req_set_fail_links(req); in io_complete_rw_iopoll()
2632 WRITE_ONCE(req->result, res); in io_complete_rw_iopoll()
2635 WRITE_ONCE(req->iopoll_completed, 1); in io_complete_rw_iopoll()
2644 static void io_iopoll_req_issued(struct io_kiocb *req) in io_iopoll_req_issued() argument
2646 struct io_ring_ctx *ctx = req->ctx; in io_iopoll_req_issued()
2660 if (list_req->file != req->file) in io_iopoll_req_issued()
2668 if (READ_ONCE(req->iopoll_completed)) in io_iopoll_req_issued()
2669 list_add(&req->inflight_entry, &ctx->iopoll_list); in io_iopoll_req_issued()
2671 list_add_tail(&req->inflight_entry, &ctx->iopoll_list); in io_iopoll_req_issued()
2762 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_rw() argument
2764 struct io_ring_ctx *ctx = req->ctx; in io_prep_rw()
2765 struct kiocb *kiocb = &req->rw.kiocb; in io_prep_rw()
2769 if (S_ISREG(file_inode(req->file)->i_mode)) in io_prep_rw()
2770 req->flags |= REQ_F_ISREG; in io_prep_rw()
2773 if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) { in io_prep_rw()
2774 req->flags |= REQ_F_CUR_POS; in io_prep_rw()
2775 kiocb->ki_pos = req->file->f_pos; in io_prep_rw()
2795 req->flags |= REQ_F_NOWAIT; in io_prep_rw()
2804 req->iopoll_completed = 0; in io_prep_rw()
2811 req->rw.addr = READ_ONCE(sqe->addr); in io_prep_rw()
2812 req->rw.len = READ_ONCE(sqe->len); in io_prep_rw()
2813 req->buf_index = READ_ONCE(sqe->buf_index); in io_prep_rw()
2841 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); in kiocb_done() local
2842 struct io_async_rw *io = req->async_data; in kiocb_done()
2852 if (req->flags & REQ_F_CUR_POS) in kiocb_done()
2853 req->file->f_pos = kiocb->ki_pos; in kiocb_done()
2855 __io_complete_rw(req, ret, 0, cs); in kiocb_done()
2860 static ssize_t io_import_fixed(struct io_kiocb *req, int rw, in io_import_fixed() argument
2863 struct io_ring_ctx *ctx = req->ctx; in io_import_fixed()
2864 size_t len = req->rw.len; in io_import_fixed()
2866 u16 index, buf_index = req->buf_index; in io_import_fixed()
2874 buf_addr = req->rw.addr; in io_import_fixed()
2946 static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len, in io_buffer_select() argument
2952 if (req->flags & REQ_F_BUFFER_SELECTED) in io_buffer_select()
2955 io_ring_submit_lock(req->ctx, needs_lock); in io_buffer_select()
2957 lockdep_assert_held(&req->ctx->uring_lock); in io_buffer_select()
2959 head = idr_find(&req->ctx->io_buffer_idr, bgid); in io_buffer_select()
2967 idr_remove(&req->ctx->io_buffer_idr, bgid); in io_buffer_select()
2975 io_ring_submit_unlock(req->ctx, needs_lock); in io_buffer_select()
2980 static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len, in io_rw_buffer_select() argument
2986 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; in io_rw_buffer_select()
2987 bgid = req->buf_index; in io_rw_buffer_select()
2988 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock); in io_rw_buffer_select()
2991 req->rw.addr = (u64) (unsigned long) kbuf; in io_rw_buffer_select()
2992 req->flags |= REQ_F_BUFFER_SELECTED; in io_rw_buffer_select()
2997 static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov, in io_compat_import() argument
3005 uiov = u64_to_user_ptr(req->rw.addr); in io_compat_import()
3014 buf = io_rw_buffer_select(req, &len, needs_lock); in io_compat_import()
3023 static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, in __io_iov_buffer_select() argument
3026 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr); in __io_iov_buffer_select()
3036 buf = io_rw_buffer_select(req, &len, needs_lock); in __io_iov_buffer_select()
3044 static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, in io_iov_buffer_select() argument
3047 if (req->flags & REQ_F_BUFFER_SELECTED) { in io_iov_buffer_select()
3050 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; in io_iov_buffer_select()
3055 if (!req->rw.len) in io_iov_buffer_select()
3057 else if (req->rw.len > 1) in io_iov_buffer_select()
3061 if (req->ctx->compat) in io_iov_buffer_select()
3062 return io_compat_import(req, iov, needs_lock); in io_iov_buffer_select()
3065 return __io_iov_buffer_select(req, iov, needs_lock); in io_iov_buffer_select()
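
io_buffer_select() and io_rw_buffer_select() above serve IOSQE_BUFFER_SELECT: instead of naming a buffer in the SQE, the submitter points at a buffer group previously filled with IORING_OP_PROVIDE_BUFFERS, the kernel picks a free buffer at issue time, and the chosen id is reported back in cqe->flags. A liburing sketch; the group id, buffer size and input path are arbitrary:

#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define NBUFS 4
#define BUFSZ 4096
#define BGID  7     /* arbitrary buffer group id */

int main(void)
{
    static char bufs[NBUFS][BUFSZ];
    struct io_uring ring;
    struct io_uring_cqe *cqe;
    int fd = open("/etc/hostname", O_RDONLY);   /* any readable file */

    if (fd < 0 || io_uring_queue_init(8, &ring, 0) < 0)
        return 1;

    /* hand NBUFS buffers to the kernel under group BGID */
    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
    io_uring_prep_provide_buffers(sqe, bufs, BUFSZ, NBUFS, BGID, 0);
    io_uring_submit(&ring);
    io_uring_wait_cqe(&ring, &cqe);
    io_uring_cqe_seen(&ring, cqe);

    /* read without naming a buffer; io_buffer_select() picks one from BGID */
    sqe = io_uring_get_sqe(&ring);
    io_uring_prep_read(sqe, fd, NULL, BUFSZ, 0);
    io_uring_sqe_set_flags(sqe, IOSQE_BUFFER_SELECT);
    sqe->buf_group = BGID;

    io_uring_submit(&ring);
    io_uring_wait_cqe(&ring, &cqe);
    if (cqe->res >= 0 && (cqe->flags & IORING_CQE_F_BUFFER))
        printf("read %d bytes into buffer id %u\n",
               cqe->res, cqe->flags >> IORING_CQE_BUFFER_SHIFT);
    io_uring_cqe_seen(&ring, cqe);

    close(fd);
    io_uring_queue_exit(&ring);
    return 0;
}
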
3068 static ssize_t __io_import_iovec(int rw, struct io_kiocb *req, in __io_import_iovec() argument
3072 void __user *buf = u64_to_user_ptr(req->rw.addr); in __io_import_iovec()
3073 size_t sqe_len = req->rw.len; in __io_import_iovec()
3077 opcode = req->opcode; in __io_import_iovec()
3080 return io_import_fixed(req, rw, iter); in __io_import_iovec()
3084 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT)) in __io_import_iovec()
3088 if (req->flags & REQ_F_BUFFER_SELECT) { in __io_import_iovec()
3089 buf = io_rw_buffer_select(req, &sqe_len, needs_lock); in __io_import_iovec()
3092 req->rw.len = sqe_len; in __io_import_iovec()
3100 if (req->flags & REQ_F_BUFFER_SELECT) { in __io_import_iovec()
3101 ret = io_iov_buffer_select(req, *iovec, needs_lock); in __io_import_iovec()
3111 req->ctx->compat); in __io_import_iovec()
3114 static ssize_t io_import_iovec(int rw, struct io_kiocb *req, in io_import_iovec() argument
3118 struct io_async_rw *iorw = req->async_data; in io_import_iovec()
3121 return __io_import_iovec(rw, req, iovec, iter, needs_lock); in io_import_iovec()
3135 static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter) in loop_rw_iter() argument
3137 struct kiocb *kiocb = &req->rw.kiocb; in loop_rw_iter()
3138 struct file *file = req->file; in loop_rw_iter()
3158 iovec.iov_base = u64_to_user_ptr(req->rw.addr); in loop_rw_iter()
3159 iovec.iov_len = req->rw.len; in loop_rw_iter()
3178 req->rw.len -= nr; in loop_rw_iter()
3179 req->rw.addr += nr; in loop_rw_iter()
3186 static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec, in io_req_map_rw() argument
3189 struct io_async_rw *rw = req->async_data; in io_req_map_rw()
3209 req->flags |= REQ_F_NEED_CLEANUP; in io_req_map_rw()
3213 static inline int __io_alloc_async_data(struct io_kiocb *req) in __io_alloc_async_data() argument
3215 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size); in __io_alloc_async_data()
3216 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL); in __io_alloc_async_data()
3217 return req->async_data == NULL; in __io_alloc_async_data()
3220 static int io_alloc_async_data(struct io_kiocb *req) in io_alloc_async_data() argument
3222 if (!io_op_defs[req->opcode].needs_async_data) in io_alloc_async_data()
3225 return __io_alloc_async_data(req); in io_alloc_async_data()
3228 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, in io_setup_async_rw() argument
3232 if (!force && !io_op_defs[req->opcode].needs_async_data) in io_setup_async_rw()
3234 if (!req->async_data) { in io_setup_async_rw()
3235 if (__io_alloc_async_data(req)) in io_setup_async_rw()
3238 io_req_map_rw(req, iovec, fast_iov, iter); in io_setup_async_rw()
3243 static inline int io_rw_prep_async(struct io_kiocb *req, int rw) in io_rw_prep_async() argument
3245 struct io_async_rw *iorw = req->async_data; in io_rw_prep_async()
3249 ret = __io_import_iovec(rw, req, &iov, &iorw->iter, false); in io_rw_prep_async()
3256 req->flags |= REQ_F_NEED_CLEANUP; in io_rw_prep_async()
3260 static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_read_prep() argument
3264 ret = io_prep_rw(req, sqe); in io_read_prep()
3268 if (unlikely(!(req->file->f_mode & FMODE_READ))) in io_read_prep()
3272 if (!req->async_data) in io_read_prep()
3274 return io_rw_prep_async(req, READ); in io_read_prep()
3291 struct io_kiocb *req = wait->private; in io_async_buf_func() local
3300 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ; in io_async_buf_func()
3303 init_task_work(&req->task_work, io_req_task_submit); in io_async_buf_func()
3304 percpu_ref_get(&req->ctx->refs); in io_async_buf_func()
3307 refcount_inc(&req->refs); in io_async_buf_func()
3308 ret = io_req_task_work_add(req, true); in io_async_buf_func()
3313 init_task_work(&req->task_work, io_req_task_cancel); in io_async_buf_func()
3314 tsk = io_wq_get_task(req->ctx->io_wq); in io_async_buf_func()
3315 task_work_add(tsk, &req->task_work, TWA_NONE); in io_async_buf_func()
3333 static bool io_rw_should_retry(struct io_kiocb *req) in io_rw_should_retry() argument
3335 struct io_async_rw *rw = req->async_data; in io_rw_should_retry()
3337 struct kiocb *kiocb = &req->rw.kiocb; in io_rw_should_retry()
3340 if (req->flags & REQ_F_NOWAIT) in io_rw_should_retry()
3351 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC)) in io_rw_should_retry()
3355 wait->wait.private = req; in io_rw_should_retry()
3364 static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter) in io_iter_do_read() argument
3366 if (req->file->f_op->read_iter) in io_iter_do_read()
3367 return call_read_iter(req->file, &req->rw.kiocb, iter); in io_iter_do_read()
3368 else if (req->file->f_op->read) in io_iter_do_read()
3369 return loop_rw_iter(READ, req, iter); in io_iter_do_read()
3374 static int io_read(struct io_kiocb *req, bool force_nonblock, in io_read() argument
3378 struct kiocb *kiocb = &req->rw.kiocb; in io_read()
3380 struct io_async_rw *rw = req->async_data; in io_read()
3388 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock); in io_read()
3393 req->result = io_size; in io_read()
3404 no_async = force_nonblock && !io_file_supports_async(req->file, READ); in io_read()
3408 ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), iov_count); in io_read()
3412 ret = io_iter_do_read(req, iter); in io_read()
3421 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_read()
3424 if (req->file->f_flags & O_NONBLOCK) in io_read()
3437 (req->file->f_flags & O_NONBLOCK)) in io_read()
3442 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true); in io_read()
3449 rw = req->async_data; in io_read()
3457 if (!io_rw_should_retry(req)) { in io_read()
3468 ret = io_iter_do_read(req, iter); in io_read()
3486 static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_write_prep() argument
3490 ret = io_prep_rw(req, sqe); in io_write_prep()
3494 if (unlikely(!(req->file->f_mode & FMODE_WRITE))) in io_write_prep()
3498 if (!req->async_data) in io_write_prep()
3500 return io_rw_prep_async(req, WRITE); in io_write_prep()
3503 static int io_write(struct io_kiocb *req, bool force_nonblock, in io_write() argument
3507 struct kiocb *kiocb = &req->rw.kiocb; in io_write()
3509 struct io_async_rw *rw = req->async_data; in io_write()
3516 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock); in io_write()
3521 req->result = io_size; in io_write()
3530 if (force_nonblock && !io_file_supports_async(req->file, WRITE)) in io_write()
3535 (req->flags & REQ_F_ISREG)) in io_write()
3538 ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), iov_count); in io_write()
3549 if (req->flags & REQ_F_ISREG) { in io_write()
3550 sb_start_write(file_inode(req->file)->i_sb); in io_write()
3551 __sb_writers_release(file_inode(req->file)->i_sb, in io_write()
3556 if (req->file->f_op->write_iter) in io_write()
3557 ret2 = call_write_iter(req->file, kiocb, iter); in io_write()
3558 else if (req->file->f_op->write) in io_write()
3559 ret2 = loop_rw_iter(WRITE, req, iter); in io_write()
3570 if (ret2 == -EAGAIN && (req->file->f_flags & O_NONBLOCK)) in io_write()
3574 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN) in io_write()
3582 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false); in io_write()
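
io_write() above mirrors io_read(): the kernel first attempts the write without blocking at submit time and punts it to an io-wq worker when that is not possible (taking the sb_start_write() freeze protection along). From userspace it is an ordinary writev SQE; a minimal sketch with a made-up output path:

#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
    struct io_uring ring;
    struct io_uring_cqe *cqe;
    char text[] = "hello from io_uring\n";
    struct iovec iov = { .iov_base = text, .iov_len = strlen(text) };
    int fd = open("/tmp/io_uring_demo.txt", O_WRONLY | O_CREAT | O_TRUNC, 0644);

    if (fd < 0 || io_uring_queue_init(4, &ring, 0) < 0)
        return 1;

    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
    io_uring_prep_writev(sqe, fd, &iov, 1, 0);   /* buffered write at offset 0 */

    io_uring_submit(&ring);
    io_uring_wait_cqe(&ring, &cqe);
    printf("write returned %d\n", cqe->res);
    io_uring_cqe_seen(&ring, cqe);

    close(fd);
    io_uring_queue_exit(&ring);
    return 0;
}
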
3593 static int __io_splice_prep(struct io_kiocb *req, in __io_splice_prep() argument
3596 struct io_splice* sp = &req->splice; in __io_splice_prep()
3599 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in __io_splice_prep()
3609 sp->file_in = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in), in __io_splice_prep()
3613 req->flags |= REQ_F_NEED_CLEANUP; in __io_splice_prep()
3620 io_req_init_async(req); in __io_splice_prep()
3621 req->work.flags |= IO_WQ_WORK_UNBOUND; in __io_splice_prep()
3627 static int io_tee_prep(struct io_kiocb *req, in io_tee_prep() argument
3632 return __io_splice_prep(req, sqe); in io_tee_prep()
3635 static int io_tee(struct io_kiocb *req, bool force_nonblock) in io_tee() argument
3637 struct io_splice *sp = &req->splice; in io_tee()
3648 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED)); in io_tee()
3649 req->flags &= ~REQ_F_NEED_CLEANUP; in io_tee()
3652 req_set_fail_links(req); in io_tee()
3653 io_req_complete(req, ret); in io_tee()
3657 static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_splice_prep() argument
3659 struct io_splice* sp = &req->splice; in io_splice_prep()
3663 return __io_splice_prep(req, sqe); in io_splice_prep()
3666 static int io_splice(struct io_kiocb *req, bool force_nonblock) in io_splice() argument
3668 struct io_splice *sp = &req->splice; in io_splice()
3684 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED)); in io_splice()
3685 req->flags &= ~REQ_F_NEED_CLEANUP; in io_splice()
3688 req_set_fail_links(req); in io_splice()
3689 io_req_complete(req, ret); in io_splice()
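
__io_splice_prep() and io_splice() above wire splice(2) into io_uring; the extra input fd is looked up with io_file_get() at prep time, and offsets of -1 mean "use the current file position" (a pipe end must always pass -1). A small sketch, with a made-up output path, moving bytes from a pipe into a file:

#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    struct io_uring ring;
    struct io_uring_cqe *cqe;
    const char msg[] = "spliced via io_uring\n";
    int pipefd[2];
    int out = open("/tmp/splice_demo.txt", O_WRONLY | O_CREAT | O_TRUNC, 0644);

    if (out < 0 || pipe(pipefd) < 0 || io_uring_queue_init(4, &ring, 0) < 0)
        return 1;
    write(pipefd[1], msg, strlen(msg));          /* stage data in the pipe */

    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
    io_uring_prep_splice(sqe, pipefd[0], -1, out, -1, strlen(msg), 0);

    io_uring_submit(&ring);
    io_uring_wait_cqe(&ring, &cqe);
    printf("splice moved %d bytes\n", cqe->res);
    io_uring_cqe_seen(&ring, cqe);

    close(out);
    io_uring_queue_exit(&ring);
    return 0;
}
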
3696 static int io_nop(struct io_kiocb *req, struct io_comp_state *cs) in io_nop() argument
3698 struct io_ring_ctx *ctx = req->ctx; in io_nop()
3703 __io_req_complete(req, 0, 0, cs); in io_nop()
3707 static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_fsync() argument
3709 struct io_ring_ctx *ctx = req->ctx; in io_prep_fsync()
3711 if (!req->file) in io_prep_fsync()
3719 req->sync.flags = READ_ONCE(sqe->fsync_flags); in io_prep_fsync()
3720 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC)) in io_prep_fsync()
3723 req->sync.off = READ_ONCE(sqe->off); in io_prep_fsync()
3724 req->sync.len = READ_ONCE(sqe->len); in io_prep_fsync()
3728 static int io_fsync(struct io_kiocb *req, bool force_nonblock) in io_fsync() argument
3730 loff_t end = req->sync.off + req->sync.len; in io_fsync()
3737 ret = vfs_fsync_range(req->file, req->sync.off, in io_fsync()
3739 req->sync.flags & IORING_FSYNC_DATASYNC); in io_fsync()
3741 req_set_fail_links(req); in io_fsync()
3742 io_req_complete(req, ret); in io_fsync()
3746 static int io_fallocate_prep(struct io_kiocb *req, in io_fallocate_prep() argument
3751 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_fallocate_prep()
3754 req->sync.off = READ_ONCE(sqe->off); in io_fallocate_prep()
3755 req->sync.len = READ_ONCE(sqe->addr); in io_fallocate_prep()
3756 req->sync.mode = READ_ONCE(sqe->len); in io_fallocate_prep()
3760 static int io_fallocate(struct io_kiocb *req, bool force_nonblock) in io_fallocate() argument
3767 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off, in io_fallocate()
3768 req->sync.len); in io_fallocate()
3770 req_set_fail_links(req); in io_fallocate()
3771 io_req_complete(req, ret); in io_fallocate()
3775 static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in __io_openat_prep() argument
3782 if (unlikely(req->flags & REQ_F_FIXED_FILE)) in __io_openat_prep()
3786 if (!(req->open.how.flags & O_PATH) && force_o_largefile()) in __io_openat_prep()
3787 req->open.how.flags |= O_LARGEFILE; in __io_openat_prep()
3789 req->open.dfd = READ_ONCE(sqe->fd); in __io_openat_prep()
3791 req->open.filename = getname(fname); in __io_openat_prep()
3792 if (IS_ERR(req->open.filename)) { in __io_openat_prep()
3793 ret = PTR_ERR(req->open.filename); in __io_openat_prep()
3794 req->open.filename = NULL; in __io_openat_prep()
3797 req->open.nofile = rlimit(RLIMIT_NOFILE); in __io_openat_prep()
3798 req->open.ignore_nonblock = false; in __io_openat_prep()
3799 req->flags |= REQ_F_NEED_CLEANUP; in __io_openat_prep()
3803 static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_openat_prep() argument
3807 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) in io_openat_prep()
3811 req->open.how = build_open_how(flags, mode); in io_openat_prep()
3812 return __io_openat_prep(req, sqe); in io_openat_prep()
3815 static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_openat2_prep() argument
3821 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) in io_openat2_prep()
3828 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how, in io_openat2_prep()
3833 return __io_openat_prep(req, sqe); in io_openat2_prep()
3836 static int io_openat2(struct io_kiocb *req, bool force_nonblock) in io_openat2() argument
3842 if (force_nonblock && !req->open.ignore_nonblock) in io_openat2()
3845 ret = build_open_flags(&req->open.how, &op); in io_openat2()
3849 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile); in io_openat2()
3853 file = do_filp_open(req->open.dfd, req->open.filename, &op); in io_openat2()
3867 req->open.ignore_nonblock = true; in io_openat2()
3868 refcount_inc(&req->refs); in io_openat2()
3869 io_req_task_queue(req); in io_openat2()
3877 putname(req->open.filename); in io_openat2()
3878 req->flags &= ~REQ_F_NEED_CLEANUP; in io_openat2()
3880 req_set_fail_links(req); in io_openat2()
3881 io_req_complete(req, ret); in io_openat2()
3885 static int io_openat(struct io_kiocb *req, bool force_nonblock) in io_openat() argument
3887 return io_openat2(req, force_nonblock); in io_openat()
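
__io_openat_prep() and io_openat2() above resolve the filename with getname() at prep time and normally punt the actual open to an io-wq worker, since path lookup can block. Userspace just issues an IORING_OP_OPENAT SQE with openat(2)-style arguments and receives the new fd in cqe->res; a sketch (the path is only an example):

#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    struct io_uring ring;
    struct io_uring_cqe *cqe;

    if (io_uring_queue_init(4, &ring, 0) < 0)
        return 1;

    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
    io_uring_prep_openat(sqe, AT_FDCWD, "/etc/hostname", O_RDONLY, 0);

    io_uring_submit(&ring);
    io_uring_wait_cqe(&ring, &cqe);
    printf("openat returned %d\n", cqe->res);    /* new fd, or -errno */
    if (cqe->res >= 0)
        close(cqe->res);
    io_uring_cqe_seen(&ring, cqe);
    io_uring_queue_exit(&ring);
    return 0;
}
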
3890 static int io_remove_buffers_prep(struct io_kiocb *req, in io_remove_buffers_prep() argument
3893 struct io_provide_buf *p = &req->pbuf; in io_remove_buffers_prep()
3935 static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock, in io_remove_buffers() argument
3938 struct io_provide_buf *p = &req->pbuf; in io_remove_buffers()
3939 struct io_ring_ctx *ctx = req->ctx; in io_remove_buffers()
3954 req_set_fail_links(req); in io_remove_buffers()
3955 __io_req_complete(req, ret, 0, cs); in io_remove_buffers()
3959 static int io_provide_buffers_prep(struct io_kiocb *req, in io_provide_buffers_prep() argument
3962 struct io_provide_buf *p = &req->pbuf; in io_provide_buffers_prep()
4013 static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock, in io_provide_buffers() argument
4016 struct io_provide_buf *p = &req->pbuf; in io_provide_buffers()
4017 struct io_ring_ctx *ctx = req->ctx; in io_provide_buffers()
4042 req_set_fail_links(req); in io_provide_buffers()
4043 __io_req_complete(req, ret, 0, cs); in io_provide_buffers()
4047 static int io_epoll_ctl_prep(struct io_kiocb *req, in io_epoll_ctl_prep() argument
4053 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL))) in io_epoll_ctl_prep()
4056 req->epoll.epfd = READ_ONCE(sqe->fd); in io_epoll_ctl_prep()
4057 req->epoll.op = READ_ONCE(sqe->len); in io_epoll_ctl_prep()
4058 req->epoll.fd = READ_ONCE(sqe->off); in io_epoll_ctl_prep()
4060 if (ep_op_has_event(req->epoll.op)) { in io_epoll_ctl_prep()
4064 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev))) in io_epoll_ctl_prep()
4074 static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock, in io_epoll_ctl() argument
4078 struct io_epoll *ie = &req->epoll; in io_epoll_ctl()
4086 req_set_fail_links(req); in io_epoll_ctl()
4087 __io_req_complete(req, ret, 0, cs); in io_epoll_ctl()
4094 static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_madvise_prep() argument
4099 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_madvise_prep()
4102 req->madvise.addr = READ_ONCE(sqe->addr); in io_madvise_prep()
4103 req->madvise.len = READ_ONCE(sqe->len); in io_madvise_prep()
4104 req->madvise.advice = READ_ONCE(sqe->fadvise_advice); in io_madvise_prep()
4111 static int io_madvise(struct io_kiocb *req, bool force_nonblock) in io_madvise() argument
4114 struct io_madvise *ma = &req->madvise; in io_madvise()
4122 req_set_fail_links(req); in io_madvise()
4123 io_req_complete(req, ret); in io_madvise()
4130 static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_fadvise_prep() argument
4134 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_fadvise_prep()
4137 req->fadvise.offset = READ_ONCE(sqe->off); in io_fadvise_prep()
4138 req->fadvise.len = READ_ONCE(sqe->len); in io_fadvise_prep()
4139 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice); in io_fadvise_prep()
4143 static int io_fadvise(struct io_kiocb *req, bool force_nonblock) in io_fadvise() argument
4145 struct io_fadvise *fa = &req->fadvise; in io_fadvise()
4159 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice); in io_fadvise()
4161 req_set_fail_links(req); in io_fadvise()
4162 io_req_complete(req, ret); in io_fadvise()
4166 static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_statx_prep() argument
4168 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL))) in io_statx_prep()
4172 if (req->flags & REQ_F_FIXED_FILE) in io_statx_prep()
4175 req->statx.dfd = READ_ONCE(sqe->fd); in io_statx_prep()
4176 req->statx.mask = READ_ONCE(sqe->len); in io_statx_prep()
4177 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_statx_prep()
4178 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2)); in io_statx_prep()
4179 req->statx.flags = READ_ONCE(sqe->statx_flags); in io_statx_prep()
4184 static int io_statx(struct io_kiocb *req, bool force_nonblock) in io_statx() argument
4186 struct io_statx *ctx = &req->statx; in io_statx()
4192 req->flags |= REQ_F_NO_FILE_TABLE; in io_statx()
4200 req_set_fail_links(req); in io_statx()
4201 io_req_complete(req, ret); in io_statx()
4205 static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_close_prep() argument
4212 io_req_init_async(req); in io_close_prep()
4213 req->work.flags |= IO_WQ_WORK_NO_CANCEL; in io_close_prep()
4215 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) in io_close_prep()
4220 if (req->flags & REQ_F_FIXED_FILE) in io_close_prep()
4223 req->close.fd = READ_ONCE(sqe->fd); in io_close_prep()
4224 if ((req->file && req->file->f_op == &io_uring_fops)) in io_close_prep()
4227 req->close.put_file = NULL; in io_close_prep()
4231 static int io_close(struct io_kiocb *req, bool force_nonblock, in io_close() argument
4234 struct io_close *close = &req->close; in io_close()
4247 req->flags &= ~REQ_F_NOWAIT; in io_close()
4249 req->flags |= REQ_F_NO_FILE_TABLE; in io_close()
4254 ret = filp_close(close->put_file, req->work.identity->files); in io_close()
4256 req_set_fail_links(req); in io_close()
4259 __io_req_complete(req, ret, 0, cs); in io_close()
4263 static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_sfr() argument
4265 struct io_ring_ctx *ctx = req->ctx; in io_prep_sfr()
4267 if (!req->file) in io_prep_sfr()
4275 req->sync.off = READ_ONCE(sqe->off); in io_prep_sfr()
4276 req->sync.len = READ_ONCE(sqe->len); in io_prep_sfr()
4277 req->sync.flags = READ_ONCE(sqe->sync_range_flags); in io_prep_sfr()
4281 static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock) in io_sync_file_range() argument
4289 ret = sync_file_range(req->file, req->sync.off, req->sync.len, in io_sync_file_range()
4290 req->sync.flags); in io_sync_file_range()
4292 req_set_fail_links(req); in io_sync_file_range()
4293 io_req_complete(req, ret); in io_sync_file_range()
4298 static int io_setup_async_msg(struct io_kiocb *req, in io_setup_async_msg() argument
4301 struct io_async_msghdr *async_msg = req->async_data; in io_setup_async_msg()
4305 if (io_alloc_async_data(req)) { in io_setup_async_msg()
4310 async_msg = req->async_data; in io_setup_async_msg()
4311 req->flags |= REQ_F_NEED_CLEANUP; in io_setup_async_msg()
4316 static int io_sendmsg_copy_hdr(struct io_kiocb *req, in io_sendmsg_copy_hdr() argument
4321 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg, in io_sendmsg_copy_hdr()
4322 req->sr_msg.msg_flags, &iomsg->iov); in io_sendmsg_copy_hdr()
4325 static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_sendmsg_prep() argument
4327 struct io_async_msghdr *async_msg = req->async_data; in io_sendmsg_prep()
4328 struct io_sr_msg *sr = &req->sr_msg; in io_sendmsg_prep()
4331 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_sendmsg_prep()
4339 if (req->ctx->compat) in io_sendmsg_prep()
4343 if (!async_msg || !io_op_defs[req->opcode].needs_async_data) in io_sendmsg_prep()
4345 ret = io_sendmsg_copy_hdr(req, async_msg); in io_sendmsg_prep()
4347 req->flags |= REQ_F_NEED_CLEANUP; in io_sendmsg_prep()
4351 static int io_sendmsg(struct io_kiocb *req, bool force_nonblock, in io_sendmsg() argument
4359 sock = sock_from_file(req->file, &ret); in io_sendmsg()
4363 if (req->async_data) { in io_sendmsg()
4364 kmsg = req->async_data; in io_sendmsg()
4371 ret = io_sendmsg_copy_hdr(req, &iomsg); in io_sendmsg()
4377 flags = req->sr_msg.msg_flags; in io_sendmsg()
4379 req->flags |= REQ_F_NOWAIT; in io_sendmsg()
4385 return io_setup_async_msg(req, kmsg); in io_sendmsg()
4391 req->flags &= ~REQ_F_NEED_CLEANUP; in io_sendmsg()
4393 req_set_fail_links(req); in io_sendmsg()
4394 __io_req_complete(req, ret, 0, cs); in io_sendmsg()
4398 static int io_send(struct io_kiocb *req, bool force_nonblock, in io_send() argument
4401 struct io_sr_msg *sr = &req->sr_msg; in io_send()
4408 sock = sock_from_file(req->file, &ret); in io_send()
4421 flags = req->sr_msg.msg_flags; in io_send()
4423 req->flags |= REQ_F_NOWAIT; in io_send()
4435 req_set_fail_links(req); in io_send()
4436 __io_req_complete(req, ret, 0, cs); in io_send()
4440 static int __io_recvmsg_copy_hdr(struct io_kiocb *req, in __io_recvmsg_copy_hdr() argument
4443 struct io_sr_msg *sr = &req->sr_msg; in __io_recvmsg_copy_hdr()
4453 if (req->flags & REQ_F_BUFFER_SELECT) { in __io_recvmsg_copy_hdr()
4474 static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req, in __io_compat_recvmsg_copy_hdr() argument
4478 struct io_sr_msg *sr = &req->sr_msg; in __io_compat_recvmsg_copy_hdr()
4491 if (req->flags & REQ_F_BUFFER_SELECT) { in __io_compat_recvmsg_copy_hdr()
4517 static int io_recvmsg_copy_hdr(struct io_kiocb *req, in io_recvmsg_copy_hdr() argument
4524 if (req->ctx->compat) in io_recvmsg_copy_hdr()
4525 return __io_compat_recvmsg_copy_hdr(req, iomsg); in io_recvmsg_copy_hdr()
4528 return __io_recvmsg_copy_hdr(req, iomsg); in io_recvmsg_copy_hdr()
4531 static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req, in io_recv_buffer_select() argument
4534 struct io_sr_msg *sr = &req->sr_msg; in io_recv_buffer_select()
4537 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock); in io_recv_buffer_select()
4542 req->flags |= REQ_F_BUFFER_SELECTED; in io_recv_buffer_select()
4546 static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req) in io_put_recv_kbuf() argument
4548 return io_put_kbuf(req, req->sr_msg.kbuf); in io_put_recv_kbuf()
4551 static int io_recvmsg_prep(struct io_kiocb *req, in io_recvmsg_prep() argument
4554 struct io_async_msghdr *async_msg = req->async_data; in io_recvmsg_prep()
4555 struct io_sr_msg *sr = &req->sr_msg; in io_recvmsg_prep()
4558 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_recvmsg_prep()
4567 if (req->ctx->compat) in io_recvmsg_prep()
4571 if (!async_msg || !io_op_defs[req->opcode].needs_async_data) in io_recvmsg_prep()
4573 ret = io_recvmsg_copy_hdr(req, async_msg); in io_recvmsg_prep()
4575 req->flags |= REQ_F_NEED_CLEANUP; in io_recvmsg_prep()
4579 static int io_recvmsg(struct io_kiocb *req, bool force_nonblock, in io_recvmsg() argument
4588 sock = sock_from_file(req->file, &ret); in io_recvmsg()
4592 if (req->async_data) { in io_recvmsg()
4593 kmsg = req->async_data; in io_recvmsg()
4600 ret = io_recvmsg_copy_hdr(req, &iomsg); in io_recvmsg()
4606 if (req->flags & REQ_F_BUFFER_SELECT) { in io_recvmsg()
4607 kbuf = io_recv_buffer_select(req, !force_nonblock); in io_recvmsg()
4612 1, req->sr_msg.len); in io_recvmsg()
4615 flags = req->sr_msg.msg_flags; in io_recvmsg()
4617 req->flags |= REQ_F_NOWAIT; in io_recvmsg()
4621 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg, in io_recvmsg()
4624 return io_setup_async_msg(req, kmsg); in io_recvmsg()
4628 if (req->flags & REQ_F_BUFFER_SELECTED) in io_recvmsg()
4629 cflags = io_put_recv_kbuf(req); in io_recvmsg()
4632 req->flags &= ~REQ_F_NEED_CLEANUP; in io_recvmsg()
4634 req_set_fail_links(req); in io_recvmsg()
4635 __io_req_complete(req, ret, cflags, cs); in io_recvmsg()
4639 static int io_recv(struct io_kiocb *req, bool force_nonblock, in io_recv() argument
4643 struct io_sr_msg *sr = &req->sr_msg; in io_recv()
4651 sock = sock_from_file(req->file, &ret); in io_recv()
4655 if (req->flags & REQ_F_BUFFER_SELECT) { in io_recv()
4656 kbuf = io_recv_buffer_select(req, !force_nonblock); in io_recv()
4673 flags = req->sr_msg.msg_flags; in io_recv()
4675 req->flags |= REQ_F_NOWAIT; in io_recv()
4685 if (req->flags & REQ_F_BUFFER_SELECTED) in io_recv()
4686 cflags = io_put_recv_kbuf(req); in io_recv()
4688 req_set_fail_links(req); in io_recv()
4689 __io_req_complete(req, ret, cflags, cs); in io_recv()
4693 static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_accept_prep() argument
4695 struct io_accept *accept = &req->accept; in io_accept_prep()
4697 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) in io_accept_prep()
4709 static int io_accept(struct io_kiocb *req, bool force_nonblock, in io_accept() argument
4712 struct io_accept *accept = &req->accept; in io_accept()
4716 if (req->file->f_flags & O_NONBLOCK) in io_accept()
4717 req->flags |= REQ_F_NOWAIT; in io_accept()
4719 ret = __sys_accept4_file(req->file, file_flags, accept->addr, in io_accept()
4727 req_set_fail_links(req); in io_accept()
4729 __io_req_complete(req, ret, 0, cs); in io_accept()
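
io_accept_prep() and io_accept() above forward to __sys_accept4_file(); when the backlog is empty the non-blocking attempt returns -EAGAIN and the request waits on the async poll machinery instead of tying up a thread. A self-contained sketch (error checking mostly elided) that accepts a loopback connection made by the same process:

#include <liburing.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
    struct io_uring ring;
    struct io_uring_cqe *cqe;
    struct sockaddr_in addr = { .sin_family = AF_INET };
    socklen_t alen = sizeof(addr);
    int lfd = socket(AF_INET, SOCK_STREAM, 0);
    int cfd = socket(AF_INET, SOCK_STREAM, 0);

    if (lfd < 0 || cfd < 0 || io_uring_queue_init(4, &ring, 0) < 0)
        return 1;
    addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
    addr.sin_port = 0;                            /* pick an ephemeral port */
    bind(lfd, (struct sockaddr *) &addr, sizeof(addr));
    listen(lfd, 1);
    getsockname(lfd, (struct sockaddr *) &addr, &alen);

    /* queue the accept before any connection exists */
    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
    io_uring_prep_accept(sqe, lfd, NULL, NULL, 0);
    io_uring_submit(&ring);

    /* complete the handshake from the same process */
    connect(cfd, (struct sockaddr *) &addr, sizeof(addr));

    io_uring_wait_cqe(&ring, &cqe);
    printf("accepted fd %d\n", cqe->res);
    if (cqe->res >= 0)
        close(cqe->res);
    io_uring_cqe_seen(&ring, cqe);

    close(cfd);
    close(lfd);
    io_uring_queue_exit(&ring);
    return 0;
}
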
4733 static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_connect_prep() argument
4735 struct io_connect *conn = &req->connect; in io_connect_prep()
4736 struct io_async_connect *io = req->async_data; in io_connect_prep()
4738 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) in io_connect_prep()
4753 static int io_connect(struct io_kiocb *req, bool force_nonblock, in io_connect() argument
4760 if (req->async_data) { in io_connect()
4761 io = req->async_data; in io_connect()
4763 ret = move_addr_to_kernel(req->connect.addr, in io_connect()
4764 req->connect.addr_len, in io_connect()
4773 ret = __sys_connect_file(req->file, &io->address, in io_connect()
4774 req->connect.addr_len, file_flags); in io_connect()
4776 if (req->async_data) in io_connect()
4778 if (io_alloc_async_data(req)) { in io_connect()
4782 io = req->async_data; in io_connect()
4783 memcpy(req->async_data, &__io, sizeof(__io)); in io_connect()
4790 req_set_fail_links(req); in io_connect()
4791 __io_req_complete(req, ret, 0, cs); in io_connect()
4795 static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_sendmsg_prep() argument
4800 static int io_sendmsg(struct io_kiocb *req, bool force_nonblock, in io_sendmsg() argument
4806 static int io_send(struct io_kiocb *req, bool force_nonblock, in io_send() argument
4812 static int io_recvmsg_prep(struct io_kiocb *req, in io_recvmsg_prep() argument
4818 static int io_recvmsg(struct io_kiocb *req, bool force_nonblock, in io_recvmsg() argument
4824 static int io_recv(struct io_kiocb *req, bool force_nonblock, in io_recv() argument
4830 static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_accept_prep() argument
4835 static int io_accept(struct io_kiocb *req, bool force_nonblock, in io_accept() argument
4841 static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_connect_prep() argument
4846 static int io_connect(struct io_kiocb *req, bool force_nonblock, in io_connect() argument
4855 struct io_kiocb *req; member
4859 static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll, in __io_async_wake() argument
4869 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask); in __io_async_wake()
4873 req->result = mask; in __io_async_wake()
4874 init_task_work(&req->task_work, func); in __io_async_wake()
4875 percpu_ref_get(&req->ctx->refs); in __io_async_wake()
4883 twa_signal_ok = (poll->head != &req->task->sighand->signalfd_wqh); in __io_async_wake()
4891 ret = io_req_task_work_add(req, twa_signal_ok); in __io_async_wake()
4896 tsk = io_wq_get_task(req->ctx->io_wq); in __io_async_wake()
4897 task_work_add(tsk, &req->task_work, TWA_NONE); in __io_async_wake()
4903 static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll) in io_poll_rewait() argument
4904 __acquires(&req->ctx->completion_lock) in io_poll_rewait()
4906 struct io_ring_ctx *ctx = req->ctx; in io_poll_rewait()
4908 if (!req->result && !READ_ONCE(poll->canceled)) { in io_poll_rewait()
4911 req->result = vfs_poll(req->file, &pt) & poll->events; in io_poll_rewait()
4915 if (!req->result && !READ_ONCE(poll->canceled)) { in io_poll_rewait()
4923 static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req) in io_poll_get_double() argument
4926 if (req->opcode == IORING_OP_POLL_ADD) in io_poll_get_double()
4927 return req->async_data; in io_poll_get_double()
4928 return req->apoll->double_poll; in io_poll_get_double()
4931 static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req) in io_poll_get_single() argument
4933 if (req->opcode == IORING_OP_POLL_ADD) in io_poll_get_single()
4934 return &req->poll; in io_poll_get_single()
4935 return &req->apoll->poll; in io_poll_get_single()
4938 static void io_poll_remove_double(struct io_kiocb *req) in io_poll_remove_double() argument
4940 struct io_poll_iocb *poll = io_poll_get_double(req); in io_poll_remove_double()
4942 lockdep_assert_held(&req->ctx->completion_lock); in io_poll_remove_double()
4950 refcount_dec(&req->refs); in io_poll_remove_double()
4956 static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error) in io_poll_complete() argument
4958 struct io_ring_ctx *ctx = req->ctx; in io_poll_complete()
4960 io_poll_remove_double(req); in io_poll_complete()
4961 req->poll.done = true; in io_poll_complete()
4962 io_cqring_fill_event(req, error ? error : mangle_poll(mask)); in io_poll_complete()
4968 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); in io_poll_task_func() local
4969 struct io_ring_ctx *ctx = req->ctx; in io_poll_task_func()
4972 if (io_poll_rewait(req, &req->poll)) { in io_poll_task_func()
4975 hash_del(&req->hash_node); in io_poll_task_func()
4976 io_poll_complete(req, req->result, 0); in io_poll_task_func()
4979 nxt = io_put_req_find_next(req); in io_poll_task_func()
4991 struct io_kiocb *req = wait->private; in io_poll_double_wake() local
4992 struct io_poll_iocb *poll = io_poll_get_single(req); in io_poll_double_wake()
5016 refcount_dec(&req->refs); in io_poll_double_wake()
5035 struct io_kiocb *req = pt->req; in __io_queue_proc() local
5056 refcount_inc(&req->refs); in __io_queue_proc()
5057 poll->wait.private = req; in __io_queue_proc()
5074 struct async_poll *apoll = pt->req->apoll; in io_async_queue_proc()
5081 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); in io_async_task_func() local
5082 struct async_poll *apoll = req->apoll; in io_async_task_func()
5083 struct io_ring_ctx *ctx = req->ctx; in io_async_task_func()
5085 trace_io_uring_task_run(req->ctx, req->opcode, req->user_data); in io_async_task_func()
5087 if (io_poll_rewait(req, &apoll->poll)) { in io_async_task_func()
5093 /* If req is still hashed, it cannot have been canceled. Don't check. */ in io_async_task_func()
5094 if (hash_hashed(&req->hash_node)) in io_async_task_func()
5095 hash_del(&req->hash_node); in io_async_task_func()
5097 io_poll_remove_double(req); in io_async_task_func()
5101 __io_req_task_submit(req); in io_async_task_func()
5103 __io_req_task_cancel(req, -ECANCELED); in io_async_task_func()
5113 struct io_kiocb *req = wait->private; in io_async_wake() local
5114 struct io_poll_iocb *poll = &req->apoll->poll; in io_async_wake()
5116 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data, in io_async_wake()
5119 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func); in io_async_wake()
5122 static void io_poll_req_insert(struct io_kiocb *req) in io_poll_req_insert() argument
5124 struct io_ring_ctx *ctx = req->ctx; in io_poll_req_insert()
5127 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)]; in io_poll_req_insert()
5128 hlist_add_head(&req->hash_node, list); in io_poll_req_insert()
5131 static __poll_t __io_arm_poll_handler(struct io_kiocb *req, in __io_arm_poll_handler() argument
5137 struct io_ring_ctx *ctx = req->ctx; in __io_arm_poll_handler()
5140 INIT_HLIST_NODE(&req->hash_node); in __io_arm_poll_handler()
5142 poll->file = req->file; in __io_arm_poll_handler()
5143 poll->wait.private = req; in __io_arm_poll_handler()
5146 ipt->req = req; in __io_arm_poll_handler()
5149 mask = vfs_poll(req->file, &ipt->pt) & poll->events; in __io_arm_poll_handler()
5165 io_poll_req_insert(req); in __io_arm_poll_handler()
5172 static bool io_arm_poll_handler(struct io_kiocb *req) in io_arm_poll_handler() argument
5174 const struct io_op_def *def = &io_op_defs[req->opcode]; in io_arm_poll_handler()
5175 struct io_ring_ctx *ctx = req->ctx; in io_arm_poll_handler()
5181 if (!req->file || !file_can_poll(req->file)) in io_arm_poll_handler()
5183 if (req->flags & REQ_F_POLLED) in io_arm_poll_handler()
5192 if (!io_file_supports_async(req->file, rw)) in io_arm_poll_handler()
5200 req->flags |= REQ_F_POLLED; in io_arm_poll_handler()
5201 req->apoll = apoll; in io_arm_poll_handler()
5210 if ((req->opcode == IORING_OP_RECVMSG) && in io_arm_poll_handler()
5211 (req->sr_msg.msg_flags & MSG_ERRQUEUE)) in io_arm_poll_handler()
5218 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, in io_arm_poll_handler()
5221 io_poll_remove_double(req); in io_arm_poll_handler()
5228 trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask, in io_arm_poll_handler()
5233 static bool __io_poll_remove_one(struct io_kiocb *req, in __io_poll_remove_one() argument
5245 hash_del(&req->hash_node); in __io_poll_remove_one()
5249 static bool io_poll_remove_one(struct io_kiocb *req) in io_poll_remove_one() argument
5253 io_poll_remove_double(req); in io_poll_remove_one()
5255 if (req->opcode == IORING_OP_POLL_ADD) { in io_poll_remove_one()
5256 do_complete = __io_poll_remove_one(req, &req->poll); in io_poll_remove_one()
5258 struct async_poll *apoll = req->apoll; in io_poll_remove_one()
5261 do_complete = __io_poll_remove_one(req, &apoll->poll); in io_poll_remove_one()
5263 io_put_req(req); in io_poll_remove_one()
5270 io_cqring_fill_event(req, -ECANCELED); in io_poll_remove_one()
5271 io_commit_cqring(req->ctx); in io_poll_remove_one()
5272 req_set_fail_links(req); in io_poll_remove_one()
5273 io_put_req_deferred(req, 1); in io_poll_remove_one()
5285 struct io_kiocb *req; in io_poll_remove_all() local
5293 hlist_for_each_entry_safe(req, tmp, list, hash_node) { in io_poll_remove_all()
5294 if (io_task_match(req, tsk)) in io_poll_remove_all()
5295 posted += io_poll_remove_one(req); in io_poll_remove_all()
5309 struct io_kiocb *req; in io_poll_cancel() local
5312 hlist_for_each_entry(req, list, hash_node) { in io_poll_cancel()
5313 if (sqe_addr != req->user_data) in io_poll_cancel()
5315 if (io_poll_remove_one(req)) in io_poll_cancel()
5323 static int io_poll_remove_prep(struct io_kiocb *req, in io_poll_remove_prep() argument
5326 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_poll_remove_prep()
5332 req->poll.addr = READ_ONCE(sqe->addr); in io_poll_remove_prep()
5340 static int io_poll_remove(struct io_kiocb *req) in io_poll_remove() argument
5342 struct io_ring_ctx *ctx = req->ctx; in io_poll_remove()
5346 addr = req->poll.addr; in io_poll_remove()
5352 req_set_fail_links(req); in io_poll_remove()
5353 io_req_complete(req, ret); in io_poll_remove()
5360 struct io_kiocb *req = wait->private; in io_poll_wake() local
5361 struct io_poll_iocb *poll = &req->poll; in io_poll_wake()
5363 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func); in io_poll_wake()
5371 __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data); in io_poll_queue_proc()
5374 static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_poll_add_prep() argument
5376 struct io_poll_iocb *poll = &req->poll; in io_poll_add_prep()
5379 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_poll_add_prep()
5393 static int io_poll_add(struct io_kiocb *req) in io_poll_add() argument
5395 struct io_poll_iocb *poll = &req->poll; in io_poll_add()
5396 struct io_ring_ctx *ctx = req->ctx; in io_poll_add()
5402 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events, in io_poll_add()
5407 io_poll_complete(req, mask, 0); in io_poll_add()
5413 io_put_req(req); in io_poll_add()
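__io_arm_poll_handler() is shared by the explicit IORING_OP_POLL_ADD path (io_poll_add()) and the internal async-poll fallback (io_arm_poll_handler()). Only the explicit form is visible to userspace; a sketch using liburing's older pointer-based prototypes, with fd and the 0x1234 tag purely illustrative:

#include <liburing.h>
#include <poll.h>

/* Arm a one-shot poll for readability, then cancel it by user_data. */
static void poll_then_cancel(struct io_uring *ring, int fd)
{
        struct io_uring_sqe *sqe;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_poll_add(sqe, fd, POLLIN);        /* one-shot in this kernel */
        io_uring_sqe_set_data(sqe, (void *)0x1234);     /* req->user_data */

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_poll_remove(sqe, (void *)0x1234); /* matched in io_poll_cancel() */

        io_uring_submit(ring);
}

On completion the poll CQE's res is mangle_poll(mask) from io_poll_complete(), i.e. the revents bits.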
5422 struct io_kiocb *req = data->req; in io_timeout_fn() local
5423 struct io_ring_ctx *ctx = req->ctx; in io_timeout_fn()
5427 list_del_init(&req->timeout.list); in io_timeout_fn()
5428 atomic_set(&req->ctx->cq_timeouts, in io_timeout_fn()
5429 atomic_read(&req->ctx->cq_timeouts) + 1); in io_timeout_fn()
5431 io_cqring_fill_event(req, -ETIME); in io_timeout_fn()
5436 req_set_fail_links(req); in io_timeout_fn()
5437 io_put_req(req); in io_timeout_fn()
5441 static int __io_timeout_cancel(struct io_kiocb *req) in __io_timeout_cancel() argument
5443 struct io_timeout_data *io = req->async_data; in __io_timeout_cancel()
5449 list_del_init(&req->timeout.list); in __io_timeout_cancel()
5451 req_set_fail_links(req); in __io_timeout_cancel()
5452 io_cqring_fill_event(req, -ECANCELED); in __io_timeout_cancel()
5453 io_put_req_deferred(req, 1); in __io_timeout_cancel()
5459 struct io_kiocb *req; in io_timeout_cancel() local
5462 list_for_each_entry(req, &ctx->timeout_list, timeout.list) { in io_timeout_cancel()
5463 if (user_data == req->user_data) { in io_timeout_cancel()
5472 return __io_timeout_cancel(req); in io_timeout_cancel()
5475 static int io_timeout_remove_prep(struct io_kiocb *req, in io_timeout_remove_prep() argument
5478 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_timeout_remove_prep()
5480 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) in io_timeout_remove_prep()
5485 req->timeout_rem.addr = READ_ONCE(sqe->addr); in io_timeout_remove_prep()
5492 static int io_timeout_remove(struct io_kiocb *req) in io_timeout_remove() argument
5494 struct io_ring_ctx *ctx = req->ctx; in io_timeout_remove()
5498 ret = io_timeout_cancel(ctx, req->timeout_rem.addr); in io_timeout_remove()
5500 io_cqring_fill_event(req, ret); in io_timeout_remove()
5505 req_set_fail_links(req); in io_timeout_remove()
5506 io_put_req(req); in io_timeout_remove()
5510 static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_timeout_prep() argument
5517 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_timeout_prep()
5527 req->timeout.off = off; in io_timeout_prep()
5529 if (!req->async_data && io_alloc_async_data(req)) in io_timeout_prep()
5532 data = req->async_data; in io_timeout_prep()
5533 data->req = req; in io_timeout_prep()
5547 static int io_timeout(struct io_kiocb *req) in io_timeout() argument
5549 struct io_ring_ctx *ctx = req->ctx; in io_timeout()
5550 struct io_timeout_data *data = req->async_data; in io_timeout()
5552 u32 tail, off = req->timeout.off; in io_timeout()
5561 if (io_is_timeout_noseq(req)) { in io_timeout()
5567 req->timeout.target_seq = tail + off; in io_timeout()
5584 list_add(&req->timeout.list, entry); in io_timeout()
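io_timeout() links the request into ctx->timeout_list with target_seq = tail + off, where off is taken from the SQE in io_timeout_prep(); with off == 0 the request is a pure timer (io_is_timeout_noseq()). A hedged liburing sketch:

#include <liburing.h>

/*
 * Complete after 2.5s, or earlier once `count` other completions have
 * been posted; `count` is what lands in req->timeout.off above.
 */
static void queue_timeout(struct io_uring *ring, unsigned count)
{
        static struct __kernel_timespec ts = { .tv_sec = 2, .tv_nsec = 500000000 };
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        io_uring_prep_timeout(sqe, &ts, count, 0);
        io_uring_submit(ring);
        /* CQE res is -ETIME if the timer fired (io_timeout_fn()), 0 otherwise. */
}

A later IORING_OP_TIMEOUT_REMOVE carrying the same user_data would be routed through io_timeout_remove()/io_timeout_cancel() above.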
5593 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_cancel_cb() local
5595 return req->user_data == (unsigned long) data; in io_cancel_cb()
5620 struct io_kiocb *req, __u64 sqe_addr, in io_async_find_and_cancel() argument
5640 io_cqring_fill_event(req, ret); in io_async_find_and_cancel()
5646 req_set_fail_links(req); in io_async_find_and_cancel()
5647 io_put_req(req); in io_async_find_and_cancel()
5650 static int io_async_cancel_prep(struct io_kiocb *req, in io_async_cancel_prep() argument
5653 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_async_cancel_prep()
5655 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) in io_async_cancel_prep()
5660 req->cancel.addr = READ_ONCE(sqe->addr); in io_async_cancel_prep()
5664 static int io_async_cancel(struct io_kiocb *req) in io_async_cancel() argument
5666 struct io_ring_ctx *ctx = req->ctx; in io_async_cancel()
5668 io_async_find_and_cancel(ctx, req, req->cancel.addr, 0); in io_async_cancel()
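io_async_cancel() identifies its target solely by user_data: io_cancel_cb() above compares req->user_data against the address supplied in the SQE. Illustrative userspace side, using liburing's older io_uring_prep_cancel() prototype:

#include <liburing.h>

/* Request cancellation of a previously submitted SQE by its user_data. */
static void cancel_by_user_data(struct io_uring *ring, void *user_data)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        io_uring_prep_cancel(sqe, user_data, 0);        /* IORING_OP_ASYNC_CANCEL */
        io_uring_submit(ring);
        /* CQE res: 0 on success, -ENOENT if not found, -EALREADY if running. */
}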
5672 static int io_files_update_prep(struct io_kiocb *req, in io_files_update_prep() argument
5675 if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL)) in io_files_update_prep()
5677 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) in io_files_update_prep()
5682 req->files_update.offset = READ_ONCE(sqe->off); in io_files_update_prep()
5683 req->files_update.nr_args = READ_ONCE(sqe->len); in io_files_update_prep()
5684 if (!req->files_update.nr_args) in io_files_update_prep()
5686 req->files_update.arg = READ_ONCE(sqe->addr); in io_files_update_prep()
5690 static int io_files_update(struct io_kiocb *req, bool force_nonblock, in io_files_update() argument
5693 struct io_ring_ctx *ctx = req->ctx; in io_files_update()
5700 up.offset = req->files_update.offset; in io_files_update()
5701 up.fds = req->files_update.arg; in io_files_update()
5704 ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args); in io_files_update()
5708 req_set_fail_links(req); in io_files_update()
5709 __io_req_complete(req, ret, 0, cs); in io_files_update()
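io_files_update() is the SQE-based twin of the IORING_REGISTER_FILES_UPDATE registration opcode; both end up in __io_sqe_files_update(), and io_files_update_prep() rejects SQPOLL rings (the IORING_SETUP_SQPOLL check above). liburing exposes the registration form directly; a sketch with slot/newfd as placeholders:

#include <liburing.h>

/* Swap one slot of a registered (fixed) file table for a new descriptor. */
static int update_fixed_file(struct io_uring *ring, unsigned slot, int newfd)
{
        int fds[1] = { newfd };

        /* Returns the number of slots updated, or a negative error. */
        return io_uring_register_files_update(ring, slot, fds, 1);
}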
5713 static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_req_prep() argument
5715 switch (req->opcode) { in io_req_prep()
5721 return io_read_prep(req, sqe); in io_req_prep()
5725 return io_write_prep(req, sqe); in io_req_prep()
5727 return io_poll_add_prep(req, sqe); in io_req_prep()
5729 return io_poll_remove_prep(req, sqe); in io_req_prep()
5731 return io_prep_fsync(req, sqe); in io_req_prep()
5733 return io_prep_sfr(req, sqe); in io_req_prep()
5736 return io_sendmsg_prep(req, sqe); in io_req_prep()
5739 return io_recvmsg_prep(req, sqe); in io_req_prep()
5741 return io_connect_prep(req, sqe); in io_req_prep()
5743 return io_timeout_prep(req, sqe, false); in io_req_prep()
5745 return io_timeout_remove_prep(req, sqe); in io_req_prep()
5747 return io_async_cancel_prep(req, sqe); in io_req_prep()
5749 return io_timeout_prep(req, sqe, true); in io_req_prep()
5751 return io_accept_prep(req, sqe); in io_req_prep()
5753 return io_fallocate_prep(req, sqe); in io_req_prep()
5755 return io_openat_prep(req, sqe); in io_req_prep()
5757 return io_close_prep(req, sqe); in io_req_prep()
5759 return io_files_update_prep(req, sqe); in io_req_prep()
5761 return io_statx_prep(req, sqe); in io_req_prep()
5763 return io_fadvise_prep(req, sqe); in io_req_prep()
5765 return io_madvise_prep(req, sqe); in io_req_prep()
5767 return io_openat2_prep(req, sqe); in io_req_prep()
5769 return io_epoll_ctl_prep(req, sqe); in io_req_prep()
5771 return io_splice_prep(req, sqe); in io_req_prep()
5773 return io_provide_buffers_prep(req, sqe); in io_req_prep()
5775 return io_remove_buffers_prep(req, sqe); in io_req_prep()
5777 return io_tee_prep(req, sqe); in io_req_prep()
5781 req->opcode); in io_req_prep()
5785 static int io_req_defer_prep(struct io_kiocb *req, in io_req_defer_prep() argument
5790 if (io_alloc_async_data(req)) in io_req_defer_prep()
5792 return io_req_prep(req, sqe); in io_req_defer_prep()
5795 static u32 io_get_sequence(struct io_kiocb *req) in io_get_sequence() argument
5798 struct io_ring_ctx *ctx = req->ctx; in io_get_sequence()
5801 if (req->flags & REQ_F_LINK_HEAD) in io_get_sequence()
5802 list_for_each_entry(pos, &req->link_list, link_list) in io_get_sequence()
5809 static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_req_defer() argument
5811 struct io_ring_ctx *ctx = req->ctx; in io_req_defer()
5816 /* Still need defer if there is pending req in defer list. */ in io_req_defer()
5818 !(req->flags & REQ_F_IO_DRAIN))) in io_req_defer()
5821 seq = io_get_sequence(req); in io_req_defer()
5823 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) in io_req_defer()
5826 if (!req->async_data) { in io_req_defer()
5827 ret = io_req_defer_prep(req, sqe); in io_req_defer()
5831 io_prep_async_link(req); in io_req_defer()
5837 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) { in io_req_defer()
5840 io_queue_async_work(req); in io_req_defer()
5844 trace_io_uring_defer(ctx, req, req->user_data); in io_req_defer()
5845 de->req = req; in io_req_defer()
5852 static void io_req_drop_files(struct io_kiocb *req) in io_req_drop_files() argument
5854 struct io_ring_ctx *ctx = req->ctx; in io_req_drop_files()
5858 list_del(&req->inflight_entry); in io_req_drop_files()
5862 req->flags &= ~REQ_F_INFLIGHT; in io_req_drop_files()
5863 put_files_struct(req->work.identity->files); in io_req_drop_files()
5864 put_nsproxy(req->work.identity->nsproxy); in io_req_drop_files()
5865 req->work.flags &= ~IO_WQ_WORK_FILES; in io_req_drop_files()
5868 static void __io_clean_op(struct io_kiocb *req) in __io_clean_op() argument
5870 if (req->flags & REQ_F_BUFFER_SELECTED) { in __io_clean_op()
5871 switch (req->opcode) { in __io_clean_op()
5875 kfree((void *)(unsigned long)req->rw.addr); in __io_clean_op()
5879 kfree(req->sr_msg.kbuf); in __io_clean_op()
5882 req->flags &= ~REQ_F_BUFFER_SELECTED; in __io_clean_op()
5885 if (req->flags & REQ_F_NEED_CLEANUP) { in __io_clean_op()
5886 switch (req->opcode) { in __io_clean_op()
5893 struct io_async_rw *io = req->async_data; in __io_clean_op()
5900 struct io_async_msghdr *io = req->async_data; in __io_clean_op()
5907 io_put_file(req, req->splice.file_in, in __io_clean_op()
5908 (req->splice.flags & SPLICE_F_FD_IN_FIXED)); in __io_clean_op()
5912 if (req->open.filename) in __io_clean_op()
5913 putname(req->open.filename); in __io_clean_op()
5916 req->flags &= ~REQ_F_NEED_CLEANUP; in __io_clean_op()
5919 if (req->flags & REQ_F_INFLIGHT) in __io_clean_op()
5920 io_req_drop_files(req); in __io_clean_op()
5923 static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock, in io_issue_sqe() argument
5926 struct io_ring_ctx *ctx = req->ctx; in io_issue_sqe()
5929 switch (req->opcode) { in io_issue_sqe()
5931 ret = io_nop(req, cs); in io_issue_sqe()
5936 ret = io_read(req, force_nonblock, cs); in io_issue_sqe()
5941 ret = io_write(req, force_nonblock, cs); in io_issue_sqe()
5944 ret = io_fsync(req, force_nonblock); in io_issue_sqe()
5947 ret = io_poll_add(req); in io_issue_sqe()
5950 ret = io_poll_remove(req); in io_issue_sqe()
5953 ret = io_sync_file_range(req, force_nonblock); in io_issue_sqe()
5956 ret = io_sendmsg(req, force_nonblock, cs); in io_issue_sqe()
5959 ret = io_send(req, force_nonblock, cs); in io_issue_sqe()
5962 ret = io_recvmsg(req, force_nonblock, cs); in io_issue_sqe()
5965 ret = io_recv(req, force_nonblock, cs); in io_issue_sqe()
5968 ret = io_timeout(req); in io_issue_sqe()
5971 ret = io_timeout_remove(req); in io_issue_sqe()
5974 ret = io_accept(req, force_nonblock, cs); in io_issue_sqe()
5977 ret = io_connect(req, force_nonblock, cs); in io_issue_sqe()
5980 ret = io_async_cancel(req); in io_issue_sqe()
5983 ret = io_fallocate(req, force_nonblock); in io_issue_sqe()
5986 ret = io_openat(req, force_nonblock); in io_issue_sqe()
5989 ret = io_close(req, force_nonblock, cs); in io_issue_sqe()
5992 ret = io_files_update(req, force_nonblock, cs); in io_issue_sqe()
5995 ret = io_statx(req, force_nonblock); in io_issue_sqe()
5998 ret = io_fadvise(req, force_nonblock); in io_issue_sqe()
6001 ret = io_madvise(req, force_nonblock); in io_issue_sqe()
6004 ret = io_openat2(req, force_nonblock); in io_issue_sqe()
6007 ret = io_epoll_ctl(req, force_nonblock, cs); in io_issue_sqe()
6010 ret = io_splice(req, force_nonblock); in io_issue_sqe()
6013 ret = io_provide_buffers(req, force_nonblock, cs); in io_issue_sqe()
6016 ret = io_remove_buffers(req, force_nonblock, cs); in io_issue_sqe()
6019 ret = io_tee(req, force_nonblock); in io_issue_sqe()
6030 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) { in io_issue_sqe()
6037 io_iopoll_req_issued(req); in io_issue_sqe()
6048 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_wq_submit_work() local
6052 timeout = io_prep_linked_timeout(req); in io_wq_submit_work()
6064 ret = io_issue_sqe(req, false, NULL); in io_wq_submit_work()
6077 req_set_fail_links(req); in io_wq_submit_work()
6078 io_req_complete(req, ret); in io_wq_submit_work()
6081 return io_steal_work(req); in io_wq_submit_work()
6094 struct io_kiocb *req, int fd, bool fixed) in io_file_get() argument
6096 struct io_ring_ctx *ctx = req->ctx; in io_file_get()
6105 req->fixed_file_refs = &ctx->file_data->node->refs; in io_file_get()
6106 percpu_ref_get(req->fixed_file_refs); in io_file_get()
6116 static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req, in io_req_set_file() argument
6121 fixed = (req->flags & REQ_F_FIXED_FILE) != 0; in io_req_set_file()
6122 if (unlikely(!fixed && io_async_submit(req->ctx))) in io_req_set_file()
6125 req->file = io_file_get(state, req, fd, fixed); in io_req_set_file()
6126 if (req->file || io_op_defs[req->opcode].needs_file_no_error) in io_req_set_file()
6135 struct io_kiocb *req = data->req; in io_link_timeout_fn() local
6136 struct io_ring_ctx *ctx = req->ctx; in io_link_timeout_fn()
6146 if (!list_empty(&req->link_list)) { in io_link_timeout_fn()
6147 prev = list_entry(req->link_list.prev, struct io_kiocb, in io_link_timeout_fn()
6150 list_del_init(&req->link_list); in io_link_timeout_fn()
6159 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME); in io_link_timeout_fn()
6162 io_req_complete(req, -ETIME); in io_link_timeout_fn()
6167 static void __io_queue_linked_timeout(struct io_kiocb *req) in __io_queue_linked_timeout() argument
6173 if (!list_empty(&req->link_list)) { in __io_queue_linked_timeout()
6174 struct io_timeout_data *data = req->async_data; in __io_queue_linked_timeout()
6182 static void io_queue_linked_timeout(struct io_kiocb *req) in io_queue_linked_timeout() argument
6184 struct io_ring_ctx *ctx = req->ctx; in io_queue_linked_timeout()
6187 __io_queue_linked_timeout(req); in io_queue_linked_timeout()
6191 io_put_req(req); in io_queue_linked_timeout()
6194 static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req) in io_prep_linked_timeout() argument
6198 if (!(req->flags & REQ_F_LINK_HEAD)) in io_prep_linked_timeout()
6200 if (req->flags & REQ_F_LINK_TIMEOUT) in io_prep_linked_timeout()
6203 nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, in io_prep_linked_timeout()
6209 req->flags |= REQ_F_LINK_TIMEOUT; in io_prep_linked_timeout()
6213 static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs) in __io_queue_sqe() argument
6220 linked_timeout = io_prep_linked_timeout(req); in __io_queue_sqe()
6222 if ((req->flags & REQ_F_WORK_INITIALIZED) && in __io_queue_sqe()
6223 (req->work.flags & IO_WQ_WORK_CREDS) && in __io_queue_sqe()
6224 req->work.identity->creds != current_cred()) { in __io_queue_sqe()
6227 if (old_creds == req->work.identity->creds) in __io_queue_sqe()
6230 old_creds = override_creds(req->work.identity->creds); in __io_queue_sqe()
6233 ret = io_issue_sqe(req, true, cs); in __io_queue_sqe()
6239 if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) { in __io_queue_sqe()
6240 if (!io_arm_poll_handler(req)) { in __io_queue_sqe()
6245 io_queue_async_work(req); in __io_queue_sqe()
6252 req = io_put_req_find_next(req); in __io_queue_sqe()
6256 if (req) { in __io_queue_sqe()
6257 if (!(req->flags & REQ_F_FORCE_ASYNC)) in __io_queue_sqe()
6259 io_queue_async_work(req); in __io_queue_sqe()
6263 req->flags &= ~REQ_F_LINK_TIMEOUT; in __io_queue_sqe()
6264 req_set_fail_links(req); in __io_queue_sqe()
6265 io_put_req(req); in __io_queue_sqe()
6266 io_req_complete(req, ret); in __io_queue_sqe()
6273 static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_queue_sqe() argument
6278 ret = io_req_defer(req, sqe); in io_queue_sqe()
6282 req_set_fail_links(req); in io_queue_sqe()
6283 io_put_req(req); in io_queue_sqe()
6284 io_req_complete(req, ret); in io_queue_sqe()
6286 } else if (req->flags & REQ_F_FORCE_ASYNC) { in io_queue_sqe()
6287 if (!req->async_data) { in io_queue_sqe()
6288 ret = io_req_defer_prep(req, sqe); in io_queue_sqe()
6292 io_queue_async_work(req); in io_queue_sqe()
6295 ret = io_req_prep(req, sqe); in io_queue_sqe()
6299 __io_queue_sqe(req, cs); in io_queue_sqe()
6303 static inline void io_queue_link_head(struct io_kiocb *req, in io_queue_link_head() argument
6306 if (unlikely(req->flags & REQ_F_FAIL_LINK)) { in io_queue_link_head()
6307 io_put_req(req); in io_queue_link_head()
6308 io_req_complete(req, -ECANCELED); in io_queue_link_head()
6310 io_queue_sqe(req, NULL, cs); in io_queue_link_head()
6313 static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_submit_sqe() argument
6316 struct io_ring_ctx *ctx = req->ctx; in io_submit_sqe()
6336 if (req->flags & REQ_F_IO_DRAIN) { in io_submit_sqe()
6340 ret = io_req_defer_prep(req, sqe); in io_submit_sqe()
6346 trace_io_uring_link(ctx, req, head); in io_submit_sqe()
6347 list_add_tail(&req->link_list, &head->link_list); in io_submit_sqe()
6350 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) { in io_submit_sqe()
6356 req->flags |= REQ_F_IO_DRAIN; in io_submit_sqe()
6359 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) { in io_submit_sqe()
6360 req->flags |= REQ_F_LINK_HEAD; in io_submit_sqe()
6361 INIT_LIST_HEAD(&req->link_list); in io_submit_sqe()
6363 ret = io_req_defer_prep(req, sqe); in io_submit_sqe()
6365 req->flags |= REQ_F_FAIL_LINK; in io_submit_sqe()
6366 *link = req; in io_submit_sqe()
6368 io_queue_sqe(req, sqe, cs); in io_submit_sqe()
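io_submit_sqe() chains requests when IOSQE_IO_LINK/IOSQE_IO_HARDLINK is set (REQ_F_LINK/REQ_F_HARDLINK above), and io_prep_linked_timeout()/io_link_timeout_fn() implement IORING_OP_LINK_TIMEOUT, which caps the request it is linked behind. A hedged liburing sketch of a read with a one-second deadline (fd/buf/len are caller-supplied):

#include <liburing.h>

/*
 * If the read has not completed when the timer fires, io_link_timeout_fn()
 * cancels it: the read CQE then reports -ECANCELED and the timeout -ETIME.
 */
static void read_with_deadline(struct io_uring *ring, int fd,
                               void *buf, unsigned len)
{
        static struct __kernel_timespec ts = { .tv_sec = 1 };
        struct io_uring_sqe *sqe;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_read(sqe, fd, buf, len, 0);
        sqe->flags |= IOSQE_IO_LINK;                    /* REQ_F_LINK on the kernel side */

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_link_timeout(sqe, &ts, 0);        /* IORING_OP_LINK_TIMEOUT */

        io_uring_submit(ring);
}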
6457 struct io_kiocb *req, in io_check_restriction() argument
6463 if (!test_bit(req->opcode, ctx->restrictions.sqe_op)) in io_check_restriction()
6481 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, in io_init_req() argument
6488 req->opcode = READ_ONCE(sqe->opcode); in io_init_req()
6489 req->user_data = READ_ONCE(sqe->user_data); in io_init_req()
6490 req->async_data = NULL; in io_init_req()
6491 req->file = NULL; in io_init_req()
6492 req->ctx = ctx; in io_init_req()
6493 req->flags = 0; in io_init_req()
6495 refcount_set(&req->refs, 2); in io_init_req()
6496 req->task = current; in io_init_req()
6497 req->result = 0; in io_init_req()
6499 if (unlikely(req->opcode >= IORING_OP_LAST)) in io_init_req()
6502 if (unlikely(io_sq_thread_acquire_mm(ctx, req))) in io_init_req()
6510 if (unlikely(!io_check_restriction(ctx, req, sqe_flags))) in io_init_req()
6514 !io_op_defs[req->opcode].buffer_select) in io_init_req()
6526 __io_req_init_async(req); in io_init_req()
6528 req->work.identity = iod; in io_init_req()
6529 req->work.flags |= IO_WQ_WORK_CREDS; in io_init_req()
6533 req->flags |= sqe_flags; in io_init_req()
6535 if (!io_op_defs[req->opcode].needs_file) in io_init_req()
6538 ret = io_req_set_file(state, req, READ_ONCE(sqe->fd)); in io_init_req()
6569 struct io_kiocb *req; in io_submit_sqes() local
6577 req = io_alloc_req(ctx, &state); in io_submit_sqes()
6578 if (unlikely(!req)) { in io_submit_sqes()
6587 err = io_init_req(ctx, req, sqe, &state); in io_submit_sqes()
6590 io_put_req(req); in io_submit_sqes()
6591 io_req_complete(req, err); in io_submit_sqes()
6595 trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data, in io_submit_sqes()
6597 err = io_submit_sqe(req, sqe, &link, &state.comp); in io_submit_sqes()
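io_init_req() copies the SQE's user_data into req->user_data, starts every request with a reference count of two, and, for opcodes that need one, resolves req->file before io_submit_sqe() queues it; the user_data value comes back untouched in the CQE. A complete, minimal liburing round trip through this path using IORING_OP_NOP (the 0xcafe tag is arbitrary):

#include <liburing.h>
#include <stdio.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;

        if (io_uring_queue_init(8, &ring, 0) < 0)
                return 1;

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_nop(sqe);                         /* IORING_OP_NOP -> io_nop() */
        io_uring_sqe_set_data(sqe, (void *)0xcafe);     /* req->user_data */

        io_uring_submit(&ring);                         /* enters io_submit_sqes() */
        io_uring_wait_cqe(&ring, &cqe);
        printf("res=%d user_data=0x%llx\n", cqe->res,
               (unsigned long long)cqe->user_data);
        io_uring_cqe_seen(&ring, cqe);
        io_uring_queue_exit(&ring);
        return 0;
}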
7685 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_free_work() local
7688 io_put_req(req); in io_free_work()
8433 * Returns true if 'preq' is the link parent of 'req'
8435 static bool io_match_link(struct io_kiocb *preq, struct io_kiocb *req) in io_match_link() argument
8443 if (link == req) in io_match_link()
8451 * We're looking to cancel 'req' because it's holding on to our files, but
8452 * 'req' could be a link to another request. See if it is, and cancel that
8455 static bool io_poll_remove_link(struct io_ring_ctx *ctx, struct io_kiocb *req) in io_poll_remove_link() argument
8468 found = io_match_link(preq, req); in io_poll_remove_link()
8480 struct io_kiocb *req) in io_timeout_remove_link() argument
8487 found = io_match_link(preq, req); in io_timeout_remove_link()
8499 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_cancel_link_cb() local
8502 if (req->flags & REQ_F_LINK_TIMEOUT) { in io_cancel_link_cb()
8504 struct io_ring_ctx *ctx = req->ctx; in io_cancel_link_cb()
8508 ret = io_match_link(req, data); in io_cancel_link_cb()
8511 ret = io_match_link(req, data); in io_cancel_link_cb()
8516 static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req) in io_attempt_cancel() argument
8521 cret = io_wq_cancel_work(ctx->io_wq, &req->work); in io_attempt_cancel()
8526 cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_link_cb, req, true); in io_attempt_cancel()
8531 if (io_poll_remove_link(ctx, req)) in io_attempt_cancel()
8534 /* final option, timeout link is holding this req pending */ in io_attempt_cancel()
8535 io_timeout_remove_link(ctx, req); in io_attempt_cancel()
8547 if (io_task_match(de->req, task) && in io_cancel_defer_files()
8548 io_match_files(de->req, files)) { in io_cancel_defer_files()
8558 req_set_fail_links(de->req); in io_cancel_defer_files()
8559 io_put_req(de->req); in io_cancel_defer_files()
8560 io_req_complete(de->req, -ECANCELED); in io_cancel_defer_files()
8578 struct io_kiocb *cancel_req = NULL, *req; in io_uring_cancel_files() local
8582 list_for_each_entry(req, &ctx->inflight_list, inflight_entry) { in io_uring_cancel_files()
8583 if (files && (req->work.flags & IO_WQ_WORK_FILES) && in io_uring_cancel_files()
8584 req->work.identity->files != files) in io_uring_cancel_files()
8586 /* req is being completed, ignore */ in io_uring_cancel_files()
8587 if (!refcount_inc_not_zero(&req->refs)) in io_uring_cancel_files()
8589 cancel_req = req; in io_uring_cancel_files()
8597 /* We need to keep going until we don't find a matching req */ in io_uring_cancel_files()
8614 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_cancel_task_cb() local
8617 return io_task_match(req, task); in io_cancel_task_cb()
9095 struct io_kiocb *req; in __io_uring_show_fdinfo() local
9097 hlist_for_each_entry(req, list, hash_node) in __io_uring_show_fdinfo()
9098 seq_printf(m, " op=%d, task_works=%d\n", req->opcode, in __io_uring_show_fdinfo()
9099 req->task->task_works != NULL); in __io_uring_show_fdinfo()