Lines Matching full:req

510 struct io_kiocb *req; member
806 typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);
898 struct io_kiocb *req; member
903 /* needs req->file assigned */
1075 static bool io_disarm_next(struct io_kiocb *req);
1084 static void io_put_req(struct io_kiocb *req);
1085 static void io_put_req_deferred(struct io_kiocb *req);
1086 static void io_dismantle_req(struct io_kiocb *req);
1087 static void io_queue_linked_timeout(struct io_kiocb *req);
1091 static void io_clean_op(struct io_kiocb *req);
1093 struct io_kiocb *req, int fd, bool fixed);
1094 static void __io_queue_sqe(struct io_kiocb *req);
1097 static void io_req_task_queue(struct io_kiocb *req);
1099 static int io_req_prep_async(struct io_kiocb *req);
1101 static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
1103 static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
1139 #define req_ref_zero_or_close_to_overflow(req) \ argument
1140 ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
1142 static inline bool req_ref_inc_not_zero(struct io_kiocb *req) in req_ref_inc_not_zero() argument
1144 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT)); in req_ref_inc_not_zero()
1145 return atomic_inc_not_zero(&req->refs); in req_ref_inc_not_zero()
1148 static inline bool req_ref_put_and_test(struct io_kiocb *req) in req_ref_put_and_test() argument
1150 if (likely(!(req->flags & REQ_F_REFCOUNT))) in req_ref_put_and_test()
1153 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req)); in req_ref_put_and_test()
1154 return atomic_dec_and_test(&req->refs); in req_ref_put_and_test()
1157 static inline void req_ref_put(struct io_kiocb *req) in req_ref_put() argument
1159 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT)); in req_ref_put()
1160 WARN_ON_ONCE(req_ref_put_and_test(req)); in req_ref_put()
1163 static inline void req_ref_get(struct io_kiocb *req) in req_ref_get() argument
1165 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT)); in req_ref_get()
1166 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req)); in req_ref_get()
1167 atomic_inc(&req->refs); in req_ref_get()
1170 static inline void __io_req_set_refcount(struct io_kiocb *req, int nr) in __io_req_set_refcount() argument
1172 if (!(req->flags & REQ_F_REFCOUNT)) { in __io_req_set_refcount()
1173 req->flags |= REQ_F_REFCOUNT; in __io_req_set_refcount()
1174 atomic_set(&req->refs, nr); in __io_req_set_refcount()
1178 static inline void io_req_set_refcount(struct io_kiocb *req) in io_req_set_refcount() argument
1180 __io_req_set_refcount(req, 1); in io_req_set_refcount()
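The req_ref_* helpers above implement io_uring's lazy request refcounting: a real atomic refcount only exists once REQ_F_REFCOUNT is set, and req_ref_zero_or_close_to_overflow() catches both a dropped last reference and a counter that has wrapped negative with a single unsigned compare. A minimal standalone C sketch of that wraparound check (userspace demo, not kernel code):

#include <assert.h>

/* Mirrors req_ref_zero_or_close_to_overflow(): reading the counter as
 * unsigned and adding 127u maps the suspicious range [-127, 0] onto
 * [0, 127], so one compare flags both underflow and zero. */
static int zero_or_close_to_overflow(int refs)
{
	return (unsigned int)refs + 127u <= 127u;
}

int main(void)
{
	assert(zero_or_close_to_overflow(0));	/* last ref already gone */
	assert(zero_or_close_to_overflow(-1));	/* underflow / overflowed counter */
	assert(!zero_or_close_to_overflow(1));	/* normal live request */
	return 0;
}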
1183 static inline void io_req_set_rsrc_node(struct io_kiocb *req) in io_req_set_rsrc_node() argument
1185 struct io_ring_ctx *ctx = req->ctx; in io_req_set_rsrc_node()
1187 if (!req->fixed_rsrc_refs) { in io_req_set_rsrc_node()
1188 req->fixed_rsrc_refs = &ctx->rsrc_node->refs; in io_req_set_rsrc_node()
1189 percpu_ref_get(req->fixed_rsrc_refs); in io_req_set_rsrc_node()
1208 struct io_kiocb *req; in io_match_task() local
1215 io_for_each_link(req, head) { in io_match_task()
1216 if (req->flags & REQ_F_INFLIGHT) in io_match_task()
1222 static inline void req_set_fail(struct io_kiocb *req) in req_set_fail() argument
1224 req->flags |= REQ_F_FAIL; in req_set_fail()
1227 static inline void req_fail_link_node(struct io_kiocb *req, int res) in req_fail_link_node() argument
1229 req_set_fail(req); in req_fail_link_node()
1230 req->result = res; in req_fail_link_node()
1240 static inline bool io_is_timeout_noseq(struct io_kiocb *req) in io_is_timeout_noseq() argument
1242 return !req->timeout.off; in io_is_timeout_noseq()
1250 struct io_kiocb *req, *tmp; in io_fallback_req_func() local
1254 llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node) in io_fallback_req_func()
1255 req->io_task_work.func(req, &locked); in io_fallback_req_func()
1340 static bool req_need_defer(struct io_kiocb *req, u32 seq) in req_need_defer() argument
1342 if (unlikely(req->flags & REQ_F_IO_DRAIN)) { in req_need_defer()
1343 struct io_ring_ctx *ctx = req->ctx; in req_need_defer()
1360 static inline bool io_req_ffs_set(struct io_kiocb *req) in io_req_ffs_set() argument
1362 return IS_ENABLED(CONFIG_64BIT) && (req->flags & REQ_F_FIXED_FILE); in io_req_ffs_set()
1365 static void io_req_track_inflight(struct io_kiocb *req) in io_req_track_inflight() argument
1367 if (!(req->flags & REQ_F_INFLIGHT)) { in io_req_track_inflight()
1368 req->flags |= REQ_F_INFLIGHT; in io_req_track_inflight()
1373 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req) in __io_prep_linked_timeout() argument
1375 if (WARN_ON_ONCE(!req->link)) in __io_prep_linked_timeout()
1378 req->flags &= ~REQ_F_ARM_LTIMEOUT; in __io_prep_linked_timeout()
1379 req->flags |= REQ_F_LINK_TIMEOUT; in __io_prep_linked_timeout()
1382 io_req_set_refcount(req); in __io_prep_linked_timeout()
1383 __io_req_set_refcount(req->link, 2); in __io_prep_linked_timeout()
1384 return req->link; in __io_prep_linked_timeout()
1387 static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req) in io_prep_linked_timeout() argument
1389 if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT))) in io_prep_linked_timeout()
1391 return __io_prep_linked_timeout(req); in io_prep_linked_timeout()
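__io_prep_linked_timeout() above takes two references on the timeout request because the completing request and the expiring timer can race to finish it. A hedged userspace sketch (assuming liburing is available; error handling elided) of the submission pattern that arms this kernel path:

#include <liburing.h>

/* Submit a read linked to a 1-second timeout: if the read does not
 * complete in time, the linked-timeout machinery above cancels it and
 * CQEs are posted for both SQEs. */
static int read_with_timeout(struct io_uring *ring, int fd,
			     void *buf, unsigned int len)
{
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read(sqe, fd, buf, len, 0);
	sqe->flags |= IOSQE_IO_LINK;	/* next SQE is linked to this one */

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_link_timeout(sqe, &ts, 0);

	return io_uring_submit(ring);
}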
1394 static void io_prep_async_work(struct io_kiocb *req) in io_prep_async_work() argument
1396 const struct io_op_def *def = &io_op_defs[req->opcode]; in io_prep_async_work()
1397 struct io_ring_ctx *ctx = req->ctx; in io_prep_async_work()
1399 if (!(req->flags & REQ_F_CREDS)) { in io_prep_async_work()
1400 req->flags |= REQ_F_CREDS; in io_prep_async_work()
1401 req->creds = get_current_cred(); in io_prep_async_work()
1404 req->work.list.next = NULL; in io_prep_async_work()
1405 req->work.flags = 0; in io_prep_async_work()
1406 if (req->flags & REQ_F_FORCE_ASYNC) in io_prep_async_work()
1407 req->work.flags |= IO_WQ_WORK_CONCURRENT; in io_prep_async_work()
1409 if (req->flags & REQ_F_ISREG) { in io_prep_async_work()
1411 io_wq_hash_work(&req->work, file_inode(req->file)); in io_prep_async_work()
1412 } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) { in io_prep_async_work()
1414 req->work.flags |= IO_WQ_WORK_UNBOUND; in io_prep_async_work()
1417 switch (req->opcode) { in io_prep_async_work()
1420 if (!S_ISREG(file_inode(req->splice.file_in)->i_mode)) in io_prep_async_work()
1421 req->work.flags |= IO_WQ_WORK_UNBOUND; in io_prep_async_work()
1426 static void io_prep_async_link(struct io_kiocb *req) in io_prep_async_link() argument
1430 if (req->flags & REQ_F_LINK_TIMEOUT) { in io_prep_async_link()
1431 struct io_ring_ctx *ctx = req->ctx; in io_prep_async_link()
1434 io_for_each_link(cur, req) in io_prep_async_link()
1438 io_for_each_link(cur, req) in io_prep_async_link()
1443 static void io_queue_async_work(struct io_kiocb *req, bool *locked) in io_queue_async_work() argument
1445 struct io_ring_ctx *ctx = req->ctx; in io_queue_async_work()
1446 struct io_kiocb *link = io_prep_linked_timeout(req); in io_queue_async_work()
1447 struct io_uring_task *tctx = req->task->io_uring; in io_queue_async_work()
1456 io_prep_async_link(req); in io_queue_async_work()
1465 if (WARN_ON_ONCE(!same_thread_group(req->task, current))) in io_queue_async_work()
1466 req->work.flags |= IO_WQ_WORK_CANCEL; in io_queue_async_work()
1468 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req, in io_queue_async_work()
1469 &req->work, req->flags); in io_queue_async_work()
1470 io_wq_enqueue(tctx->io_wq, &req->work); in io_queue_async_work()
1475 static void io_kill_timeout(struct io_kiocb *req, int status) in io_kill_timeout() argument
1476 __must_hold(&req->ctx->completion_lock) in io_kill_timeout()
1477 __must_hold(&req->ctx->timeout_lock) in io_kill_timeout()
1479 struct io_timeout_data *io = req->async_data; in io_kill_timeout()
1483 req_set_fail(req); in io_kill_timeout()
1484 atomic_set(&req->ctx->cq_timeouts, in io_kill_timeout()
1485 atomic_read(&req->ctx->cq_timeouts) + 1); in io_kill_timeout()
1486 list_del_init(&req->timeout.list); in io_kill_timeout()
1487 io_cqring_fill_event(req->ctx, req->user_data, status, 0); in io_kill_timeout()
1488 io_put_req_deferred(req); in io_kill_timeout()
1498 if (req_need_defer(de->req, de->seq)) in io_queue_deferred()
1501 io_req_task_queue(de->req); in io_queue_deferred()
1514 struct io_kiocb *req = list_first_entry(&ctx->timeout_list, in io_flush_timeouts() local
1517 if (io_is_timeout_noseq(req)) in io_flush_timeouts()
1527 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush; in io_flush_timeouts()
1532 list_del_init(&req->timeout.list); in io_flush_timeouts()
1533 io_kill_timeout(req, 0); in io_flush_timeouts()
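io_flush_timeouts() above decides whether a timeout is due by comparing u32 deltas taken from the same base (cq_last_tm_flush), which stays correct even when the sequence counters wrap. A standalone sketch of that gate (names assumed for illustration):

#include <stdint.h>
#include <stdbool.h>

/* Both deltas are measured from last_flush, so the comparison is
 * wraparound-safe: it asks "how far past the base is each point",
 * not "which raw value is larger". */
static bool timeout_due(uint32_t target_seq, uint32_t last_flush,
			uint32_t cur_seq)
{
	uint32_t events_needed = target_seq - last_flush;
	uint32_t events_got = cur_seq - last_flush;

	return events_got >= events_needed;
}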
1781 static void io_req_complete_post(struct io_kiocb *req, long res, in io_req_complete_post() argument
1784 struct io_ring_ctx *ctx = req->ctx; in io_req_complete_post()
1787 __io_cqring_fill_event(ctx, req->user_data, res, cflags); in io_req_complete_post()
1792 if (req_ref_put_and_test(req)) { in io_req_complete_post()
1793 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) { in io_req_complete_post()
1794 if (req->flags & IO_DISARM_MASK) in io_req_complete_post()
1795 io_disarm_next(req); in io_req_complete_post()
1796 if (req->link) { in io_req_complete_post()
1797 io_req_task_queue(req->link); in io_req_complete_post()
1798 req->link = NULL; in io_req_complete_post()
1801 io_dismantle_req(req); in io_req_complete_post()
1802 io_put_task(req->task, 1); in io_req_complete_post()
1803 list_add(&req->inflight_entry, &ctx->locked_free_list); in io_req_complete_post()
1807 req = NULL; in io_req_complete_post()
1812 if (req) { in io_req_complete_post()
1818 static inline bool io_req_needs_clean(struct io_kiocb *req) in io_req_needs_clean() argument
1820 return req->flags & IO_REQ_CLEAN_FLAGS; in io_req_needs_clean()
1823 static void io_req_complete_state(struct io_kiocb *req, long res, in io_req_complete_state() argument
1826 if (io_req_needs_clean(req)) in io_req_complete_state()
1827 io_clean_op(req); in io_req_complete_state()
1828 req->result = res; in io_req_complete_state()
1829 req->compl.cflags = cflags; in io_req_complete_state()
1830 req->flags |= REQ_F_COMPLETE_INLINE; in io_req_complete_state()
1833 static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags, in __io_req_complete() argument
1837 io_req_complete_state(req, res, cflags); in __io_req_complete()
1839 io_req_complete_post(req, res, cflags); in __io_req_complete()
1842 static inline void io_req_complete(struct io_kiocb *req, long res) in io_req_complete() argument
1844 __io_req_complete(req, 0, res, 0); in io_req_complete()
1847 static void io_req_complete_failed(struct io_kiocb *req, long res) in io_req_complete_failed() argument
1849 req_set_fail(req); in io_req_complete_failed()
1850 io_req_complete_post(req, res, 0); in io_req_complete_failed()
1853 static void io_req_complete_fail_submit(struct io_kiocb *req) in io_req_complete_fail_submit() argument
1859 req->flags &= ~REQ_F_HARDLINK; in io_req_complete_fail_submit()
1860 req->flags |= REQ_F_LINK; in io_req_complete_fail_submit()
1861 io_req_complete_failed(req, req->result); in io_req_complete_fail_submit()
1868 static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx) in io_preinit_req() argument
1870 req->ctx = ctx; in io_preinit_req()
1871 req->link = NULL; in io_preinit_req()
1872 req->async_data = NULL; in io_preinit_req()
1874 req->result = 0; in io_preinit_req()
1902 struct io_kiocb *req = list_first_entry(&state->free_list, in io_flush_cached_reqs() local
1905 list_del(&req->inflight_entry); in io_flush_cached_reqs()
1906 state->reqs[nr++] = req; in io_flush_cached_reqs()
1961 static void io_dismantle_req(struct io_kiocb *req) in io_dismantle_req() argument
1963 unsigned int flags = req->flags; in io_dismantle_req()
1965 if (io_req_needs_clean(req)) in io_dismantle_req()
1966 io_clean_op(req); in io_dismantle_req()
1968 io_put_file(req->file); in io_dismantle_req()
1969 if (req->fixed_rsrc_refs) in io_dismantle_req()
1970 percpu_ref_put(req->fixed_rsrc_refs); in io_dismantle_req()
1971 if (req->async_data) { in io_dismantle_req()
1972 kfree(req->async_data); in io_dismantle_req()
1973 req->async_data = NULL; in io_dismantle_req()
1977 static void __io_free_req(struct io_kiocb *req) in __io_free_req() argument
1979 struct io_ring_ctx *ctx = req->ctx; in __io_free_req()
1981 io_dismantle_req(req); in __io_free_req()
1982 io_put_task(req->task, 1); in __io_free_req()
1985 list_add(&req->inflight_entry, &ctx->locked_free_list); in __io_free_req()
1992 static inline void io_remove_next_linked(struct io_kiocb *req) in io_remove_next_linked() argument
1994 struct io_kiocb *nxt = req->link; in io_remove_next_linked()
1996 req->link = nxt->link; in io_remove_next_linked()
2000 static bool io_kill_linked_timeout(struct io_kiocb *req) in io_kill_linked_timeout() argument
2001 __must_hold(&req->ctx->completion_lock) in io_kill_linked_timeout()
2002 __must_hold(&req->ctx->timeout_lock) in io_kill_linked_timeout()
2004 struct io_kiocb *link = req->link; in io_kill_linked_timeout()
2009 io_remove_next_linked(req); in io_kill_linked_timeout()
2022 static void io_fail_links(struct io_kiocb *req) in io_fail_links() argument
2023 __must_hold(&req->ctx->completion_lock) in io_fail_links()
2025 struct io_kiocb *nxt, *link = req->link; in io_fail_links()
2027 req->link = NULL; in io_fail_links()
2037 trace_io_uring_fail_link(req, link); in io_fail_links()
2044 static bool io_disarm_next(struct io_kiocb *req) in io_disarm_next() argument
2045 __must_hold(&req->ctx->completion_lock) in io_disarm_next()
2049 if (req->flags & REQ_F_ARM_LTIMEOUT) { in io_disarm_next()
2050 struct io_kiocb *link = req->link; in io_disarm_next()
2052 req->flags &= ~REQ_F_ARM_LTIMEOUT; in io_disarm_next()
2054 io_remove_next_linked(req); in io_disarm_next()
2060 } else if (req->flags & REQ_F_LINK_TIMEOUT) { in io_disarm_next()
2061 struct io_ring_ctx *ctx = req->ctx; in io_disarm_next()
2064 posted = io_kill_linked_timeout(req); in io_disarm_next()
2067 if (unlikely((req->flags & REQ_F_FAIL) && in io_disarm_next()
2068 !(req->flags & REQ_F_HARDLINK))) { in io_disarm_next()
2069 posted |= (req->link != NULL); in io_disarm_next()
2070 io_fail_links(req); in io_disarm_next()
2075 static struct io_kiocb *__io_req_find_next(struct io_kiocb *req) in __io_req_find_next() argument
2085 if (req->flags & IO_DISARM_MASK) { in __io_req_find_next()
2086 struct io_ring_ctx *ctx = req->ctx; in __io_req_find_next()
2090 posted = io_disarm_next(req); in __io_req_find_next()
2092 io_commit_cqring(req->ctx); in __io_req_find_next()
2097 nxt = req->link; in __io_req_find_next()
2098 req->link = NULL; in __io_req_find_next()
2102 static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req) in io_req_find_next() argument
2104 if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK)))) in io_req_find_next()
2106 return __io_req_find_next(req); in io_req_find_next()
2146 struct io_kiocb *req = container_of(node, struct io_kiocb, in tctx_task_work() local
2149 if (req->ctx != ctx) { in tctx_task_work()
2151 ctx = req->ctx; in tctx_task_work()
2156 req->io_task_work.func(req, &locked); in tctx_task_work()
2166 static void io_req_task_work_add(struct io_kiocb *req) in io_req_task_work_add() argument
2168 struct task_struct *tsk = req->task; in io_req_task_work_add()
2178 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list); in io_req_task_work_add()
2194 notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL; in io_req_task_work_add()
2207 req = container_of(node, struct io_kiocb, io_task_work.node); in io_req_task_work_add()
2209 if (llist_add(&req->io_task_work.fallback_node, in io_req_task_work_add()
2210 &req->ctx->fallback_llist)) in io_req_task_work_add()
2211 schedule_delayed_work(&req->ctx->fallback_work, 1); in io_req_task_work_add()
2215 static void io_req_task_cancel(struct io_kiocb *req, bool *locked) in io_req_task_cancel() argument
2217 struct io_ring_ctx *ctx = req->ctx; in io_req_task_cancel()
2221 io_req_complete_failed(req, req->result); in io_req_task_cancel()
2224 static void io_req_task_submit(struct io_kiocb *req, bool *locked) in io_req_task_submit() argument
2226 struct io_ring_ctx *ctx = req->ctx; in io_req_task_submit()
2229 /* req->task == current here, checking PF_EXITING is safe */ in io_req_task_submit()
2230 if (likely(!(req->task->flags & PF_EXITING))) in io_req_task_submit()
2231 __io_queue_sqe(req); in io_req_task_submit()
2233 io_req_complete_failed(req, -EFAULT); in io_req_task_submit()
2236 static void io_req_task_queue_fail(struct io_kiocb *req, int ret) in io_req_task_queue_fail() argument
2238 req->result = ret; in io_req_task_queue_fail()
2239 req->io_task_work.func = io_req_task_cancel; in io_req_task_queue_fail()
2240 io_req_task_work_add(req); in io_req_task_queue_fail()
2243 static void io_req_task_queue(struct io_kiocb *req) in io_req_task_queue() argument
2245 req->io_task_work.func = io_req_task_submit; in io_req_task_queue()
2246 io_req_task_work_add(req); in io_req_task_queue()
2249 static void io_req_task_queue_reissue(struct io_kiocb *req) in io_req_task_queue_reissue() argument
2251 req->io_task_work.func = io_queue_async_work; in io_req_task_queue_reissue()
2252 io_req_task_work_add(req); in io_req_task_queue_reissue()
2255 static inline void io_queue_next(struct io_kiocb *req) in io_queue_next() argument
2257 struct io_kiocb *nxt = io_req_find_next(req); in io_queue_next()
2263 static void io_free_req(struct io_kiocb *req) in io_free_req() argument
2265 io_queue_next(req); in io_free_req()
2266 __io_free_req(req); in io_free_req()
2269 static void io_free_req_work(struct io_kiocb *req, bool *locked) in io_free_req_work() argument
2271 io_free_req(req); in io_free_req_work()
2296 static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req, in io_req_free_batch() argument
2299 io_queue_next(req); in io_req_free_batch()
2300 io_dismantle_req(req); in io_req_free_batch()
2302 if (req->task != rb->task) { in io_req_free_batch()
2305 rb->task = req->task; in io_req_free_batch()
2312 state->reqs[state->free_reqs++] = req; in io_req_free_batch()
2314 list_add(&req->inflight_entry, &state->free_list); in io_req_free_batch()
2326 struct io_kiocb *req = state->compl_reqs[i]; in io_submit_flush_completions() local
2328 __io_cqring_fill_event(ctx, req->user_data, req->result, in io_submit_flush_completions()
2329 req->compl.cflags); in io_submit_flush_completions()
2337 struct io_kiocb *req = state->compl_reqs[i]; in io_submit_flush_completions() local
2339 if (req_ref_put_and_test(req)) in io_submit_flush_completions()
2340 io_req_free_batch(&rb, req, &ctx->submit_state); in io_submit_flush_completions()
2351 static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req) in io_put_req_find_next() argument
2355 if (req_ref_put_and_test(req)) { in io_put_req_find_next()
2356 nxt = io_req_find_next(req); in io_put_req_find_next()
2357 __io_free_req(req); in io_put_req_find_next()
2362 static inline void io_put_req(struct io_kiocb *req) in io_put_req() argument
2364 if (req_ref_put_and_test(req)) in io_put_req()
2365 io_free_req(req); in io_put_req()
2368 static inline void io_put_req_deferred(struct io_kiocb *req) in io_put_req_deferred() argument
2370 if (req_ref_put_and_test(req)) { in io_put_req_deferred()
2371 req->io_task_work.func = io_free_req_work; in io_put_req_deferred()
2372 io_req_task_work_add(req); in io_put_req_deferred()
2391 static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf) in io_put_kbuf() argument
2397 req->flags &= ~REQ_F_BUFFER_SELECTED; in io_put_kbuf()
2402 static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req) in io_put_rw_kbuf() argument
2406 if (likely(!(req->flags & REQ_F_BUFFER_SELECTED))) in io_put_rw_kbuf()
2408 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; in io_put_rw_kbuf()
2409 return io_put_kbuf(req, kbuf); in io_put_rw_kbuf()
2430 struct io_kiocb *req; in io_iopoll_complete() local
2437 req = list_first_entry(done, struct io_kiocb, inflight_entry); in io_iopoll_complete()
2438 list_del(&req->inflight_entry); in io_iopoll_complete()
2440 __io_cqring_fill_event(ctx, req->user_data, req->result, in io_iopoll_complete()
2441 io_put_rw_kbuf(req)); in io_iopoll_complete()
2444 if (req_ref_put_and_test(req)) in io_iopoll_complete()
2445 io_req_free_batch(&rb, req, &ctx->submit_state); in io_iopoll_complete()
2456 struct io_kiocb *req, *tmp; in io_do_iopoll() local
2466 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) { in io_do_iopoll()
2467 struct kiocb *kiocb = &req->rw.kiocb; in io_do_iopoll()
2475 if (READ_ONCE(req->iopoll_completed)) { in io_do_iopoll()
2476 list_move_tail(&req->inflight_entry, &done); in io_do_iopoll()
2488 /* iopoll may have completed current req */ in io_do_iopoll()
2489 if (READ_ONCE(req->iopoll_completed)) in io_do_iopoll()
2490 list_move_tail(&req->inflight_entry, &done); in io_do_iopoll()
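io_do_iopoll() above only runs for rings created with IORING_SETUP_IOPOLL, where completions are reaped by actively polling the block layer instead of sleeping on interrupts. A hedged liburing sketch of creating such a ring (polled I/O generally also requires files opened with O_DIRECT on a queue that supports polling):

#include <liburing.h>

static int make_iopoll_ring(struct io_uring *ring)
{
	/* Waiting for CQEs on this ring ends up in the kernel's
	 * io_do_iopoll() loop rather than an interrupt-driven wait. */
	return io_uring_queue_init(64, ring, IORING_SETUP_IOPOLL);
}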
2581 static void kiocb_end_write(struct io_kiocb *req) in kiocb_end_write() argument
2587 if (req->flags & REQ_F_ISREG) { in kiocb_end_write()
2588 struct super_block *sb = file_inode(req->file)->i_sb; in kiocb_end_write()
2596 static bool io_resubmit_prep(struct io_kiocb *req) in io_resubmit_prep() argument
2598 struct io_async_rw *rw = req->async_data; in io_resubmit_prep()
2601 return !io_req_prep_async(req); in io_resubmit_prep()
2606 static bool io_rw_should_reissue(struct io_kiocb *req) in io_rw_should_reissue() argument
2608 umode_t mode = file_inode(req->file)->i_mode; in io_rw_should_reissue()
2609 struct io_ring_ctx *ctx = req->ctx; in io_rw_should_reissue()
2613 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() && in io_rw_should_reissue()
2627 if (!same_thread_group(req->task, current) || !in_task()) in io_rw_should_reissue()
2632 static bool io_resubmit_prep(struct io_kiocb *req) in io_resubmit_prep() argument
2636 static bool io_rw_should_reissue(struct io_kiocb *req) in io_rw_should_reissue() argument
2642 static bool __io_complete_rw_common(struct io_kiocb *req, long res) in __io_complete_rw_common() argument
2644 if (req->rw.kiocb.ki_flags & IOCB_WRITE) in __io_complete_rw_common()
2645 kiocb_end_write(req); in __io_complete_rw_common()
2646 if (res != req->result) { in __io_complete_rw_common()
2648 io_rw_should_reissue(req)) { in __io_complete_rw_common()
2649 req->flags |= REQ_F_REISSUE; in __io_complete_rw_common()
2652 req_set_fail(req); in __io_complete_rw_common()
2653 req->result = res; in __io_complete_rw_common()
2658 static void io_req_task_complete(struct io_kiocb *req, bool *locked) in io_req_task_complete() argument
2660 unsigned int cflags = io_put_rw_kbuf(req); in io_req_task_complete()
2661 long res = req->result; in io_req_task_complete()
2664 struct io_ring_ctx *ctx = req->ctx; in io_req_task_complete()
2667 io_req_complete_state(req, res, cflags); in io_req_task_complete()
2668 state->compl_reqs[state->compl_nr++] = req; in io_req_task_complete()
2672 io_req_complete_post(req, res, cflags); in io_req_task_complete()
2676 static void __io_complete_rw(struct io_kiocb *req, long res, long res2, in __io_complete_rw() argument
2679 if (__io_complete_rw_common(req, res)) in __io_complete_rw()
2681 __io_req_complete(req, issue_flags, req->result, io_put_rw_kbuf(req)); in __io_complete_rw()
2686 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); in io_complete_rw() local
2688 if (__io_complete_rw_common(req, res)) in io_complete_rw()
2690 req->result = res; in io_complete_rw()
2691 req->io_task_work.func = io_req_task_complete; in io_complete_rw()
2692 io_req_task_work_add(req); in io_complete_rw()
2697 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); in io_complete_rw_iopoll() local
2700 kiocb_end_write(req); in io_complete_rw_iopoll()
2701 if (unlikely(res != req->result)) { in io_complete_rw_iopoll()
2702 if (res == -EAGAIN && io_rw_should_reissue(req)) { in io_complete_rw_iopoll()
2703 req->flags |= REQ_F_REISSUE; in io_complete_rw_iopoll()
2708 WRITE_ONCE(req->result, res); in io_complete_rw_iopoll()
2711 WRITE_ONCE(req->iopoll_completed, 1); in io_complete_rw_iopoll()
2720 static void io_iopoll_req_issued(struct io_kiocb *req) in io_iopoll_req_issued() argument
2722 struct io_ring_ctx *ctx = req->ctx; in io_iopoll_req_issued()
2743 if (list_req->file != req->file) { in io_iopoll_req_issued()
2747 queue_num1 = blk_qc_t_to_queue_num(req->rw.kiocb.ki_cookie); in io_iopoll_req_issued()
2757 if (READ_ONCE(req->iopoll_completed)) in io_iopoll_req_issued()
2758 list_add(&req->inflight_entry, &ctx->iopoll_list); in io_iopoll_req_issued()
2760 list_add_tail(&req->inflight_entry, &ctx->iopoll_list); in io_iopoll_req_issued()
2820 static bool io_file_supports_nowait(struct io_kiocb *req, int rw) in io_file_supports_nowait() argument
2822 if (rw == READ && (req->flags & REQ_F_NOWAIT_READ)) in io_file_supports_nowait()
2824 else if (rw == WRITE && (req->flags & REQ_F_NOWAIT_WRITE)) in io_file_supports_nowait()
2827 return __io_file_supports_nowait(req->file, rw); in io_file_supports_nowait()
2830 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_prep_rw() argument
2833 struct io_ring_ctx *ctx = req->ctx; in io_prep_rw()
2834 struct kiocb *kiocb = &req->rw.kiocb; in io_prep_rw()
2835 struct file *file = req->file; in io_prep_rw()
2839 if (!io_req_ffs_set(req) && S_ISREG(file_inode(file)->i_mode)) in io_prep_rw()
2840 req->flags |= REQ_F_ISREG; in io_prep_rw()
2844 req->flags |= REQ_F_CUR_POS; in io_prep_rw()
2859 ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req, rw))) in io_prep_rw()
2860 req->flags |= REQ_F_NOWAIT; in io_prep_rw()
2879 req->iopoll_completed = 0; in io_prep_rw()
2886 if (req->opcode == IORING_OP_READ_FIXED || in io_prep_rw()
2887 req->opcode == IORING_OP_WRITE_FIXED) { in io_prep_rw()
2888 req->imu = NULL; in io_prep_rw()
2889 io_req_set_rsrc_node(req); in io_prep_rw()
2892 req->rw.addr = READ_ONCE(sqe->addr); in io_prep_rw()
2893 req->rw.len = READ_ONCE(sqe->len); in io_prep_rw()
2894 req->buf_index = READ_ONCE(sqe->buf_index); in io_prep_rw()
2922 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); in kiocb_done() local
2923 struct io_async_rw *io = req->async_data; in kiocb_done()
2933 if (req->flags & REQ_F_CUR_POS) in kiocb_done()
2934 req->file->f_pos = kiocb->ki_pos; in kiocb_done()
2936 __io_complete_rw(req, ret, 0, issue_flags); in kiocb_done()
2940 if (req->flags & REQ_F_REISSUE) { in kiocb_done()
2941 req->flags &= ~REQ_F_REISSUE; in kiocb_done()
2942 if (io_resubmit_prep(req)) { in kiocb_done()
2943 io_req_task_queue_reissue(req); in kiocb_done()
2945 unsigned int cflags = io_put_rw_kbuf(req); in kiocb_done()
2946 struct io_ring_ctx *ctx = req->ctx; in kiocb_done()
2948 req_set_fail(req); in kiocb_done()
2951 __io_req_complete(req, issue_flags, ret, cflags); in kiocb_done()
2954 __io_req_complete(req, issue_flags, ret, cflags); in kiocb_done()
2960 static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter, in __io_import_fixed() argument
2963 size_t len = req->rw.len; in __io_import_fixed()
2964 u64 buf_end, buf_addr = req->rw.addr; in __io_import_fixed()
3018 static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter) in io_import_fixed() argument
3020 struct io_ring_ctx *ctx = req->ctx; in io_import_fixed()
3021 struct io_mapped_ubuf *imu = req->imu; in io_import_fixed()
3022 u16 index, buf_index = req->buf_index; in io_import_fixed()
3029 req->imu = imu; in io_import_fixed()
3031 return __io_import_fixed(req, rw, iter, imu); in io_import_fixed()
3052 static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len, in io_buffer_select() argument
3058 if (req->flags & REQ_F_BUFFER_SELECTED) in io_buffer_select()
3061 io_ring_submit_lock(req->ctx, needs_lock); in io_buffer_select()
3063 lockdep_assert_held(&req->ctx->uring_lock); in io_buffer_select()
3065 head = xa_load(&req->ctx->io_buffers, bgid); in io_buffer_select()
3073 xa_erase(&req->ctx->io_buffers, bgid); in io_buffer_select()
3081 io_ring_submit_unlock(req->ctx, needs_lock); in io_buffer_select()
3086 static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len, in io_rw_buffer_select() argument
3092 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; in io_rw_buffer_select()
3093 bgid = req->buf_index; in io_rw_buffer_select()
3094 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock); in io_rw_buffer_select()
3097 req->rw.addr = (u64) (unsigned long) kbuf; in io_rw_buffer_select()
3098 req->flags |= REQ_F_BUFFER_SELECTED; in io_rw_buffer_select()
3103 static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov, in io_compat_import() argument
3111 uiov = u64_to_user_ptr(req->rw.addr); in io_compat_import()
3120 buf = io_rw_buffer_select(req, &len, needs_lock); in io_compat_import()
3129 static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, in __io_iov_buffer_select() argument
3132 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr); in __io_iov_buffer_select()
3142 buf = io_rw_buffer_select(req, &len, needs_lock); in __io_iov_buffer_select()
3150 static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, in io_iov_buffer_select() argument
3153 if (req->flags & REQ_F_BUFFER_SELECTED) { in io_iov_buffer_select()
3156 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; in io_iov_buffer_select()
3161 if (req->rw.len != 1) in io_iov_buffer_select()
3165 if (req->ctx->compat) in io_iov_buffer_select()
3166 return io_compat_import(req, iov, needs_lock); in io_iov_buffer_select()
3169 return __io_iov_buffer_select(req, iov, needs_lock); in io_iov_buffer_select()
3172 static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, in io_import_iovec() argument
3175 void __user *buf = u64_to_user_ptr(req->rw.addr); in io_import_iovec()
3176 size_t sqe_len = req->rw.len; in io_import_iovec()
3177 u8 opcode = req->opcode; in io_import_iovec()
3182 return io_import_fixed(req, rw, iter); in io_import_iovec()
3186 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT)) in io_import_iovec()
3190 if (req->flags & REQ_F_BUFFER_SELECT) { in io_import_iovec()
3191 buf = io_rw_buffer_select(req, &sqe_len, needs_lock); in io_import_iovec()
3194 req->rw.len = sqe_len; in io_import_iovec()
3202 if (req->flags & REQ_F_BUFFER_SELECT) { in io_import_iovec()
3203 ret = io_iov_buffer_select(req, *iovec, needs_lock); in io_import_iovec()
3211 req->ctx->compat); in io_import_iovec()
3223 static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter) in loop_rw_iter() argument
3225 struct kiocb *kiocb = &req->rw.kiocb; in loop_rw_iter()
3226 struct file *file = req->file; in loop_rw_iter()
3246 iovec.iov_base = u64_to_user_ptr(req->rw.addr); in loop_rw_iter()
3247 iovec.iov_len = req->rw.len; in loop_rw_iter()
3266 req->rw.len -= nr; in loop_rw_iter()
3267 req->rw.addr += nr; in loop_rw_iter()
3277 static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec, in io_req_map_rw() argument
3280 struct io_async_rw *rw = req->async_data; in io_req_map_rw()
3300 req->flags |= REQ_F_NEED_CLEANUP; in io_req_map_rw()
3304 static inline int io_alloc_async_data(struct io_kiocb *req) in io_alloc_async_data() argument
3306 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size); in io_alloc_async_data()
3307 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL); in io_alloc_async_data()
3308 return req->async_data == NULL; in io_alloc_async_data()
3311 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, in io_setup_async_rw() argument
3315 if (!force && !io_op_defs[req->opcode].needs_async_setup) in io_setup_async_rw()
3317 if (!req->async_data) { in io_setup_async_rw()
3320 if (io_alloc_async_data(req)) { in io_setup_async_rw()
3325 io_req_map_rw(req, iovec, fast_iov, iter); in io_setup_async_rw()
3326 iorw = req->async_data; in io_setup_async_rw()
3333 static inline int io_rw_prep_async(struct io_kiocb *req, int rw) in io_rw_prep_async() argument
3335 struct io_async_rw *iorw = req->async_data; in io_rw_prep_async()
3339 ret = io_import_iovec(rw, req, &iov, &iorw->iter, false); in io_rw_prep_async()
3346 req->flags |= REQ_F_NEED_CLEANUP; in io_rw_prep_async()
3351 static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_read_prep() argument
3353 if (unlikely(!(req->file->f_mode & FMODE_READ))) in io_read_prep()
3355 return io_prep_rw(req, sqe, READ); in io_read_prep()
3372 struct io_kiocb *req = wait->private; in io_async_buf_func() local
3380 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ; in io_async_buf_func()
3382 io_req_task_queue(req); in io_async_buf_func()
3398 static bool io_rw_should_retry(struct io_kiocb *req) in io_rw_should_retry() argument
3400 struct io_async_rw *rw = req->async_data; in io_rw_should_retry()
3402 struct kiocb *kiocb = &req->rw.kiocb; in io_rw_should_retry()
3405 if (req->flags & REQ_F_NOWAIT) in io_rw_should_retry()
3416 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC)) in io_rw_should_retry()
3420 wait->wait.private = req; in io_rw_should_retry()
3429 static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter) in io_iter_do_read() argument
3431 if (req->file->f_op->read_iter) in io_iter_do_read()
3432 return call_read_iter(req->file, &req->rw.kiocb, iter); in io_iter_do_read()
3433 else if (req->file->f_op->read) in io_iter_do_read()
3434 return loop_rw_iter(READ, req, iter); in io_iter_do_read()
3439 static bool need_read_all(struct io_kiocb *req) in need_read_all() argument
3441 return req->flags & REQ_F_ISREG || in need_read_all()
3442 S_ISBLK(file_inode(req->file)->i_mode); in need_read_all()
3445 static int io_read(struct io_kiocb *req, unsigned int issue_flags) in io_read() argument
3448 struct kiocb *kiocb = &req->rw.kiocb; in io_read()
3450 struct io_async_rw *rw = req->async_data; in io_read()
3466 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock); in io_read()
3472 req->result = iov_iter_count(iter); in io_read()
3481 if (force_nonblock && !io_file_supports_nowait(req, READ)) { in io_read()
3482 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true); in io_read()
3486 ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), req->result); in io_read()
3492 ret = io_iter_do_read(req, iter); in io_read()
3494 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) { in io_read()
3495 req->flags &= ~REQ_F_REISSUE; in io_read()
3497 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_read()
3500 if (req->flags & REQ_F_NOWAIT) in io_read()
3505 } else if (ret <= 0 || ret == req->result || !force_nonblock || in io_read()
3506 (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) { in io_read()
3518 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true); in io_read()
3523 rw = req->async_data; in io_read()
3546 if (!io_rw_should_retry(req)) { in io_read()
3557 ret = io_iter_do_read(req, iter); in io_read()
3573 static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_write_prep() argument
3575 if (unlikely(!(req->file->f_mode & FMODE_WRITE))) in io_write_prep()
3577 return io_prep_rw(req, sqe, WRITE); in io_write_prep()
3580 static int io_write(struct io_kiocb *req, unsigned int issue_flags) in io_write() argument
3583 struct kiocb *kiocb = &req->rw.kiocb; in io_write()
3585 struct io_async_rw *rw = req->async_data; in io_write()
3596 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock); in io_write()
3602 req->result = iov_iter_count(iter); in io_write()
3611 if (force_nonblock && !io_file_supports_nowait(req, WRITE)) in io_write()
3616 (req->flags & REQ_F_ISREG)) in io_write()
3619 ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), req->result); in io_write()
3630 if (req->flags & REQ_F_ISREG) { in io_write()
3631 sb_start_write(file_inode(req->file)->i_sb); in io_write()
3632 __sb_writers_release(file_inode(req->file)->i_sb, in io_write()
3637 if (req->file->f_op->write_iter) in io_write()
3638 ret2 = call_write_iter(req->file, kiocb, iter); in io_write()
3639 else if (req->file->f_op->write) in io_write()
3640 ret2 = loop_rw_iter(WRITE, req, iter); in io_write()
3644 if (req->flags & REQ_F_REISSUE) { in io_write()
3645 req->flags &= ~REQ_F_REISSUE; in io_write()
3656 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT)) in io_write()
3660 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN) in io_write()
3667 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false); in io_write()
3677 static int io_renameat_prep(struct io_kiocb *req, in io_renameat_prep() argument
3680 struct io_rename *ren = &req->rename; in io_renameat_prep()
3683 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_renameat_prep()
3687 if (unlikely(req->flags & REQ_F_FIXED_FILE)) in io_renameat_prep()
3706 req->flags |= REQ_F_NEED_CLEANUP; in io_renameat_prep()
3710 static int io_renameat(struct io_kiocb *req, unsigned int issue_flags) in io_renameat() argument
3712 struct io_rename *ren = &req->rename; in io_renameat()
3721 req->flags &= ~REQ_F_NEED_CLEANUP; in io_renameat()
3723 req_set_fail(req); in io_renameat()
3724 io_req_complete(req, ret); in io_renameat()
3728 static int io_unlinkat_prep(struct io_kiocb *req, in io_unlinkat_prep() argument
3731 struct io_unlink *un = &req->unlink; in io_unlinkat_prep()
3734 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_unlinkat_prep()
3739 if (unlikely(req->flags & REQ_F_FIXED_FILE)) in io_unlinkat_prep()
3753 req->flags |= REQ_F_NEED_CLEANUP; in io_unlinkat_prep()
3757 static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags) in io_unlinkat() argument
3759 struct io_unlink *un = &req->unlink; in io_unlinkat()
3770 req->flags &= ~REQ_F_NEED_CLEANUP; in io_unlinkat()
3772 req_set_fail(req); in io_unlinkat()
3773 io_req_complete(req, ret); in io_unlinkat()
3777 static int io_mkdirat_prep(struct io_kiocb *req, in io_mkdirat_prep() argument
3780 struct io_mkdir *mkd = &req->mkdir; in io_mkdirat_prep()
3783 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_mkdirat_prep()
3788 if (unlikely(req->flags & REQ_F_FIXED_FILE)) in io_mkdirat_prep()
3799 req->flags |= REQ_F_NEED_CLEANUP; in io_mkdirat_prep()
3803 static int io_mkdirat(struct io_kiocb *req, int issue_flags) in io_mkdirat() argument
3805 struct io_mkdir *mkd = &req->mkdir; in io_mkdirat()
3813 req->flags &= ~REQ_F_NEED_CLEANUP; in io_mkdirat()
3815 req_set_fail(req); in io_mkdirat()
3816 io_req_complete(req, ret); in io_mkdirat()
3820 static int io_symlinkat_prep(struct io_kiocb *req, in io_symlinkat_prep() argument
3823 struct io_symlink *sl = &req->symlink; in io_symlinkat_prep()
3826 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_symlinkat_prep()
3831 if (unlikely(req->flags & REQ_F_FIXED_FILE)) in io_symlinkat_prep()
3848 req->flags |= REQ_F_NEED_CLEANUP; in io_symlinkat_prep()
3852 static int io_symlinkat(struct io_kiocb *req, int issue_flags) in io_symlinkat() argument
3854 struct io_symlink *sl = &req->symlink; in io_symlinkat()
3862 req->flags &= ~REQ_F_NEED_CLEANUP; in io_symlinkat()
3864 req_set_fail(req); in io_symlinkat()
3865 io_req_complete(req, ret); in io_symlinkat()
3869 static int io_linkat_prep(struct io_kiocb *req, in io_linkat_prep() argument
3872 struct io_hardlink *lnk = &req->hardlink; in io_linkat_prep()
3875 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_linkat_prep()
3879 if (unlikely(req->flags & REQ_F_FIXED_FILE)) in io_linkat_prep()
3898 req->flags |= REQ_F_NEED_CLEANUP; in io_linkat_prep()
3902 static int io_linkat(struct io_kiocb *req, int issue_flags) in io_linkat() argument
3904 struct io_hardlink *lnk = &req->hardlink; in io_linkat()
3913 req->flags &= ~REQ_F_NEED_CLEANUP; in io_linkat()
3915 req_set_fail(req); in io_linkat()
3916 io_req_complete(req, ret); in io_linkat()
3920 static int io_shutdown_prep(struct io_kiocb *req, in io_shutdown_prep() argument
3924 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_shutdown_prep()
3930 req->shutdown.how = READ_ONCE(sqe->len); in io_shutdown_prep()
3937 static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags) in io_shutdown() argument
3946 sock = sock_from_file(req->file); in io_shutdown()
3950 ret = __sys_shutdown_sock(sock, req->shutdown.how); in io_shutdown()
3952 req_set_fail(req); in io_shutdown()
3953 io_req_complete(req, ret); in io_shutdown()
3960 static int __io_splice_prep(struct io_kiocb *req, in __io_splice_prep() argument
3963 struct io_splice *sp = &req->splice; in __io_splice_prep()
3966 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in __io_splice_prep()
3976 sp->file_in = io_file_get(req->ctx, req, READ_ONCE(sqe->splice_fd_in), in __io_splice_prep()
3980 req->flags |= REQ_F_NEED_CLEANUP; in __io_splice_prep()
3984 static int io_tee_prep(struct io_kiocb *req, in io_tee_prep() argument
3989 return __io_splice_prep(req, sqe); in io_tee_prep()
3992 static int io_tee(struct io_kiocb *req, unsigned int issue_flags) in io_tee() argument
3994 struct io_splice *sp = &req->splice; in io_tee()
4007 req->flags &= ~REQ_F_NEED_CLEANUP; in io_tee()
4010 req_set_fail(req); in io_tee()
4011 io_req_complete(req, ret); in io_tee()
4015 static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_splice_prep() argument
4017 struct io_splice *sp = &req->splice; in io_splice_prep()
4021 return __io_splice_prep(req, sqe); in io_splice_prep()
4024 static int io_splice(struct io_kiocb *req, unsigned int issue_flags) in io_splice() argument
4026 struct io_splice *sp = &req->splice; in io_splice()
4044 req->flags &= ~REQ_F_NEED_CLEANUP; in io_splice()
4047 req_set_fail(req); in io_splice()
4048 io_req_complete(req, ret); in io_splice()
4055 static int io_nop(struct io_kiocb *req, unsigned int issue_flags) in io_nop() argument
4057 struct io_ring_ctx *ctx = req->ctx; in io_nop()
4062 __io_req_complete(req, issue_flags, 0, 0); in io_nop()
4066 static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_fsync_prep() argument
4068 struct io_ring_ctx *ctx = req->ctx; in io_fsync_prep()
4070 if (!req->file) in io_fsync_prep()
4079 req->sync.flags = READ_ONCE(sqe->fsync_flags); in io_fsync_prep()
4080 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC)) in io_fsync_prep()
4083 req->sync.off = READ_ONCE(sqe->off); in io_fsync_prep()
4084 req->sync.len = READ_ONCE(sqe->len); in io_fsync_prep()
4088 static int io_fsync(struct io_kiocb *req, unsigned int issue_flags) in io_fsync() argument
4090 loff_t end = req->sync.off + req->sync.len; in io_fsync()
4097 ret = vfs_fsync_range(req->file, req->sync.off, in io_fsync()
4099 req->sync.flags & IORING_FSYNC_DATASYNC); in io_fsync()
4101 req_set_fail(req); in io_fsync()
4102 io_req_complete(req, ret); in io_fsync()
4106 static int io_fallocate_prep(struct io_kiocb *req, in io_fallocate_prep() argument
4112 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_fallocate_prep()
4115 req->sync.off = READ_ONCE(sqe->off); in io_fallocate_prep()
4116 req->sync.len = READ_ONCE(sqe->addr); in io_fallocate_prep()
4117 req->sync.mode = READ_ONCE(sqe->len); in io_fallocate_prep()
4121 static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags) in io_fallocate() argument
4128 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off, in io_fallocate()
4129 req->sync.len); in io_fallocate()
4131 req_set_fail(req); in io_fallocate()
4132 io_req_complete(req, ret); in io_fallocate()
4136 static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in __io_openat_prep() argument
4141 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in __io_openat_prep()
4145 if (unlikely(req->flags & REQ_F_FIXED_FILE)) in __io_openat_prep()
4149 if (!(req->open.how.flags & O_PATH) && force_o_largefile()) in __io_openat_prep()
4150 req->open.how.flags |= O_LARGEFILE; in __io_openat_prep()
4152 req->open.dfd = READ_ONCE(sqe->fd); in __io_openat_prep()
4154 req->open.filename = getname(fname); in __io_openat_prep()
4155 if (IS_ERR(req->open.filename)) { in __io_openat_prep()
4156 ret = PTR_ERR(req->open.filename); in __io_openat_prep()
4157 req->open.filename = NULL; in __io_openat_prep()
4161 req->open.file_slot = READ_ONCE(sqe->file_index); in __io_openat_prep()
4162 if (req->open.file_slot && (req->open.how.flags & O_CLOEXEC)) in __io_openat_prep()
4165 req->open.nofile = rlimit(RLIMIT_NOFILE); in __io_openat_prep()
4166 req->flags |= REQ_F_NEED_CLEANUP; in __io_openat_prep()
4170 static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_openat_prep() argument
4175 req->open.how = build_open_how(flags, mode); in io_openat_prep()
4176 return __io_openat_prep(req, sqe); in io_openat_prep()
4179 static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_openat2_prep() argument
4190 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how, in io_openat2_prep()
4195 return __io_openat_prep(req, sqe); in io_openat2_prep()
4198 static int io_openat2(struct io_kiocb *req, unsigned int issue_flags) in io_openat2() argument
4203 bool fixed = !!req->open.file_slot; in io_openat2()
4206 ret = build_open_flags(&req->open.how, &op); in io_openat2()
4210 resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED; in io_openat2()
4216 if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE)) in io_openat2()
4223 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile); in io_openat2()
4228 file = do_filp_open(req->open.dfd, req->open.filename, &op); in io_openat2()
4253 ret = io_install_fixed_file(req, file, issue_flags, in io_openat2()
4254 req->open.file_slot - 1); in io_openat2()
4256 putname(req->open.filename); in io_openat2()
4257 req->flags &= ~REQ_F_NEED_CLEANUP; in io_openat2()
4259 req_set_fail(req); in io_openat2()
4260 __io_req_complete(req, issue_flags, ret, 0); in io_openat2()
4264 static int io_openat(struct io_kiocb *req, unsigned int issue_flags) in io_openat() argument
4266 return io_openat2(req, issue_flags); in io_openat()
4269 static int io_remove_buffers_prep(struct io_kiocb *req, in io_remove_buffers_prep() argument
4272 struct io_provide_buf *p = &req->pbuf; in io_remove_buffers_prep()
4315 static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags) in io_remove_buffers() argument
4317 struct io_provide_buf *p = &req->pbuf; in io_remove_buffers()
4318 struct io_ring_ctx *ctx = req->ctx; in io_remove_buffers()
4332 req_set_fail(req); in io_remove_buffers()
4335 __io_req_complete(req, issue_flags, ret, 0); in io_remove_buffers()
4340 static int io_provide_buffers_prep(struct io_kiocb *req, in io_provide_buffers_prep() argument
4344 struct io_provide_buf *p = &req->pbuf; in io_provide_buffers_prep()
4402 static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags) in io_provide_buffers() argument
4404 struct io_provide_buf *p = &req->pbuf; in io_provide_buffers()
4405 struct io_ring_ctx *ctx = req->ctx; in io_provide_buffers()
4423 req_set_fail(req); in io_provide_buffers()
4425 __io_req_complete(req, issue_flags, ret, 0); in io_provide_buffers()
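io_provide_buffers() and io_buffer_select() above implement kernel-chosen buffers: userspace registers a pool under a buffer group ID, submits reads with IOSQE_BUFFER_SELECT, and learns which buffer was used from the CQE flags. A hedged liburing sketch (group ID and sizes are arbitrary; error handling elided):

#include <liburing.h>

#define BGID	7
#define NBUFS	8
#define BUFSZ	4096

static char pool[NBUFS][BUFSZ];

static int read_with_provided_buffer(struct io_uring *ring, int fd)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	/* Hand the pool to the kernel under group BGID (serviced by
	 * io_provide_buffers() above). */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_provide_buffers(sqe, pool, BUFSZ, NBUFS, BGID, 0);
	io_uring_submit(ring);
	io_uring_wait_cqe(ring, &cqe);
	io_uring_cqe_seen(ring, cqe);

	/* Read without naming a buffer; the kernel picks one from BGID. */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read(sqe, fd, NULL, BUFSZ, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = BGID;
	io_uring_submit(ring);

	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret == 0 && cqe->res >= 0 && (cqe->flags & IORING_CQE_F_BUFFER)) {
		int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
		/* cqe->res bytes of data are in pool[bid] */
		(void)bid;
	}
	io_uring_cqe_seen(ring, cqe);
	return ret;
}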
4430 static int io_epoll_ctl_prep(struct io_kiocb *req, in io_epoll_ctl_prep() argument
4436 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_epoll_ctl_prep()
4439 req->epoll.epfd = READ_ONCE(sqe->fd); in io_epoll_ctl_prep()
4440 req->epoll.op = READ_ONCE(sqe->len); in io_epoll_ctl_prep()
4441 req->epoll.fd = READ_ONCE(sqe->off); in io_epoll_ctl_prep()
4443 if (ep_op_has_event(req->epoll.op)) { in io_epoll_ctl_prep()
4447 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev))) in io_epoll_ctl_prep()
4457 static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags) in io_epoll_ctl() argument
4460 struct io_epoll *ie = &req->epoll; in io_epoll_ctl()
4469 req_set_fail(req); in io_epoll_ctl()
4470 __io_req_complete(req, issue_flags, ret, 0); in io_epoll_ctl()
4477 static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_madvise_prep() argument
4482 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_madvise_prep()
4485 req->madvise.addr = READ_ONCE(sqe->addr); in io_madvise_prep()
4486 req->madvise.len = READ_ONCE(sqe->len); in io_madvise_prep()
4487 req->madvise.advice = READ_ONCE(sqe->fadvise_advice); in io_madvise_prep()
4494 static int io_madvise(struct io_kiocb *req, unsigned int issue_flags) in io_madvise() argument
4497 struct io_madvise *ma = &req->madvise; in io_madvise()
4505 req_set_fail(req); in io_madvise()
4506 io_req_complete(req, ret); in io_madvise()
4513 static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_fadvise_prep() argument
4517 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_fadvise_prep()
4520 req->fadvise.offset = READ_ONCE(sqe->off); in io_fadvise_prep()
4521 req->fadvise.len = READ_ONCE(sqe->len); in io_fadvise_prep()
4522 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice); in io_fadvise_prep()
4526 static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags) in io_fadvise() argument
4528 struct io_fadvise *fa = &req->fadvise; in io_fadvise()
4542 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice); in io_fadvise()
4544 req_set_fail(req); in io_fadvise()
4545 __io_req_complete(req, issue_flags, ret, 0); in io_fadvise()
4549 static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_statx_prep() argument
4551 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_statx_prep()
4555 if (req->flags & REQ_F_FIXED_FILE) in io_statx_prep()
4558 req->statx.dfd = READ_ONCE(sqe->fd); in io_statx_prep()
4559 req->statx.mask = READ_ONCE(sqe->len); in io_statx_prep()
4560 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_statx_prep()
4561 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2)); in io_statx_prep()
4562 req->statx.flags = READ_ONCE(sqe->statx_flags); in io_statx_prep()
4567 static int io_statx(struct io_kiocb *req, unsigned int issue_flags) in io_statx() argument
4569 struct io_statx *ctx = &req->statx; in io_statx()
4579 req_set_fail(req); in io_statx()
4580 io_req_complete(req, ret); in io_statx()
4584 static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_close_prep() argument
4586 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_close_prep()
4591 if (req->flags & REQ_F_FIXED_FILE) in io_close_prep()
4594 req->close.fd = READ_ONCE(sqe->fd); in io_close_prep()
4595 req->close.file_slot = READ_ONCE(sqe->file_index); in io_close_prep()
4596 if (req->close.file_slot && req->close.fd) in io_close_prep()
4602 static int io_close(struct io_kiocb *req, unsigned int issue_flags) in io_close() argument
4605 struct io_close *close = &req->close; in io_close()
4610 if (req->close.file_slot) { in io_close()
4611 ret = io_close_fixed(req, issue_flags); in io_close()
4646 req_set_fail(req); in io_close()
4649 __io_req_complete(req, issue_flags, ret, 0); in io_close()
4653 static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_sfr_prep() argument
4655 struct io_ring_ctx *ctx = req->ctx; in io_sfr_prep()
4663 req->sync.off = READ_ONCE(sqe->off); in io_sfr_prep()
4664 req->sync.len = READ_ONCE(sqe->len); in io_sfr_prep()
4665 req->sync.flags = READ_ONCE(sqe->sync_range_flags); in io_sfr_prep()
4669 static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags) in io_sync_file_range() argument
4677 ret = sync_file_range(req->file, req->sync.off, req->sync.len, in io_sync_file_range()
4678 req->sync.flags); in io_sync_file_range()
4680 req_set_fail(req); in io_sync_file_range()
4681 io_req_complete(req, ret); in io_sync_file_range()
4686 static int io_setup_async_msg(struct io_kiocb *req, in io_setup_async_msg() argument
4689 struct io_async_msghdr *async_msg = req->async_data; in io_setup_async_msg()
4693 if (io_alloc_async_data(req)) { in io_setup_async_msg()
4697 async_msg = req->async_data; in io_setup_async_msg()
4698 req->flags |= REQ_F_NEED_CLEANUP; in io_setup_async_msg()
4708 static int io_sendmsg_copy_hdr(struct io_kiocb *req, in io_sendmsg_copy_hdr() argument
4713 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg, in io_sendmsg_copy_hdr()
4714 req->sr_msg.msg_flags, &iomsg->free_iov); in io_sendmsg_copy_hdr()
4717 static int io_sendmsg_prep_async(struct io_kiocb *req) in io_sendmsg_prep_async() argument
4721 ret = io_sendmsg_copy_hdr(req, req->async_data); in io_sendmsg_prep_async()
4723 req->flags |= REQ_F_NEED_CLEANUP; in io_sendmsg_prep_async()
4727 static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_sendmsg_prep() argument
4729 struct io_sr_msg *sr = &req->sr_msg; in io_sendmsg_prep()
4731 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_sendmsg_prep()
4738 req->flags |= REQ_F_NOWAIT; in io_sendmsg_prep()
4741 if (req->ctx->compat) in io_sendmsg_prep()
4747 static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) in io_sendmsg() argument
4755 sock = sock_from_file(req->file); in io_sendmsg()
4759 kmsg = req->async_data; in io_sendmsg()
4761 ret = io_sendmsg_copy_hdr(req, &iomsg); in io_sendmsg()
4767 flags = req->sr_msg.msg_flags; in io_sendmsg()
4775 return io_setup_async_msg(req, kmsg); in io_sendmsg()
4782 req->flags &= ~REQ_F_NEED_CLEANUP; in io_sendmsg()
4784 req_set_fail(req); in io_sendmsg()
4785 __io_req_complete(req, issue_flags, ret, 0); in io_sendmsg()
4789 static int io_send(struct io_kiocb *req, unsigned int issue_flags) in io_send() argument
4791 struct io_sr_msg *sr = &req->sr_msg; in io_send()
4799 sock = sock_from_file(req->file); in io_send()
4812 flags = req->sr_msg.msg_flags; in io_send()
4826 req_set_fail(req); in io_send()
4827 __io_req_complete(req, issue_flags, ret, 0); in io_send()
4831 static int __io_recvmsg_copy_hdr(struct io_kiocb *req, in __io_recvmsg_copy_hdr() argument
4834 struct io_sr_msg *sr = &req->sr_msg; in __io_recvmsg_copy_hdr()
4844 if (req->flags & REQ_F_BUFFER_SELECT) { in __io_recvmsg_copy_hdr()
4864 static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req, in __io_compat_recvmsg_copy_hdr() argument
4867 struct io_sr_msg *sr = &req->sr_msg; in __io_compat_recvmsg_copy_hdr()
4879 if (req->flags & REQ_F_BUFFER_SELECT) { in __io_compat_recvmsg_copy_hdr()
4905 static int io_recvmsg_copy_hdr(struct io_kiocb *req, in io_recvmsg_copy_hdr() argument
4911 if (req->ctx->compat) in io_recvmsg_copy_hdr()
4912 return __io_compat_recvmsg_copy_hdr(req, iomsg); in io_recvmsg_copy_hdr()
4915 return __io_recvmsg_copy_hdr(req, iomsg); in io_recvmsg_copy_hdr()
4918 static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req, in io_recv_buffer_select() argument
4921 struct io_sr_msg *sr = &req->sr_msg; in io_recv_buffer_select()
4924 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock); in io_recv_buffer_select()
4929 req->flags |= REQ_F_BUFFER_SELECTED; in io_recv_buffer_select()
4933 static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req) in io_put_recv_kbuf() argument
4935 return io_put_kbuf(req, req->sr_msg.kbuf); in io_put_recv_kbuf()
4938 static int io_recvmsg_prep_async(struct io_kiocb *req) in io_recvmsg_prep_async() argument
4942 ret = io_recvmsg_copy_hdr(req, req->async_data); in io_recvmsg_prep_async()
4944 req->flags |= REQ_F_NEED_CLEANUP; in io_recvmsg_prep_async()
4948 static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_recvmsg_prep() argument
4950 struct io_sr_msg *sr = &req->sr_msg; in io_recvmsg_prep()
4952 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_recvmsg_prep()
4960 req->flags |= REQ_F_NOWAIT; in io_recvmsg_prep()
4963 if (req->ctx->compat) in io_recvmsg_prep()
4969 static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) in io_recvmsg() argument
4979 sock = sock_from_file(req->file); in io_recvmsg()
4983 kmsg = req->async_data; in io_recvmsg()
4985 ret = io_recvmsg_copy_hdr(req, &iomsg); in io_recvmsg()
4991 if (req->flags & REQ_F_BUFFER_SELECT) { in io_recvmsg()
4992 kbuf = io_recv_buffer_select(req, !force_nonblock); in io_recvmsg()
4996 kmsg->fast_iov[0].iov_len = req->sr_msg.len; in io_recvmsg()
4998 1, req->sr_msg.len); in io_recvmsg()
5001 flags = req->sr_msg.msg_flags; in io_recvmsg()
5007 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg, in io_recvmsg()
5010 return io_setup_async_msg(req, kmsg); in io_recvmsg()
5014 if (req->flags & REQ_F_BUFFER_SELECTED) in io_recvmsg()
5015 cflags = io_put_recv_kbuf(req); in io_recvmsg()
5019 req->flags &= ~REQ_F_NEED_CLEANUP; in io_recvmsg()
5021 req_set_fail(req); in io_recvmsg()
5022 __io_req_complete(req, issue_flags, ret, cflags); in io_recvmsg()
5026 static int io_recv(struct io_kiocb *req, unsigned int issue_flags) in io_recv() argument
5029 struct io_sr_msg *sr = &req->sr_msg; in io_recv()
5039 sock = sock_from_file(req->file); in io_recv()
5043 if (req->flags & REQ_F_BUFFER_SELECT) { in io_recv()
5044 kbuf = io_recv_buffer_select(req, !force_nonblock); in io_recv()
5061 flags = req->sr_msg.msg_flags; in io_recv()
5073 if (req->flags & REQ_F_BUFFER_SELECTED) in io_recv()
5074 cflags = io_put_recv_kbuf(req); in io_recv()
5076 req_set_fail(req); in io_recv()
5077 __io_req_complete(req, issue_flags, ret, cflags); in io_recv()
5081 static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_accept_prep() argument
5083 struct io_accept *accept = &req->accept; in io_accept_prep()
5085 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_accept_prep()
5096 if (accept->file_slot && ((req->open.how.flags & O_CLOEXEC) || in io_accept_prep()
5106 static int io_accept(struct io_kiocb *req, unsigned int issue_flags) in io_accept() argument
5108 struct io_accept *accept = &req->accept; in io_accept()
5115 if (req->file->f_flags & O_NONBLOCK) in io_accept()
5116 req->flags |= REQ_F_NOWAIT; in io_accept()
5123 file = do_accept(req->file, file_flags, accept->addr, accept->addr_len, in io_accept()
5133 req_set_fail(req); in io_accept()
5138 ret = io_install_fixed_file(req, file, issue_flags, in io_accept()
5141 __io_req_complete(req, issue_flags, ret, 0); in io_accept()
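io_accept() above backs IORING_OP_ACCEPT; when no connection is pending, the kernel arms its internal poll handler and retries rather than blocking a thread. A minimal sketch, assuming listenfd is a bound, listening socket:

    #include <liburing.h>
    #include <netinet/in.h>

    static int accept_one(struct io_uring *ring, int listenfd)
    {
        struct sockaddr_in addr;
        socklen_t addrlen = sizeof(addr);
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int ret;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_accept(sqe, listenfd, (struct sockaddr *)&addr,
                             &addrlen, 0);
        io_uring_submit(ring);

        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret)
            return ret;
        ret = cqe->res;                  /* new fd, or -errno */
        io_uring_cqe_seen(ring, cqe);
        return ret;
    }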
5145 static int io_connect_prep_async(struct io_kiocb *req) in io_connect_prep_async() argument
5147 struct io_async_connect *io = req->async_data; in io_connect_prep_async()
5148 struct io_connect *conn = &req->connect; in io_connect_prep_async()
5153 static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_connect_prep() argument
5155 struct io_connect *conn = &req->connect; in io_connect_prep()
5157 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_connect_prep()
5168 static int io_connect(struct io_kiocb *req, unsigned int issue_flags) in io_connect() argument
5175 if (req->async_data) { in io_connect()
5176 io = req->async_data; in io_connect()
5178 ret = move_addr_to_kernel(req->connect.addr, in io_connect()
5179 req->connect.addr_len, in io_connect()
5188 ret = __sys_connect_file(req->file, &io->address, in io_connect()
5189 req->connect.addr_len, file_flags); in io_connect()
5191 if (req->async_data) in io_connect()
5193 if (io_alloc_async_data(req)) { in io_connect()
5197 memcpy(req->async_data, &__io, sizeof(__io)); in io_connect()
5204 req_set_fail(req); in io_connect()
5205 __io_req_complete(req, issue_flags, ret, 0); in io_connect()
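io_connect() services IORING_OP_CONNECT; note how the sockaddr is copied to async_data via move_addr_to_kernel() so the operation can be retried asynchronously. A userspace sketch; the address and port are placeholders:

    #include <liburing.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <string.h>

    static int connect_one(struct io_uring *ring, int sockfd)
    {
        struct sockaddr_in addr;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int ret;

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_port = htons(8080);                  /* placeholder */
        inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_connect(sqe, sockfd, (struct sockaddr *)&addr,
                              sizeof(addr));
        io_uring_submit(ring);

        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret)
            return ret;
        ret = cqe->res;                  /* 0 on success, or -errno */
        io_uring_cqe_seen(ring, cqe);
        return ret;
    }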
5210 static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
5217 static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
5224 static int io_##op##_prep_async(struct io_kiocb *req) \
5239 struct io_kiocb *req; member
5244 static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll, in __io_async_wake() argument
5251 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask); in __io_async_wake()
5255 req->result = mask; in __io_async_wake()
5256 req->io_task_work.func = func; in __io_async_wake()
5264 io_req_task_work_add(req); in __io_async_wake()
5268 static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll) in io_poll_rewait() argument
5269 __acquires(&req->ctx->completion_lock) in io_poll_rewait()
5271 struct io_ring_ctx *ctx = req->ctx; in io_poll_rewait()
5273 /* req->task == current here, checking PF_EXITING is safe */ in io_poll_rewait()
5274 if (unlikely(req->task->flags & PF_EXITING)) in io_poll_rewait()
5277 if (!req->result && !READ_ONCE(poll->canceled)) { in io_poll_rewait()
5280 req->result = vfs_poll(req->file, &pt) & poll->events; in io_poll_rewait()
5284 if (!req->result && !READ_ONCE(poll->canceled)) { in io_poll_rewait()
5292 static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req) in io_poll_get_double() argument
5295 if (req->opcode == IORING_OP_POLL_ADD) in io_poll_get_double()
5296 return req->async_data; in io_poll_get_double()
5297 return req->apoll->double_poll; in io_poll_get_double()
5300 static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req) in io_poll_get_single() argument
5302 if (req->opcode == IORING_OP_POLL_ADD) in io_poll_get_single()
5303 return &req->poll; in io_poll_get_single()
5304 return &req->apoll->poll; in io_poll_get_single()
5307 static void io_poll_remove_double(struct io_kiocb *req) in io_poll_remove_double() argument
5308 __must_hold(&req->ctx->completion_lock) in io_poll_remove_double()
5310 struct io_poll_iocb *poll = io_poll_get_double(req); in io_poll_remove_double()
5312 lockdep_assert_held(&req->ctx->completion_lock); in io_poll_remove_double()
5320 req_ref_put(req); in io_poll_remove_double()
5326 static bool __io_poll_complete(struct io_kiocb *req, __poll_t mask) in __io_poll_complete() argument
5327 __must_hold(&req->ctx->completion_lock) in __io_poll_complete()
5329 struct io_ring_ctx *ctx = req->ctx; in __io_poll_complete()
5333 if (READ_ONCE(req->poll.canceled)) { in __io_poll_complete()
5335 req->poll.events |= EPOLLONESHOT; in __io_poll_complete()
5339 if (req->poll.events & EPOLLONESHOT) in __io_poll_complete()
5341 if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) { in __io_poll_complete()
5342 req->poll.events |= EPOLLONESHOT; in __io_poll_complete()
5351 static inline bool io_poll_complete(struct io_kiocb *req, __poll_t mask) in io_poll_complete() argument
5352 __must_hold(&req->ctx->completion_lock) in io_poll_complete()
5356 done = __io_poll_complete(req, mask); in io_poll_complete()
5357 io_commit_cqring(req->ctx); in io_poll_complete()
5361 static void io_poll_task_func(struct io_kiocb *req, bool *locked) in io_poll_task_func() argument
5363 struct io_ring_ctx *ctx = req->ctx; in io_poll_task_func()
5366 if (io_poll_rewait(req, &req->poll)) { in io_poll_task_func()
5371 if (req->poll.done) { in io_poll_task_func()
5375 done = __io_poll_complete(req, req->result); in io_poll_task_func()
5377 io_poll_remove_double(req); in io_poll_task_func()
5378 hash_del(&req->hash_node); in io_poll_task_func()
5379 req->poll.done = true; in io_poll_task_func()
5381 req->result = 0; in io_poll_task_func()
5382 add_wait_queue(req->poll.head, &req->poll.wait); in io_poll_task_func()
5389 nxt = io_put_req_find_next(req); in io_poll_task_func()
5399 struct io_kiocb *req = wait->private; in io_poll_double_wake() local
5400 struct io_poll_iocb *poll = io_poll_get_single(req); in io_poll_double_wake()
5427 req_ref_put(req); in io_poll_double_wake()
5448 struct io_kiocb *req = pt->req; in __io_queue_proc() local
5480 req_ref_get(req); in __io_queue_proc()
5481 poll->wait.private = req; in __io_queue_proc()
5498 struct async_poll *apoll = pt->req->apoll; in io_async_queue_proc()
5503 static void io_async_task_func(struct io_kiocb *req, bool *locked) in io_async_task_func() argument
5505 struct async_poll *apoll = req->apoll; in io_async_task_func()
5506 struct io_ring_ctx *ctx = req->ctx; in io_async_task_func()
5508 trace_io_uring_task_run(req->ctx, req, req->opcode, req->user_data); in io_async_task_func()
5510 if (io_poll_rewait(req, &apoll->poll)) { in io_async_task_func()
5515 hash_del(&req->hash_node); in io_async_task_func()
5516 io_poll_remove_double(req); in io_async_task_func()
5521 io_req_task_submit(req, locked); in io_async_task_func()
5523 io_req_complete_failed(req, -ECANCELED); in io_async_task_func()
5529 struct io_kiocb *req = wait->private; in io_async_wake() local
5530 struct io_poll_iocb *poll = &req->apoll->poll; in io_async_wake()
5532 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data, in io_async_wake()
5535 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func); in io_async_wake()
5538 static void io_poll_req_insert(struct io_kiocb *req) in io_poll_req_insert() argument
5540 struct io_ring_ctx *ctx = req->ctx; in io_poll_req_insert()
5543 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)]; in io_poll_req_insert()
5544 hlist_add_head(&req->hash_node, list); in io_poll_req_insert()
5547 static __poll_t __io_arm_poll_handler(struct io_kiocb *req, in __io_arm_poll_handler() argument
5553 struct io_ring_ctx *ctx = req->ctx; in __io_arm_poll_handler()
5556 INIT_HLIST_NODE(&req->hash_node); in __io_arm_poll_handler()
5558 poll->file = req->file; in __io_arm_poll_handler()
5559 poll->wait.private = req; in __io_arm_poll_handler()
5562 ipt->req = req; in __io_arm_poll_handler()
5566 mask = vfs_poll(req->file, &ipt->pt) & poll->events; in __io_arm_poll_handler()
5572 io_poll_remove_double(req); in __io_arm_poll_handler()
5586 io_poll_req_insert(req); in __io_arm_poll_handler()
5599 static int io_arm_poll_handler(struct io_kiocb *req) in io_arm_poll_handler() argument
5601 const struct io_op_def *def = &io_op_defs[req->opcode]; in io_arm_poll_handler()
5602 struct io_ring_ctx *ctx = req->ctx; in io_arm_poll_handler()
5608 if (!req->file || !file_can_poll(req->file)) in io_arm_poll_handler()
5610 if (req->flags & REQ_F_POLLED) in io_arm_poll_handler()
5620 if ((req->opcode == IORING_OP_RECVMSG) && in io_arm_poll_handler()
5621 (req->sr_msg.msg_flags & MSG_ERRQUEUE)) in io_arm_poll_handler()
5629 if (!io_file_supports_nowait(req, rw)) in io_arm_poll_handler()
5636 req->apoll = apoll; in io_arm_poll_handler()
5637 req->flags |= REQ_F_POLLED; in io_arm_poll_handler()
5639 io_req_set_refcount(req); in io_arm_poll_handler()
5641 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, in io_arm_poll_handler()
5647 trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data, in io_arm_poll_handler()
5652 static bool __io_poll_remove_one(struct io_kiocb *req, in __io_poll_remove_one() argument
5654 __must_hold(&req->ctx->completion_lock) in __io_poll_remove_one()
5668 hash_del(&req->hash_node); in __io_poll_remove_one()
5672 static bool io_poll_remove_one(struct io_kiocb *req) in io_poll_remove_one() argument
5673 __must_hold(&req->ctx->completion_lock) in io_poll_remove_one()
5677 io_poll_remove_double(req); in io_poll_remove_one()
5678 do_complete = __io_poll_remove_one(req, io_poll_get_single(req), true); in io_poll_remove_one()
5681 io_cqring_fill_event(req->ctx, req->user_data, -ECANCELED, 0); in io_poll_remove_one()
5682 io_commit_cqring(req->ctx); in io_poll_remove_one()
5683 req_set_fail(req); in io_poll_remove_one()
5684 io_put_req_deferred(req); in io_poll_remove_one()
5696 struct io_kiocb *req; in io_poll_remove_all() local
5704 hlist_for_each_entry_safe(req, tmp, list, hash_node) { in io_poll_remove_all()
5705 if (io_match_task(req, tsk, cancel_all)) in io_poll_remove_all()
5706 posted += io_poll_remove_one(req); in io_poll_remove_all()
5722 struct io_kiocb *req; in io_poll_find() local
5725 hlist_for_each_entry(req, list, hash_node) { in io_poll_find()
5726 if (sqe_addr != req->user_data) in io_poll_find()
5728 if (poll_only && req->opcode != IORING_OP_POLL_ADD) in io_poll_find()
5730 return req; in io_poll_find()
5739 struct io_kiocb *req; in io_poll_cancel() local
5741 req = io_poll_find(ctx, sqe_addr, poll_only); in io_poll_cancel()
5742 if (!req) in io_poll_cancel()
5744 if (io_poll_remove_one(req)) in io_poll_cancel()
5764 static int io_poll_update_prep(struct io_kiocb *req, in io_poll_update_prep() argument
5767 struct io_poll_update *upd = &req->poll_update; in io_poll_update_prep()
5770 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_poll_update_prep()
5800 struct io_kiocb *req = wait->private; in io_poll_wake() local
5801 struct io_poll_iocb *poll = &req->poll; in io_poll_wake()
5803 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func); in io_poll_wake()
5811 __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data); in io_poll_queue_proc()
5814 static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_poll_add_prep() argument
5816 struct io_poll_iocb *poll = &req->poll; in io_poll_add_prep()
5819 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_poll_add_prep()
5827 io_req_set_refcount(req); in io_poll_add_prep()
5832 static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags) in io_poll_add() argument
5834 struct io_poll_iocb *poll = &req->poll; in io_poll_add()
5835 struct io_ring_ctx *ctx = req->ctx; in io_poll_add()
5842 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events, in io_poll_add()
5847 done = io_poll_complete(req, mask); in io_poll_add()
5854 io_put_req(req); in io_poll_add()
5859 static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags) in io_poll_update() argument
5861 struct io_ring_ctx *ctx = req->ctx; in io_poll_update()
5867 preq = io_poll_find(ctx, req->poll_update.old_user_data, true); in io_poll_update()
5873 if (!req->poll_update.update_events && !req->poll_update.update_user_data) { in io_poll_update()
5894 req_set_fail(req); in io_poll_update()
5895 io_req_complete(req, ret); in io_poll_update()
5899 if (req->poll_update.update_events) { in io_poll_update()
5901 preq->poll.events |= req->poll_update.events & 0xffff; in io_poll_update()
5904 if (req->poll_update.update_user_data) in io_poll_update()
5905 preq->user_data = req->poll_update.new_user_data; in io_poll_update()
5909 io_req_complete(req, ret); in io_poll_update()
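All of the poll plumbing above is exercised by IORING_OP_POLL_ADD, which is one-shot by default (see the EPOLLONESHOT handling in __io_poll_complete()). A sketch of waiting for readability:

    #include <liburing.h>
    #include <poll.h>
    #include <stdint.h>

    /* Wait for fd to become readable via IORING_OP_POLL_ADD. */
    static int poll_readable(struct io_uring *ring, int fd)
    {
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int ret;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_poll_add(sqe, fd, POLLIN);
        io_uring_sqe_set_data(sqe, (void *)(uintptr_t)fd);
        io_uring_submit(ring);

        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret)
            return ret;
        ret = cqe->res;                  /* returned poll mask, or -errno */
        io_uring_cqe_seen(ring, cqe);
        return ret;
    }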
5921 static void io_req_task_timeout(struct io_kiocb *req, bool *locked) in io_req_task_timeout() argument
5923 req_set_fail(req); in io_req_task_timeout()
5924 io_req_complete_post(req, -ETIME, 0); in io_req_task_timeout()
5931 struct io_kiocb *req = data->req; in io_timeout_fn() local
5932 struct io_ring_ctx *ctx = req->ctx; in io_timeout_fn()
5936 list_del_init(&req->timeout.list); in io_timeout_fn()
5937 atomic_set(&req->ctx->cq_timeouts, in io_timeout_fn()
5938 atomic_read(&req->ctx->cq_timeouts) + 1); in io_timeout_fn()
5941 req->io_task_work.func = io_req_task_timeout; in io_timeout_fn()
5942 io_req_task_work_add(req); in io_timeout_fn()
5951 struct io_kiocb *req; in io_timeout_extract() local
5954 list_for_each_entry(req, &ctx->timeout_list, timeout.list) { in io_timeout_extract()
5955 found = user_data == req->user_data; in io_timeout_extract()
5962 io = req->async_data; in io_timeout_extract()
5965 list_del_init(&req->timeout.list); in io_timeout_extract()
5966 return req; in io_timeout_extract()
5973 struct io_kiocb *req = io_timeout_extract(ctx, user_data); in io_timeout_cancel() local
5975 if (IS_ERR(req)) in io_timeout_cancel()
5976 return PTR_ERR(req); in io_timeout_cancel()
5978 req_set_fail(req); in io_timeout_cancel()
5979 io_cqring_fill_event(ctx, req->user_data, -ECANCELED, 0); in io_timeout_cancel()
5980 io_put_req_deferred(req); in io_timeout_cancel()
6005 struct io_kiocb *req; in io_linked_timeout_update() local
6008 list_for_each_entry(req, &ctx->ltimeout_list, timeout.list) { in io_linked_timeout_update()
6009 found = user_data == req->user_data; in io_linked_timeout_update()
6016 io = req->async_data; in io_linked_timeout_update()
6029 struct io_kiocb *req = io_timeout_extract(ctx, user_data); in io_timeout_update() local
6032 if (IS_ERR(req)) in io_timeout_update()
6033 return PTR_ERR(req); in io_timeout_update()
6035 req->timeout.off = 0; /* noseq */ in io_timeout_update()
6036 data = req->async_data; in io_timeout_update()
6037 list_add_tail(&req->timeout.list, &ctx->timeout_list); in io_timeout_update()
6044 static int io_timeout_remove_prep(struct io_kiocb *req, in io_timeout_remove_prep() argument
6047 struct io_timeout_rem *tr = &req->timeout_rem; in io_timeout_remove_prep()
6049 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_timeout_remove_prep()
6051 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) in io_timeout_remove_prep()
6085 static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags) in io_timeout_remove() argument
6087 struct io_timeout_rem *tr = &req->timeout_rem; in io_timeout_remove()
6088 struct io_ring_ctx *ctx = req->ctx; in io_timeout_remove()
6091 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) { in io_timeout_remove()
6109 req_set_fail(req); in io_timeout_remove()
6110 io_req_complete_post(req, ret, 0); in io_timeout_remove()
6114 static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_timeout_prep() argument
6121 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_timeout_prep()
6135 INIT_LIST_HEAD(&req->timeout.list); in io_timeout_prep()
6136 req->timeout.off = off; in io_timeout_prep()
6137 if (unlikely(off && !req->ctx->off_timeout_used)) in io_timeout_prep()
6138 req->ctx->off_timeout_used = true; in io_timeout_prep()
6140 if (!req->async_data && io_alloc_async_data(req)) in io_timeout_prep()
6143 data = req->async_data; in io_timeout_prep()
6144 data->req = req; in io_timeout_prep()
6154 struct io_submit_link *link = &req->ctx->submit_state.link; in io_timeout_prep()
6160 req->timeout.head = link->last; in io_timeout_prep()
6166 static int io_timeout(struct io_kiocb *req, unsigned int issue_flags) in io_timeout() argument
6168 struct io_ring_ctx *ctx = req->ctx; in io_timeout()
6169 struct io_timeout_data *data = req->async_data; in io_timeout()
6171 u32 tail, off = req->timeout.off; in io_timeout()
6180 if (io_is_timeout_noseq(req)) { in io_timeout()
6186 req->timeout.target_seq = tail + off; in io_timeout()
6209 list_add(&req->timeout.list, entry); in io_timeout()
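io_timeout() services IORING_OP_TIMEOUT; with an off (completion count) of zero the request is "noseq" and behaves as a pure timer, completing with -ETIME as io_req_task_timeout() shows. A sketch:

    #include <liburing.h>

    /* Arm a 1-second timer; the CQE carries -ETIME when it fires. */
    static int arm_timeout(struct io_uring *ring)
    {
        struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int ret;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_timeout(sqe, &ts, 0, 0); /* count 0: no CQE target */
        io_uring_submit(ring);

        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret)
            return ret;
        ret = cqe->res;                  /* -ETIME on expiry */
        io_uring_cqe_seen(ring, cqe);
        return ret;
    }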
6223 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_cancel_cb() local
6226 return req->ctx == cd->ctx && req->user_data == cd->user_data; in io_cancel_cb()
6255 static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr) in io_try_cancel_userdata() argument
6257 struct io_ring_ctx *ctx = req->ctx; in io_try_cancel_userdata()
6260 WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current); in io_try_cancel_userdata()
6262 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx); in io_try_cancel_userdata()
6278 static int io_async_cancel_prep(struct io_kiocb *req, in io_async_cancel_prep() argument
6281 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_async_cancel_prep()
6283 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) in io_async_cancel_prep()
6289 req->cancel.addr = READ_ONCE(sqe->addr); in io_async_cancel_prep()
6293 static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags) in io_async_cancel() argument
6295 struct io_ring_ctx *ctx = req->ctx; in io_async_cancel()
6296 u64 sqe_addr = req->cancel.addr; in io_async_cancel()
6300 ret = io_try_cancel_userdata(req, sqe_addr); in io_async_cancel()
6310 ret = io_async_cancel_one(tctx, req->cancel.addr, ctx); in io_async_cancel()
6317 req_set_fail(req); in io_async_cancel()
6318 io_req_complete_post(req, ret, 0); in io_async_cancel()
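io_async_cancel() looks up its target by the user_data stashed in sqe->addr (see io_async_cancel_prep() above). From userspace this is liburing's io_uring_prep_cancel(); its prototype has varied across liburing releases, so treat the void * form below as an assumption:

    #include <liburing.h>

    /* Try to cancel the in-flight request tagged with user_data. */
    static int cancel_by_user_data(struct io_uring *ring, void *user_data)
    {
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int ret;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_cancel(sqe, user_data, 0); /* fills sqe->addr */
        io_uring_submit(ring);

        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret)
            return ret;
        /* 0 on success, -ENOENT if not found, -EALREADY if already running */
        ret = cqe->res;
        io_uring_cqe_seen(ring, cqe);
        return ret;
    }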
6322 static int io_rsrc_update_prep(struct io_kiocb *req, in io_rsrc_update_prep() argument
6325 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) in io_rsrc_update_prep()
6330 req->rsrc_update.offset = READ_ONCE(sqe->off); in io_rsrc_update_prep()
6331 req->rsrc_update.nr_args = READ_ONCE(sqe->len); in io_rsrc_update_prep()
6332 if (!req->rsrc_update.nr_args) in io_rsrc_update_prep()
6334 req->rsrc_update.arg = READ_ONCE(sqe->addr); in io_rsrc_update_prep()
6338 static int io_files_update(struct io_kiocb *req, unsigned int issue_flags) in io_files_update() argument
6340 struct io_ring_ctx *ctx = req->ctx; in io_files_update()
6344 up.offset = req->rsrc_update.offset; in io_files_update()
6345 up.data = req->rsrc_update.arg; in io_files_update()
6352 &up, req->rsrc_update.nr_args); in io_files_update()
6356 req_set_fail(req); in io_files_update()
6357 __io_req_complete(req, issue_flags, ret, 0); in io_files_update()
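io_files_update() is the asynchronous twin of the IORING_REGISTER_FILES_UPDATE registration call: sqe->addr points at an array of fds and sqe->off gives the slot offset, as io_rsrc_update_prep() reads them above. A sketch using liburing's helper:

    #include <liburing.h>

    /* Replace nr_fds slots of the registered file table, starting at off. */
    static int update_fixed_files(struct io_uring *ring, int *fds,
                                  unsigned nr_fds, int off)
    {
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int ret;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_files_update(sqe, fds, nr_fds, off);
        io_uring_submit(ring);

        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret)
            return ret;
        ret = cqe->res;                  /* number of slots updated */
        io_uring_cqe_seen(ring, cqe);
        return ret;
    }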
6361 static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_req_prep() argument
6363 switch (req->opcode) { in io_req_prep()
6369 return io_read_prep(req, sqe); in io_req_prep()
6373 return io_write_prep(req, sqe); in io_req_prep()
6375 return io_poll_add_prep(req, sqe); in io_req_prep()
6377 return io_poll_update_prep(req, sqe); in io_req_prep()
6379 return io_fsync_prep(req, sqe); in io_req_prep()
6381 return io_sfr_prep(req, sqe); in io_req_prep()
6384 return io_sendmsg_prep(req, sqe); in io_req_prep()
6387 return io_recvmsg_prep(req, sqe); in io_req_prep()
6389 return io_connect_prep(req, sqe); in io_req_prep()
6391 return io_timeout_prep(req, sqe, false); in io_req_prep()
6393 return io_timeout_remove_prep(req, sqe); in io_req_prep()
6395 return io_async_cancel_prep(req, sqe); in io_req_prep()
6397 return io_timeout_prep(req, sqe, true); in io_req_prep()
6399 return io_accept_prep(req, sqe); in io_req_prep()
6401 return io_fallocate_prep(req, sqe); in io_req_prep()
6403 return io_openat_prep(req, sqe); in io_req_prep()
6405 return io_close_prep(req, sqe); in io_req_prep()
6407 return io_rsrc_update_prep(req, sqe); in io_req_prep()
6409 return io_statx_prep(req, sqe); in io_req_prep()
6411 return io_fadvise_prep(req, sqe); in io_req_prep()
6413 return io_madvise_prep(req, sqe); in io_req_prep()
6415 return io_openat2_prep(req, sqe); in io_req_prep()
6417 return io_epoll_ctl_prep(req, sqe); in io_req_prep()
6419 return io_splice_prep(req, sqe); in io_req_prep()
6421 return io_provide_buffers_prep(req, sqe); in io_req_prep()
6423 return io_remove_buffers_prep(req, sqe); in io_req_prep()
6425 return io_tee_prep(req, sqe); in io_req_prep()
6427 return io_shutdown_prep(req, sqe); in io_req_prep()
6429 return io_renameat_prep(req, sqe); in io_req_prep()
6431 return io_unlinkat_prep(req, sqe); in io_req_prep()
6433 return io_mkdirat_prep(req, sqe); in io_req_prep()
6435 return io_symlinkat_prep(req, sqe); in io_req_prep()
6437 return io_linkat_prep(req, sqe); in io_req_prep()
6441 req->opcode); in io_req_prep()
6445 static int io_req_prep_async(struct io_kiocb *req) in io_req_prep_async() argument
6447 if (!io_op_defs[req->opcode].needs_async_setup) in io_req_prep_async()
6449 if (WARN_ON_ONCE(req->async_data)) in io_req_prep_async()
6451 if (io_alloc_async_data(req)) in io_req_prep_async()
6454 switch (req->opcode) { in io_req_prep_async()
6456 return io_rw_prep_async(req, READ); in io_req_prep_async()
6458 return io_rw_prep_async(req, WRITE); in io_req_prep_async()
6460 return io_sendmsg_prep_async(req); in io_req_prep_async()
6462 return io_recvmsg_prep_async(req); in io_req_prep_async()
6464 return io_connect_prep_async(req); in io_req_prep_async()
6467 req->opcode); in io_req_prep_async()
6471 static u32 io_get_sequence(struct io_kiocb *req) in io_get_sequence() argument
6473 u32 seq = req->ctx->cached_sq_head; in io_get_sequence()
6475 /* need the original cached_sq_head, but it was incremented once per linked req */ in io_get_sequence()
6476 io_for_each_link(req, req) in io_get_sequence()
6481 static bool io_drain_req(struct io_kiocb *req) in io_drain_req() argument
6484 struct io_ring_ctx *ctx = req->ctx; in io_drain_req()
6489 if (req->flags & REQ_F_FAIL) { in io_drain_req()
6490 io_req_complete_fail_submit(req); in io_drain_req()
6501 req->flags |= REQ_F_IO_DRAIN; in io_drain_req()
6505 io_for_each_link(pos, req->link) { in io_drain_req()
6508 req->flags |= REQ_F_IO_DRAIN; in io_drain_req()
6513 /* Still need to defer if there are pending reqs in the defer list. */ in io_drain_req()
6515 !(req->flags & REQ_F_IO_DRAIN))) { in io_drain_req()
6520 seq = io_get_sequence(req); in io_drain_req()
6522 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) in io_drain_req()
6525 ret = io_req_prep_async(req); in io_drain_req()
6528 io_prep_async_link(req); in io_drain_req()
6533 io_req_complete_failed(req, ret); in io_drain_req()
6538 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) { in io_drain_req()
6541 io_queue_async_work(req, NULL); in io_drain_req()
6545 trace_io_uring_defer(ctx, req, req->user_data); in io_drain_req()
6546 de->req = req; in io_drain_req()
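io_drain_req() implements IOSQE_IO_DRAIN: the request is parked on ctx->defer_list until every SQE submitted before it has completed, as tracked by io_get_sequence(). From userspace the flag is simply set on the SQE, e.g. to order an fsync behind an earlier write:

    #include <liburing.h>

    /* Queue a write followed by a drained fsync: the fsync is not issued
     * until all previously submitted requests have completed. */
    static int write_then_fsync(struct io_uring *ring, int fd,
                                const void *buf, unsigned len)
    {
        struct io_uring_sqe *sqe;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_write(sqe, fd, buf, len, 0);

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_fsync(sqe, fd, 0);
        sqe->flags |= IOSQE_IO_DRAIN;    /* becomes REQ_F_IO_DRAIN */

        return io_uring_submit(ring);    /* two CQEs arrive later */
    }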
6553 static void io_clean_op(struct io_kiocb *req) in io_clean_op() argument
6555 if (req->flags & REQ_F_BUFFER_SELECTED) { in io_clean_op()
6556 switch (req->opcode) { in io_clean_op()
6560 kfree((void *)(unsigned long)req->rw.addr); in io_clean_op()
6564 kfree(req->sr_msg.kbuf); in io_clean_op()
6569 if (req->flags & REQ_F_NEED_CLEANUP) { in io_clean_op()
6570 switch (req->opcode) { in io_clean_op()
6577 struct io_async_rw *io = req->async_data; in io_clean_op()
6584 struct io_async_msghdr *io = req->async_data; in io_clean_op()
6591 if (!(req->splice.flags & SPLICE_F_FD_IN_FIXED)) in io_clean_op()
6592 io_put_file(req->splice.file_in); in io_clean_op()
6596 if (req->open.filename) in io_clean_op()
6597 putname(req->open.filename); in io_clean_op()
6600 putname(req->rename.oldpath); in io_clean_op()
6601 putname(req->rename.newpath); in io_clean_op()
6604 putname(req->unlink.filename); in io_clean_op()
6607 putname(req->mkdir.filename); in io_clean_op()
6610 putname(req->symlink.oldpath); in io_clean_op()
6611 putname(req->symlink.newpath); in io_clean_op()
6614 putname(req->hardlink.oldpath); in io_clean_op()
6615 putname(req->hardlink.newpath); in io_clean_op()
6619 if ((req->flags & REQ_F_POLLED) && req->apoll) { in io_clean_op()
6620 kfree(req->apoll->double_poll); in io_clean_op()
6621 kfree(req->apoll); in io_clean_op()
6622 req->apoll = NULL; in io_clean_op()
6624 if (req->flags & REQ_F_INFLIGHT) { in io_clean_op()
6625 struct io_uring_task *tctx = req->task->io_uring; in io_clean_op()
6629 if (req->flags & REQ_F_CREDS) in io_clean_op()
6630 put_cred(req->creds); in io_clean_op()
6632 req->flags &= ~IO_REQ_CLEAN_FLAGS; in io_clean_op()
6635 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) in io_issue_sqe() argument
6637 struct io_ring_ctx *ctx = req->ctx; in io_issue_sqe()
6641 if ((req->flags & REQ_F_CREDS) && req->creds != current_cred()) in io_issue_sqe()
6642 creds = override_creds(req->creds); in io_issue_sqe()
6644 switch (req->opcode) { in io_issue_sqe()
6646 ret = io_nop(req, issue_flags); in io_issue_sqe()
6651 ret = io_read(req, issue_flags); in io_issue_sqe()
6656 ret = io_write(req, issue_flags); in io_issue_sqe()
6659 ret = io_fsync(req, issue_flags); in io_issue_sqe()
6662 ret = io_poll_add(req, issue_flags); in io_issue_sqe()
6665 ret = io_poll_update(req, issue_flags); in io_issue_sqe()
6668 ret = io_sync_file_range(req, issue_flags); in io_issue_sqe()
6671 ret = io_sendmsg(req, issue_flags); in io_issue_sqe()
6674 ret = io_send(req, issue_flags); in io_issue_sqe()
6677 ret = io_recvmsg(req, issue_flags); in io_issue_sqe()
6680 ret = io_recv(req, issue_flags); in io_issue_sqe()
6683 ret = io_timeout(req, issue_flags); in io_issue_sqe()
6686 ret = io_timeout_remove(req, issue_flags); in io_issue_sqe()
6689 ret = io_accept(req, issue_flags); in io_issue_sqe()
6692 ret = io_connect(req, issue_flags); in io_issue_sqe()
6695 ret = io_async_cancel(req, issue_flags); in io_issue_sqe()
6698 ret = io_fallocate(req, issue_flags); in io_issue_sqe()
6701 ret = io_openat(req, issue_flags); in io_issue_sqe()
6704 ret = io_close(req, issue_flags); in io_issue_sqe()
6707 ret = io_files_update(req, issue_flags); in io_issue_sqe()
6710 ret = io_statx(req, issue_flags); in io_issue_sqe()
6713 ret = io_fadvise(req, issue_flags); in io_issue_sqe()
6716 ret = io_madvise(req, issue_flags); in io_issue_sqe()
6719 ret = io_openat2(req, issue_flags); in io_issue_sqe()
6722 ret = io_epoll_ctl(req, issue_flags); in io_issue_sqe()
6725 ret = io_splice(req, issue_flags); in io_issue_sqe()
6728 ret = io_provide_buffers(req, issue_flags); in io_issue_sqe()
6731 ret = io_remove_buffers(req, issue_flags); in io_issue_sqe()
6734 ret = io_tee(req, issue_flags); in io_issue_sqe()
6737 ret = io_shutdown(req, issue_flags); in io_issue_sqe()
6740 ret = io_renameat(req, issue_flags); in io_issue_sqe()
6743 ret = io_unlinkat(req, issue_flags); in io_issue_sqe()
6746 ret = io_mkdirat(req, issue_flags); in io_issue_sqe()
6749 ret = io_symlinkat(req, issue_flags); in io_issue_sqe()
6752 ret = io_linkat(req, issue_flags); in io_issue_sqe()
6764 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) in io_issue_sqe()
6765 io_iopoll_req_issued(req); in io_issue_sqe()
6772 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_wq_free_work() local
6774 req = io_put_req_find_next(req); in io_wq_free_work()
6775 return req ? &req->work : NULL; in io_wq_free_work()
6780 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_wq_submit_work() local
6785 if (!(req->flags & REQ_F_REFCOUNT)) in io_wq_submit_work()
6786 __io_req_set_refcount(req, 2); in io_wq_submit_work()
6788 req_ref_get(req); in io_wq_submit_work()
6790 timeout = io_prep_linked_timeout(req); in io_wq_submit_work()
6800 ret = io_issue_sqe(req, 0); in io_wq_submit_work()
6814 io_req_task_queue_fail(req, ret); in io_wq_submit_work()
6845 struct io_kiocb *req, int fd) in io_file_get_fixed() argument
6857 req->flags |= (file_ptr << REQ_F_NOWAIT_READ_BIT); in io_file_get_fixed()
6858 io_req_set_rsrc_node(req); in io_file_get_fixed()
6863 struct io_kiocb *req, int fd) in io_file_get_normal() argument
6871 io_req_track_inflight(req); in io_file_get_normal()
6876 struct io_kiocb *req, int fd, bool fixed) in io_file_get() argument
6879 return io_file_get_fixed(ctx, req, fd); in io_file_get()
6881 return io_file_get_normal(ctx, req, fd); in io_file_get()
6884 static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked) in io_req_task_link_timeout() argument
6886 struct io_kiocb *prev = req->timeout.prev; in io_req_task_link_timeout()
6890 ret = io_try_cancel_userdata(req, prev->user_data); in io_req_task_link_timeout()
6891 io_req_complete_post(req, ret ?: -ETIME, 0); in io_req_task_link_timeout()
6894 io_req_complete_post(req, -ETIME, 0); in io_req_task_link_timeout()
6902 struct io_kiocb *prev, *req = data->req; in io_link_timeout_fn() local
6903 struct io_ring_ctx *ctx = req->ctx; in io_link_timeout_fn()
6907 prev = req->timeout.head; in io_link_timeout_fn()
6908 req->timeout.head = NULL; in io_link_timeout_fn()
6919 list_del(&req->timeout.list); in io_link_timeout_fn()
6920 req->timeout.prev = prev; in io_link_timeout_fn()
6923 req->io_task_work.func = io_req_task_link_timeout; in io_link_timeout_fn()
6924 io_req_task_work_add(req); in io_link_timeout_fn()
6928 static void io_queue_linked_timeout(struct io_kiocb *req) in io_queue_linked_timeout() argument
6930 struct io_ring_ctx *ctx = req->ctx; in io_queue_linked_timeout()
6937 if (req->timeout.head) { in io_queue_linked_timeout()
6938 struct io_timeout_data *data = req->async_data; in io_queue_linked_timeout()
6943 list_add_tail(&req->timeout.list, &ctx->ltimeout_list); in io_queue_linked_timeout()
6947 io_put_req(req); in io_queue_linked_timeout()
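io_link_timeout_fn() and io_queue_linked_timeout() back IORING_OP_LINK_TIMEOUT, which bounds the previous SQE in a link chain. A sketch; if the timer fires first the read completes with -ECANCELED and the timeout CQE carries -ETIME, mirroring io_req_task_link_timeout() above:

    #include <liburing.h>

    /* A read that is cancelled if it takes longer than 500ms. */
    static int read_with_deadline(struct io_uring *ring, int fd,
                                  void *buf, unsigned len)
    {
        struct __kernel_timespec ts = { .tv_sec = 0,
                                        .tv_nsec = 500 * 1000 * 1000 };
        struct io_uring_sqe *sqe;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_read(sqe, fd, buf, len, 0);
        sqe->flags |= IOSQE_IO_LINK;     /* timeout applies to this op */

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_link_timeout(sqe, &ts, 0);

        return io_uring_submit(ring);    /* expect two CQEs */
    }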
6950 static void __io_queue_sqe(struct io_kiocb *req) in __io_queue_sqe() argument
6951 __must_hold(&req->ctx->uring_lock) in __io_queue_sqe()
6957 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER); in __io_queue_sqe()
6964 if (req->flags & REQ_F_COMPLETE_INLINE) { in __io_queue_sqe()
6965 struct io_ring_ctx *ctx = req->ctx; in __io_queue_sqe()
6968 state->compl_reqs[state->compl_nr++] = req; in __io_queue_sqe()
6974 linked_timeout = io_prep_linked_timeout(req); in __io_queue_sqe()
6977 } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) { in __io_queue_sqe()
6978 linked_timeout = io_prep_linked_timeout(req); in __io_queue_sqe()
6980 switch (io_arm_poll_handler(req)) { in __io_queue_sqe()
6990 io_queue_async_work(req, NULL); in __io_queue_sqe()
6997 io_req_complete_failed(req, ret); in __io_queue_sqe()
7001 static inline void io_queue_sqe(struct io_kiocb *req) in io_queue_sqe() argument
7002 __must_hold(&req->ctx->uring_lock) in io_queue_sqe()
7004 if (unlikely(req->ctx->drain_active) && io_drain_req(req)) in io_queue_sqe()
7007 if (likely(!(req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)))) { in io_queue_sqe()
7008 __io_queue_sqe(req); in io_queue_sqe()
7009 } else if (req->flags & REQ_F_FAIL) { in io_queue_sqe()
7010 io_req_complete_fail_submit(req); in io_queue_sqe()
7012 int ret = io_req_prep_async(req); in io_queue_sqe()
7015 io_req_complete_failed(req, ret); in io_queue_sqe()
7017 io_queue_async_work(req, NULL); in io_queue_sqe()
7027 struct io_kiocb *req, in io_check_restriction() argument
7033 if (!test_bit(req->opcode, ctx->restrictions.sqe_op)) in io_check_restriction()
7047 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, in io_init_req() argument
7055 /* req is partially pre-initialised, see io_preinit_req() */ in io_init_req()
7056 req->opcode = READ_ONCE(sqe->opcode); in io_init_req()
7058 req->flags = sqe_flags = READ_ONCE(sqe->flags); in io_init_req()
7059 req->user_data = READ_ONCE(sqe->user_data); in io_init_req()
7060 req->file = NULL; in io_init_req()
7061 req->fixed_rsrc_refs = NULL; in io_init_req()
7062 req->task = current; in io_init_req()
7067 if (unlikely(req->opcode >= IORING_OP_LAST)) in io_init_req()
7069 if (!io_check_restriction(ctx, req, sqe_flags)) in io_init_req()
7073 !io_op_defs[req->opcode].buffer_select) in io_init_req()
7080 req->creds = xa_load(&ctx->personalities, personality); in io_init_req()
7081 if (!req->creds) in io_init_req()
7083 get_cred(req->creds); in io_init_req()
7084 req->flags |= REQ_F_CREDS; in io_init_req()
7093 io_op_defs[req->opcode].plug) { in io_init_req()
7098 if (io_op_defs[req->opcode].needs_file) { in io_init_req()
7099 req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd), in io_init_req()
7101 if (unlikely(!req->file)) in io_init_req()
7109 static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, in io_submit_sqe() argument
7116 ret = io_init_req(ctx, req, sqe); in io_submit_sqe()
7122 * whether a link req failed or was cancelled can be judged by in io_submit_sqe()
7124 * REQ_F_FAIL may have been set because of another req's failure, in io_submit_sqe()
7125 * so leverage req->result to distinguish whether a head in io_submit_sqe()
7126 * had REQ_F_FAIL set by its own failure or by another req's in io_submit_sqe()
7132 } else if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) { in io_submit_sqe()
7134 * the current req is a normal req, so we should return in io_submit_sqe()
7137 io_req_complete_failed(req, ret); in io_submit_sqe()
7140 req_fail_link_node(req, ret); in io_submit_sqe()
7142 ret = io_req_prep(req, sqe); in io_submit_sqe()
7148 trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data, in io_submit_sqe()
7149 req->flags, true, in io_submit_sqe()
7162 if (!(req->flags & REQ_F_FAIL)) { in io_submit_sqe()
7163 ret = io_req_prep_async(req); in io_submit_sqe()
7165 req_fail_link_node(req, ret); in io_submit_sqe()
7170 trace_io_uring_link(ctx, req, head); in io_submit_sqe()
7171 link->last->link = req; in io_submit_sqe()
7172 link->last = req; in io_submit_sqe()
7175 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) { in io_submit_sqe()
7180 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) { in io_submit_sqe()
7181 link->head = req; in io_submit_sqe()
7182 link->last = req; in io_submit_sqe()
7184 io_queue_sqe(req); in io_submit_sqe()
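io_submit_sqe() is also where IOSQE_IO_LINK chains are stitched together (link->last->link = req); when a head fails, req_fail_link_node() marks the rest so they complete with -ECANCELED. A sketch of a two-step chain where the write only runs if the read succeeds:

    #include <liburing.h>

    /* Copy len bytes: the write is only issued if the read succeeds
     * in full; otherwise it completes with -ECANCELED. */
    static int read_then_write(struct io_uring *ring, int infd, int outfd,
                               void *buf, unsigned len)
    {
        struct io_uring_sqe *sqe;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_read(sqe, infd, buf, len, 0);
        sqe->flags |= IOSQE_IO_LINK;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_write(sqe, outfd, buf, len, 0);

        return io_uring_submit(ring);
    }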
7275 struct io_kiocb *req; in io_submit_sqes() local
7277 req = io_alloc_req(ctx); in io_submit_sqes()
7278 if (unlikely(!req)) { in io_submit_sqes()
7285 list_add(&req->inflight_entry, &ctx->submit_state.free_list); in io_submit_sqes()
7290 if (io_submit_sqe(ctx, req, sqe)) in io_submit_sqes()
8352 static int io_install_fixed_file(struct io_kiocb *req, struct file *file, in io_install_fixed_file() argument
8355 struct io_ring_ctx *ctx = req->ctx; in io_install_fixed_file()
8408 static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags) in io_close_fixed() argument
8410 unsigned int offset = req->close.file_slot - 1; in io_close_fixed()
8411 struct io_ring_ctx *ctx = req->ctx; in io_close_fixed()
9220 struct io_kiocb *req, *nxt; in io_req_cache_free() local
9222 list_for_each_entry_safe(req, nxt, list, inflight_entry) { in io_req_cache_free()
9223 list_del(&req->inflight_entry); in io_req_cache_free()
9224 kmem_cache_free(req_cachep, req); in io_req_cache_free()
9376 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_cancel_ctx_cb() local
9378 return req->ctx == data; in io_cancel_ctx_cb()
9453 struct io_kiocb *req, *tmp; in io_kill_timeouts() local
9458 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) { in io_kill_timeouts()
9459 if (io_match_task(req, tsk, cancel_all)) { in io_kill_timeouts()
9460 io_kill_timeout(req, -ECANCELED); in io_kill_timeouts()
9518 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_cancel_task_cb() local
9522 if (!cancel->all && (req->flags & REQ_F_LINK_TIMEOUT)) { in io_cancel_task_cb()
9523 struct io_ring_ctx *ctx = req->ctx; in io_cancel_task_cb()
9527 ret = io_match_task(req, cancel->task, cancel->all); in io_cancel_task_cb()
9530 ret = io_match_task(req, cancel->task, cancel->all); in io_cancel_task_cb()
9543 if (io_match_task(de->req, task, cancel_all)) { in io_cancel_defer_files()
9555 io_req_complete_failed(de->req, -ECANCELED); in io_cancel_defer_files()
10112 struct io_kiocb *req; in __io_uring_show_fdinfo() local
10114 hlist_for_each_entry(req, list, hash_node) in __io_uring_show_fdinfo()
10115 seq_printf(m, " op=%d, task_works=%d\n", req->opcode, in __io_uring_show_fdinfo()
10116 req->task->task_works != NULL); in __io_uring_show_fdinfo()