Lines Matching refs:req
34 struct io_kiocb *req; member
68 static bool io_poll_get_ownership_slowpath(struct io_kiocb *req) in io_poll_get_ownership_slowpath() argument
77 v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs); in io_poll_get_ownership_slowpath()
80 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK); in io_poll_get_ownership_slowpath()
89 static inline bool io_poll_get_ownership(struct io_kiocb *req) in io_poll_get_ownership() argument
91 if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS)) in io_poll_get_ownership()
92 return io_poll_get_ownership_slowpath(req); in io_poll_get_ownership()
93 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK); in io_poll_get_ownership()
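
The io_poll_get_ownership()/io_poll_get_ownership_slowpath() entries above implement a take-ownership-by-refcount scheme: whoever bumps poll_refs up from zero owns the request, and once the count has grown past IO_POLL_REF_BIAS the slowpath parks an IO_POLL_RETRY_FLAG instead of inflating the count further. Below is a minimal userspace sketch of that pattern; the macro names mirror the kernel's but the values are illustrative, and fake_req/poll_get_ownership are stand-ins, not the kernel code.

/*
 * Userspace sketch of the poll_refs ownership pattern.  Constants are
 * illustrative, not copied from the kernel.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define IO_POLL_REF_MASK	0x3fffffff	/* low bits: reference count */
#define IO_POLL_RETRY_FLAG	0x40000000	/* "look again" marker set by the slowpath */
#define IO_POLL_REF_BIAS	128		/* threshold that forces the slowpath */

struct fake_req {
	atomic_int poll_refs;
};

static bool poll_get_ownership(struct fake_req *req)
{
	if (atomic_load(&req->poll_refs) >= IO_POLL_REF_BIAS) {
		/* Slow path: set a retry marker instead of bumping the count further. */
		int v = atomic_fetch_or(&req->poll_refs, IO_POLL_RETRY_FLAG);

		if (v & IO_POLL_REF_MASK)
			return false;
	}
	/* The caller owns the request iff the count was zero before the increment. */
	return !(atomic_fetch_add(&req->poll_refs, 1) & IO_POLL_REF_MASK);
}

int main(void)
{
	struct fake_req req = { .poll_refs = 0 };

	printf("first caller gets ownership:  %d\n", poll_get_ownership(&req));	/* 1 */
	printf("second caller gets ownership: %d\n", poll_get_ownership(&req));	/* 0 */
	return 0;
}
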
96 static void io_poll_mark_cancelled(struct io_kiocb *req) in io_poll_mark_cancelled() argument
98 atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs); in io_poll_mark_cancelled()
101 static struct io_poll *io_poll_get_double(struct io_kiocb *req) in io_poll_get_double() argument
104 if (req->opcode == IORING_OP_POLL_ADD) in io_poll_get_double()
105 return req->async_data; in io_poll_get_double()
106 return req->apoll->double_poll; in io_poll_get_double()
109 static struct io_poll *io_poll_get_single(struct io_kiocb *req) in io_poll_get_single() argument
111 if (req->opcode == IORING_OP_POLL_ADD) in io_poll_get_single()
112 return io_kiocb_to_cmd(req, struct io_poll); in io_poll_get_single()
113 return &req->apoll->poll; in io_poll_get_single()
116 static void io_poll_req_insert(struct io_kiocb *req) in io_poll_req_insert() argument
118 struct io_hash_table *table = &req->ctx->cancel_table; in io_poll_req_insert()
119 u32 index = hash_long(req->cqe.user_data, table->hash_bits); in io_poll_req_insert()
123 hlist_add_head(&req->hash_node, &hb->list); in io_poll_req_insert()
127 static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx) in io_poll_req_delete() argument
129 struct io_hash_table *table = &req->ctx->cancel_table; in io_poll_req_delete()
130 u32 index = hash_long(req->cqe.user_data, table->hash_bits); in io_poll_req_delete()
134 hash_del(&req->hash_node); in io_poll_req_delete()
138 static void io_poll_req_insert_locked(struct io_kiocb *req) in io_poll_req_insert_locked() argument
140 struct io_hash_table *table = &req->ctx->cancel_table_locked; in io_poll_req_insert_locked()
141 u32 index = hash_long(req->cqe.user_data, table->hash_bits); in io_poll_req_insert_locked()
143 lockdep_assert_held(&req->ctx->uring_lock); in io_poll_req_insert_locked()
145 hlist_add_head(&req->hash_node, &table->hbs[index].list); in io_poll_req_insert_locked()
148 static void io_poll_tw_hash_eject(struct io_kiocb *req, bool *locked) in io_poll_tw_hash_eject() argument
150 struct io_ring_ctx *ctx = req->ctx; in io_poll_tw_hash_eject()
152 if (req->flags & REQ_F_HASH_LOCKED) { in io_poll_tw_hash_eject()
160 hash_del(&req->hash_node); in io_poll_tw_hash_eject()
161 req->flags &= ~REQ_F_HASH_LOCKED; in io_poll_tw_hash_eject()
163 io_poll_req_delete(req, ctx); in io_poll_tw_hash_eject()
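
io_poll_req_insert()/io_poll_req_delete() above bucket each request by hash_long(req->cqe.user_data, hash_bits) so a later cancel can find it; the _locked variant and io_poll_tw_hash_eject() choose between two such tables depending on REQ_F_HASH_LOCKED (one protected per bucket, one by ctx->uring_lock). The sketch below shows only the bucketing, in userspace: hash64() stands in for hash_long() (the multiplier is the usual 64-bit golden-ratio constant), and the single table with singly linked chaining is a simplification.

/* Illustrative user_data -> bucket hashing, not the kernel's hash table code. */
#include <stdint.h>
#include <stdio.h>

#define HASH_BITS 5	/* 32 buckets, purely illustrative */

static uint32_t hash64(uint64_t val, unsigned bits)
{
	return (uint32_t)((val * 0x61C8864680B583EBull) >> (64 - bits));
}

struct fake_req {
	uint64_t user_data;
	struct fake_req *hash_next;	/* hlist-style chaining, simplified */
};

static struct fake_req *buckets[1u << HASH_BITS];

static void req_insert(struct fake_req *req)
{
	uint32_t idx = hash64(req->user_data, HASH_BITS);

	req->hash_next = buckets[idx];
	buckets[idx] = req;
}

static struct fake_req *req_find(uint64_t user_data)
{
	uint32_t idx = hash64(user_data, HASH_BITS);

	for (struct fake_req *r = buckets[idx]; r; r = r->hash_next)
		if (r->user_data == user_data)
			return r;
	return NULL;
}

int main(void)
{
	struct fake_req a = { .user_data = 0x1234 };

	req_insert(&a);
	printf("found by user_data: %d\n", req_find(0x1234) == &a);	/* 1 */
	return 0;
}
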
190 static void io_poll_remove_entries(struct io_kiocb *req) in io_poll_remove_entries() argument
196 if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL))) in io_poll_remove_entries()
215 if (req->flags & REQ_F_SINGLE_POLL) in io_poll_remove_entries()
216 io_poll_remove_entry(io_poll_get_single(req)); in io_poll_remove_entries()
217 if (req->flags & REQ_F_DOUBLE_POLL) in io_poll_remove_entries()
218 io_poll_remove_entry(io_poll_get_double(req)); in io_poll_remove_entries()
238 static int io_poll_check_events(struct io_kiocb *req, bool *locked) in io_poll_check_events() argument
240 struct io_ring_ctx *ctx = req->ctx; in io_poll_check_events()
244 if (unlikely(req->task->flags & PF_EXITING)) in io_poll_check_events()
248 v = atomic_read(&req->poll_refs); in io_poll_check_events()
261 req->cqe.res = 0; in io_poll_check_events()
263 req->cqe.res = 0; in io_poll_check_events()
269 atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs); in io_poll_check_events()
274 if (!req->cqe.res) { in io_poll_check_events()
275 struct poll_table_struct pt = { ._key = req->apoll_events }; in io_poll_check_events()
276 req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events; in io_poll_check_events()
279 if ((unlikely(!req->cqe.res))) in io_poll_check_events()
281 if (req->apoll_events & EPOLLONESHOT) in io_poll_check_events()
283 if (io_is_uring_fops(req->file)) in io_poll_check_events()
287 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) { in io_poll_check_events()
288 __poll_t mask = mangle_poll(req->cqe.res & in io_poll_check_events()
289 req->apoll_events); in io_poll_check_events()
291 if (!io_post_aux_cqe(ctx, req->cqe.user_data, in io_poll_check_events()
293 io_req_set_res(req, mask, 0); in io_poll_check_events()
297 ret = io_poll_issue(req, locked); in io_poll_check_events()
305 req->cqe.res = 0; in io_poll_check_events()
311 } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) & in io_poll_check_events()
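
The io_poll_check_events() entries above form the owner's drain loop: it handles the wakeups that the accumulated poll_refs represent, then drops exactly the references it observed with atomic_sub_return() and goes around again if new references arrived in the meantime. A userspace approximation of just that loop, with the event handling reduced to a printf:

/* Sketch of the reference-draining loop; an approximation, not the kernel code. */
#include <stdatomic.h>
#include <stdio.h>

#define IO_POLL_REF_MASK 0x3fffffff

static atomic_int poll_refs = 1;	/* the task-work owner's own reference */

static void check_events(void)
{
	int v, remaining;

	do {
		v = atomic_load(&poll_refs) & IO_POLL_REF_MASK;
		/* ... handle the wakeup(s) these v references stand for ... */
		printf("draining %d reference(s)\n", v);

		/* Drop only what we saw; loop if more wakeups slipped in meanwhile. */
		remaining = atomic_fetch_sub(&poll_refs, v) - v;
	} while (remaining & IO_POLL_REF_MASK);
}

int main(void)
{
	atomic_fetch_add(&poll_refs, 1);	/* a wakeup racing in before the drain */
	check_events();
	return 0;
}
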
317 static void io_poll_task_func(struct io_kiocb *req, bool *locked) in io_poll_task_func() argument
321 ret = io_poll_check_events(req, locked); in io_poll_task_func()
326 struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll); in io_poll_task_func()
327 req->cqe.res = mangle_poll(req->cqe.res & poll->events); in io_poll_task_func()
329 req->cqe.res = ret; in io_poll_task_func()
330 req_set_fail(req); in io_poll_task_func()
333 io_poll_remove_entries(req); in io_poll_task_func()
334 io_poll_tw_hash_eject(req, locked); in io_poll_task_func()
336 io_req_set_res(req, req->cqe.res, 0); in io_poll_task_func()
337 io_req_task_complete(req, locked); in io_poll_task_func()
340 static void io_apoll_task_func(struct io_kiocb *req, bool *locked) in io_apoll_task_func() argument
344 ret = io_poll_check_events(req, locked); in io_apoll_task_func()
348 io_poll_remove_entries(req); in io_apoll_task_func()
349 io_poll_tw_hash_eject(req, locked); in io_apoll_task_func()
352 io_req_complete_post(req); in io_apoll_task_func()
354 io_req_task_submit(req, locked); in io_apoll_task_func()
356 io_req_complete_failed(req, ret); in io_apoll_task_func()
359 static void __io_poll_execute(struct io_kiocb *req, int mask) in __io_poll_execute() argument
361 io_req_set_res(req, mask, 0); in __io_poll_execute()
368 if (req->opcode == IORING_OP_POLL_ADD) in __io_poll_execute()
369 req->io_task_work.func = io_poll_task_func; in __io_poll_execute()
371 req->io_task_work.func = io_apoll_task_func; in __io_poll_execute()
373 trace_io_uring_task_add(req, mask); in __io_poll_execute()
374 io_req_task_work_add(req); in __io_poll_execute()
377 static inline void io_poll_execute(struct io_kiocb *req, int res) in io_poll_execute() argument
379 if (io_poll_get_ownership(req)) in io_poll_execute()
380 __io_poll_execute(req, res); in io_poll_execute()
383 static void io_poll_cancel_req(struct io_kiocb *req) in io_poll_cancel_req() argument
385 io_poll_mark_cancelled(req); in io_poll_cancel_req()
387 io_poll_execute(req, 0); in io_poll_cancel_req()
392 static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll) in io_pollfree_wake() argument
394 io_poll_mark_cancelled(req); in io_pollfree_wake()
396 io_poll_execute(req, 0); in io_pollfree_wake()
420 struct io_kiocb *req = wqe_to_req(wait); in io_poll_wake() local
425 return io_pollfree_wake(req, poll); in io_poll_wake()
431 if (io_poll_get_ownership(req)) { in io_poll_wake()
437 req->flags &= ~REQ_F_DOUBLE_POLL; in io_poll_wake()
439 req->flags &= ~REQ_F_SINGLE_POLL; in io_poll_wake()
441 __io_poll_execute(req, mask); in io_poll_wake()
447 static bool io_poll_double_prepare(struct io_kiocb *req) in io_poll_double_prepare() argument
450 struct io_poll *poll = io_poll_get_single(req); in io_poll_double_prepare()
463 req->flags |= REQ_F_DOUBLE_POLL; in io_poll_double_prepare()
464 if (req->opcode == IORING_OP_POLL_ADD) in io_poll_double_prepare()
465 req->flags |= REQ_F_ASYNC_DATA; in io_poll_double_prepare()
476 struct io_kiocb *req = pt->req; in __io_queue_proc() local
477 unsigned long wqe_private = (unsigned long) req; in __io_queue_proc()
507 if (!io_poll_double_prepare(req)) { in __io_queue_proc()
515 req->flags |= REQ_F_SINGLE_POLL; in __io_queue_proc()
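
__io_queue_proc() stores the request pointer in the wait entry's private field (wqe_private above), and io_poll_wake() recovers it with wqe_to_req(). The round trip below sketches that, using the low pointer bit as a tag for the second (double-poll) entry; the tag-bit name and exact encoding here are assumptions for illustration, not taken from the lines above.

/* Tagged-pointer round trip; the WQE_F_DOUBLE bit is an illustrative assumption. */
#include <assert.h>
#include <stdint.h>

#define WQE_F_DOUBLE ((uintptr_t)1)	/* low pointer bit marks the double-poll entry */

struct fake_req { int unused; };

struct fake_wait_entry {
	uintptr_t private;	/* tagged pointer back to the owning request */
};

static void queue_entry(struct fake_wait_entry *w, struct fake_req *req, int is_double)
{
	uintptr_t p = (uintptr_t)req;

	if (is_double)
		p |= WQE_F_DOUBLE;	/* relies on struct alignment keeping bit 0 free */
	w->private = p;
}

static struct fake_req *wqe_to_req(struct fake_wait_entry *w)
{
	return (struct fake_req *)(w->private & ~WQE_F_DOUBLE);
}

int main(void)
{
	struct fake_req req;
	struct fake_wait_entry w;

	queue_entry(&w, &req, 1);
	assert(wqe_to_req(&w) == &req);
	return 0;
}
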
532 struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll); in io_poll_queue_proc()
535 (struct io_poll **) &pt->req->async_data); in io_poll_queue_proc()
538 static bool io_poll_can_finish_inline(struct io_kiocb *req, in io_poll_can_finish_inline() argument
541 return pt->owning || io_poll_get_ownership(req); in io_poll_can_finish_inline()
550 static int __io_arm_poll_handler(struct io_kiocb *req, in __io_arm_poll_handler() argument
555 struct io_ring_ctx *ctx = req->ctx; in __io_arm_poll_handler()
557 INIT_HLIST_NODE(&req->hash_node); in __io_arm_poll_handler()
558 req->work.cancel_seq = atomic_read(&ctx->cancel_seq); in __io_arm_poll_handler()
560 poll->file = req->file; in __io_arm_poll_handler()
561 req->apoll_events = poll->events; in __io_arm_poll_handler()
564 ipt->req = req; in __io_arm_poll_handler()
579 atomic_set(&req->poll_refs, (int)ipt->owning); in __io_arm_poll_handler()
583 req->flags &= ~REQ_F_HASH_LOCKED; in __io_arm_poll_handler()
585 mask = vfs_poll(req->file, &ipt->pt) & poll->events; in __io_arm_poll_handler()
588 io_poll_remove_entries(req); in __io_arm_poll_handler()
590 if (!io_poll_can_finish_inline(req, ipt)) { in __io_arm_poll_handler()
591 io_poll_mark_cancelled(req); in __io_arm_poll_handler()
602 if (!io_poll_can_finish_inline(req, ipt)) in __io_arm_poll_handler()
604 io_poll_remove_entries(req); in __io_arm_poll_handler()
610 if (req->flags & REQ_F_HASH_LOCKED) in __io_arm_poll_handler()
611 io_poll_req_insert_locked(req); in __io_arm_poll_handler()
613 io_poll_req_insert(req); in __io_arm_poll_handler()
616 io_poll_can_finish_inline(req, ipt)) { in __io_arm_poll_handler()
617 __io_poll_execute(req, mask); in __io_arm_poll_handler()
626 if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1) in __io_arm_poll_handler()
627 __io_poll_execute(req, 0); in __io_arm_poll_handler()
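
At the end of __io_arm_poll_handler(), the arming path drops its own reference with atomic_cmpxchg(&req->poll_refs, 1, 0); if the count is no longer exactly 1, a wakeup raced in while arming and the request is punted with __io_poll_execute(req, 0). A small userspace rendering of that release step:

/* Sketch of the cmpxchg(1, 0) ownership release; userspace approximation only. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int poll_refs = 1;	/* the arming path's own reference */

static void punt_to_task_work(void)
{
	printf("raced with a wakeup, punting to task work\n");
}

static void release_arming_ref(void)
{
	int expected = 1;

	/* Drop our ref; if the count is not exactly 1, a waker got in first. */
	if (!atomic_compare_exchange_strong(&poll_refs, &expected, 0))
		punt_to_task_work();
}

int main(void)
{
	atomic_fetch_add(&poll_refs, 1);	/* simulate a racing wakeup */
	release_arming_ref();			/* -> punts */
	return 0;
}
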
636 struct async_poll *apoll = pt->req->apoll; in io_async_queue_proc()
641 static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req, in io_req_alloc_apoll() argument
644 struct io_ring_ctx *ctx = req->ctx; in io_req_alloc_apoll()
648 if (req->flags & REQ_F_POLLED) { in io_req_alloc_apoll()
649 apoll = req->apoll; in io_req_alloc_apoll()
660 req->apoll = apoll; in io_req_alloc_apoll()
664 int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags) in io_arm_poll_handler() argument
666 const struct io_op_def *def = &io_op_defs[req->opcode]; in io_arm_poll_handler()
676 req->flags |= REQ_F_HASH_LOCKED; in io_arm_poll_handler()
680 if (!file_can_poll(req->file)) in io_arm_poll_handler()
682 if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED) in io_arm_poll_handler()
684 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) in io_arm_poll_handler()
691 if (req->flags & REQ_F_CLEAR_POLLIN) in io_arm_poll_handler()
699 apoll = io_req_alloc_apoll(req, issue_flags); in io_arm_poll_handler()
702 req->flags |= REQ_F_POLLED; in io_arm_poll_handler()
705 io_kbuf_recycle(req, issue_flags); in io_arm_poll_handler()
707 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags); in io_arm_poll_handler()
710 trace_io_uring_poll_arm(req, mask, apoll->poll.events); in io_arm_poll_handler()
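
io_arm_poll_handler() is the internal machinery that lets a request which would otherwise return -EAGAIN wait for readiness and be retried instead of blocking a worker. For comparison only, here is the same idea written as a plain userspace helper around poll(2); it is an analogy for the pattern, not a model of the kernel code.

/* "Try non-blocking, arm a poll on EAGAIN, retry when ready" in userspace. */
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

static ssize_t read_poll_retry(int fd, void *buf, size_t len)
{
	for (;;) {
		ssize_t ret = read(fd, buf, len);

		if (ret >= 0 || errno != EAGAIN)
			return ret;

		/* Not ready yet: wait for readability, then issue the op again. */
		struct pollfd p = { .fd = fd, .events = POLLIN };

		if (poll(&p, 1, -1) < 0)
			return -1;
	}
}

int main(void)
{
	int pfd[2];
	char c;

	if (pipe(pfd))
		return 1;
	fcntl(pfd[0], F_SETFL, O_NONBLOCK);
	write(pfd[1], "x", 1);			/* make the retry succeed immediately */
	printf("read %zd byte(s)\n", read_poll_retry(pfd[0], &c, 1));
	return 0;
}
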
720 struct io_kiocb *req; in io_poll_remove_all_table() local
728 hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) { in io_poll_remove_all_table()
729 if (io_match_task_safe(req, tsk, cancel_all)) { in io_poll_remove_all_table()
730 hlist_del_init(&req->hash_node); in io_poll_remove_all_table()
731 io_poll_cancel_req(req); in io_poll_remove_all_table()
759 struct io_kiocb *req; in io_poll_find() local
766 hlist_for_each_entry(req, &hb->list, hash_node) { in io_poll_find()
767 if (cd->data != req->cqe.user_data) in io_poll_find()
769 if (poll_only && req->opcode != IORING_OP_POLL_ADD) in io_poll_find()
772 if (cd->seq == req->work.cancel_seq) in io_poll_find()
774 req->work.cancel_seq = cd->seq; in io_poll_find()
777 return req; in io_poll_find()
789 struct io_kiocb *req; in io_poll_file_find() local
798 hlist_for_each_entry(req, &hb->list, hash_node) { in io_poll_file_find()
800 req->file != cd->file) in io_poll_file_find()
802 if (cd->seq == req->work.cancel_seq) in io_poll_file_find()
804 req->work.cancel_seq = cd->seq; in io_poll_file_find()
806 return req; in io_poll_file_find()
813 static int io_poll_disarm(struct io_kiocb *req) in io_poll_disarm() argument
815 if (!req) in io_poll_disarm()
817 if (!io_poll_get_ownership(req)) in io_poll_disarm()
819 io_poll_remove_entries(req); in io_poll_disarm()
820 hash_del(&req->hash_node); in io_poll_disarm()
828 struct io_kiocb *req; in __io_poll_cancel() local
831 req = io_poll_file_find(ctx, cd, table, &bucket); in __io_poll_cancel()
833 req = io_poll_find(ctx, false, cd, table, &bucket); in __io_poll_cancel()
835 if (req) in __io_poll_cancel()
836 io_poll_cancel_req(req); in __io_poll_cancel()
839 return req ? 0 : -ENOENT; in __io_poll_cancel()
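
io_poll_find()/io_poll_file_find() above walk a hash bucket for a matching user_data (or file) and skip any request whose work.cancel_seq already equals the current cancel attempt's sequence, stamping it otherwise; that keeps one cancel attempt from matching the same victim twice. A simplified userspace version of that lookup:

/* Simplified cancel lookup with the cancel_seq "already visited" check. */
#include <stdint.h>
#include <stdio.h>

struct fake_req {
	uint64_t user_data;
	unsigned cancel_seq;
	struct fake_req *next;
};

struct cancel_data {
	uint64_t data;		/* user_data to match */
	unsigned seq;		/* sequence number of this cancel attempt */
};

static struct fake_req *poll_find(struct fake_req *list, struct cancel_data *cd)
{
	for (struct fake_req *req = list; req; req = req->next) {
		if (req->user_data != cd->data)
			continue;
		if (req->cancel_seq == cd->seq)
			continue;	/* already considered by this very attempt */
		req->cancel_seq = cd->seq;
		return req;
	}
	return NULL;
}

int main(void)
{
	struct fake_req r = { .user_data = 42, .cancel_seq = 0 };
	struct cancel_data cd = { .data = 42, .seq = 7 };

	printf("first lookup:  %p\n", (void *)poll_find(&r, &cd));	/* finds r */
	printf("second lookup: %p\n", (void *)poll_find(&r, &cd));	/* NULL */
	return 0;
}
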
874 int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_poll_remove_prep() argument
876 struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update); in io_poll_remove_prep()
904 int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_poll_add_prep() argument
906 struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll); in io_poll_add_prep()
914 if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP)) in io_poll_add_prep()
921 int io_poll_add(struct io_kiocb *req, unsigned int issue_flags) in io_poll_add() argument
923 struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll); in io_poll_add()
933 if (req->ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_SINGLE_ISSUER)) in io_poll_add()
934 req->flags |= REQ_F_HASH_LOCKED; in io_poll_add()
936 ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags); in io_poll_add()
938 io_req_set_res(req, ipt.result_mask, 0); in io_poll_add()
944 int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags) in io_poll_remove() argument
946 struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update); in io_poll_remove()
948 struct io_ring_ctx *ctx = req->ctx; in io_poll_remove()
1006 req_set_fail(req); in io_poll_remove()
1010 io_req_set_res(req, ret, 0); in io_poll_remove()
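
Finally, io_poll_add_prep()/io_poll_add() and io_poll_remove() above are what IORING_OP_POLL_ADD and IORING_OP_POLL_REMOVE land on. A small liburing program that exercises the add path: it polls the read end of a pipe for POLLIN, writes a byte to trigger the wakeup, and reads back a CQE whose res carries the signaled mask (error handling mostly omitted; build with -luring).

/* Userspace example reaching the POLL_ADD path indexed above. */
#include <liburing.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int pipefd[2];

	if (pipe(pipefd) || io_uring_queue_init(8, &ring, 0))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_add(sqe, pipefd[0], POLLIN);
	io_uring_sqe_set_data(sqe, (void *)(uintptr_t)0x1234);	/* becomes cqe->user_data */
	io_uring_submit(&ring);

	write(pipefd[1], "x", 1);		/* triggers the poll wakeup path */

	io_uring_wait_cqe(&ring, &cqe);
	printf("user_data=%#llx res=%#x\n",
	       (unsigned long long)cqe->user_data, (unsigned)cqe->res);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}
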