Lines matching refs:tctx
(cross-reference listing: each entry shows the source line number, the line itself, and its enclosing function; declaration sites are tagged local or argument)
453 struct io_uring_task *tctx = req->task->io_uring; in io_queue_iowq() local
455 BUG_ON(!tctx); in io_queue_iowq()
456 BUG_ON(!tctx->io_wq); in io_queue_iowq()
472 io_wq_enqueue(tctx->io_wq, &req->work); in io_queue_iowq()
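The 453-472 cluster is io_queue_iowq() punting a request to the io-wq worker pool through the submitting task's per-task context. The two BUG_ON()s at 455-456 document the invariant that any task far enough along to punt work always has a tctx with a live io_wq. A condensed sketch built only from the fields shown above (link and error handling elided, function name hypothetical):

    static void queue_to_iowq_sketch(struct io_kiocb *req)
    {
            struct io_uring_task *tctx = req->task->io_uring;

            BUG_ON(!tctx);          /* submitting task always owns a tctx */
            BUG_ON(!tctx->io_wq);   /* ... and it has a live io-wq */
            io_wq_enqueue(tctx->io_wq, &req->work);
    }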
661 struct io_uring_task *tctx = task->io_uring; in __io_put_task() local
663 percpu_counter_sub(&tctx->inflight, nr); in __io_put_task()
664 if (unlikely(atomic_read(&tctx->in_idle))) in __io_put_task()
665 wake_up(&tctx->wait); in __io_put_task()
669 void io_task_refs_refill(struct io_uring_task *tctx) in io_task_refs_refill() argument
671 unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR; in io_task_refs_refill()
673 percpu_counter_add(&tctx->inflight, refill); in io_task_refs_refill()
675 tctx->cached_refs += refill; in io_task_refs_refill()
680 struct io_uring_task *tctx = task->io_uring; in io_uring_drop_tctx_refs() local
681 unsigned int refs = tctx->cached_refs; in io_uring_drop_tctx_refs()
684 tctx->cached_refs = 0; in io_uring_drop_tctx_refs()
685 percpu_counter_sub(&tctx->inflight, refs); in io_uring_drop_tctx_refs()
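Lines 661-685 are the task-reference accounting: __io_put_task() returns references to the percpu inflight counter and wakes a task idling in cancellation, io_task_refs_refill() tops the per-task cache back up, and io_uring_drop_tctx_refs() flushes whatever is still cached. The negation at 671 is the subtle bit: cached_refs can have gone to zero or below (that the refill triggers on a depleted cache is an inference from the negation), and the refill charges the percpu counter once for exactly enough to land the cache at IO_TCTX_REFS_CACHE_NR. Worked through with an assumed cached_refs of -3:

    /* refill = -(-3) + IO_TCTX_REFS_CACHE_NR = IO_TCTX_REFS_CACHE_NR + 3 */
    unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;

    percpu_counter_add(&tctx->inflight, refill); /* charge all handed-out refs */
    tctx->cached_refs += refill; /* cache now holds exactly IO_TCTX_REFS_CACHE_NR */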
1084 struct io_uring_task *tctx = container_of(cb, struct io_uring_task, in tctx_task_work() local
1087 struct llist_node *node = io_llist_xchg(&tctx->task_list, &fake); in tctx_task_work()
1091 node = io_llist_cmpxchg(&tctx->task_list, &fake, NULL); in tctx_task_work()
1094 node = io_llist_xchg(&tctx->task_list, &fake); in tctx_task_work()
1096 node = io_llist_cmpxchg(&tctx->task_list, &fake, NULL); in tctx_task_work()
1102 if (unlikely(atomic_read(&tctx->in_idle))) in tctx_task_work()
1105 trace_io_uring_task_work_run(tctx, count, loops); in tctx_task_work()
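The xchg/cmpxchg sequence at 1087-1096 is a lockless drain of the per-task work list using a stack-local sentinel node. A sketch of the shape, assuming io_llist_xchg()/io_llist_cmpxchg() act as atomic exchange and compare-exchange on the list head; actually running each batch (handle_tw_list() in this file) is elided:

    struct llist_node fake = {};
    struct llist_node *node = io_llist_xchg(&tctx->task_list, &fake);

    /* ... run the batch grabbed above ... */
    node = io_llist_cmpxchg(&tctx->task_list, &fake, NULL);
    while (node != &fake) {
            /* cmpxchg failed: new work raced in behind the sentinel, and
             * the call returned the real head; grab that batch and retry */
            node = io_llist_xchg(&tctx->task_list, &fake);
            /* ... run this batch too ... */
            node = io_llist_cmpxchg(&tctx->task_list, &fake, NULL);
    }
    /* loop exits once cmpxchg retires &fake for NULL: the list is empty */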
1132 struct io_uring_task *tctx = req->task->io_uring; in __io_req_task_work_add() local
1142 if (!llist_add(&req->io_task_work.node, &tctx->task_list)) in __io_req_task_work_add()
1148 if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method))) in __io_req_task_work_add()
1151 node = llist_del_all(&tctx->task_list); in __io_req_task_work_add()
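At 1132-1151, __io_req_task_work_add() hands a request back to its issuing task. llist_add() at 1142 reports whether the list was empty beforehand, so only the first enqueuer pays for a task_work_add() call; the llist_del_all() at 1151 is the failure path once the target task can no longer run task work. In outline (fallback processing elided):

    if (!llist_add(&req->io_task_work.node, &tctx->task_list))
            return;  /* list was non-empty: task_work is already armed */

    if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
            return;  /* the task will drain the list in tctx_task_work() */

    /* target task is exiting: reclaim the whole list for fallback handling */
    node = llist_del_all(&tctx->task_list);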
1702 struct io_uring_task *tctx = req->task->io_uring; in io_clean_op() local
1704 atomic_dec(&tctx->inflight_tracked); in io_clean_op()
2703 struct io_uring_task *tctx = current->io_uring; in io_tctx_exit_cb() local
2713 if (tctx && !atomic_read(&tctx->in_idle)) in io_tctx_exit_cb()
2888 struct io_uring_task *tctx = node->task->io_uring; in io_uring_try_cancel_iowq() local
2894 if (!tctx || !tctx->io_wq) in io_uring_try_cancel_iowq()
2896 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true); in io_uring_try_cancel_iowq()
2909 struct io_uring_task *tctx = task ? task->io_uring : NULL; in io_uring_try_cancel_requests() local
2919 } else if (tctx && tctx->io_wq) { in io_uring_try_cancel_requests()
2924 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb, in io_uring_try_cancel_requests()
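Lines 2888-2896 (io_uring_try_cancel_iowq()) and 2909-2924 (io_uring_try_cancel_requests()) both drive io_wq_cancel_cb(), which walks the io-wq and cancels every entry the callback matches; the trailing true asks for all matches rather than just the first. 2896 matches by ring, 2924 by task. The per-ctx matcher is small enough to sketch, assuming the usual io_wq_work embedding in struct io_kiocb:

    static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
    {
            struct io_kiocb *req = container_of(work, struct io_kiocb, work);

            return req->ctx == data;        /* cancel only this ring's work */
    }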
2950 static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked) in tctx_inflight() argument
2953 return atomic_read(&tctx->inflight_tracked); in tctx_inflight()
2954 return percpu_counter_sum(&tctx->inflight); in tctx_inflight()
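tctx_inflight() at 2950-2954 gives the canceller two views of outstanding work: the cheap atomic count of explicitly tracked requests (decremented at 1704 in io_clean_op()), or a full percpu_counter_sum() of every reference. Reassembled directly from the lines above:

    static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
    {
            if (tracked)
                    return atomic_read(&tctx->inflight_tracked);
            return percpu_counter_sum(&tctx->inflight);
    }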
2963 struct io_uring_task *tctx = current->io_uring; in io_uring_cancel_generic() local
2972 if (tctx->io_wq) in io_uring_cancel_generic()
2973 io_wq_exit_start(tctx->io_wq); in io_uring_cancel_generic()
2975 atomic_inc(&tctx->in_idle); in io_uring_cancel_generic()
2981 inflight = tctx_inflight(tctx, !cancel_all); in io_uring_cancel_generic()
2989 xa_for_each(&tctx->xa, index, node) { in io_uring_cancel_generic()
3008 prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE); in io_uring_cancel_generic()
3017 if (inflight == tctx_inflight(tctx, !cancel_all)) in io_uring_cancel_generic()
3019 finish_wait(&tctx->wait, &wait); in io_uring_cancel_generic()
3022 io_uring_clean_tctx(tctx); in io_uring_cancel_generic()
3028 atomic_dec(&tctx->in_idle); in io_uring_cancel_generic()
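Lines 2963-3028 form the skeleton of io_uring_cancel_generic(): tell the task's io-wq to start exiting (2972-2973), mark the task idle, then loop cancelling and sleeping on tctx->wait until the chosen inflight view (tracked work for a plain exit, everything for cancel_all) hits zero. Condensed from the lines above, with the per-ring cancellation body elided:

    atomic_inc(&tctx->in_idle);
    do {
            io_uring_drop_tctx_refs(current);
            inflight = tctx_inflight(tctx, !cancel_all);
            if (!inflight)
                    break;

            /* ... xa_for_each(&tctx->xa, ...): cancel work on each ring ... */

            prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
            io_run_task_work();
            io_uring_drop_tctx_refs(current);
            /* sleep only if nothing completed while we were cancelling;
             * __io_put_task() wakes tctx->wait as references drain */
            if (inflight == tctx_inflight(tctx, !cancel_all))
                    schedule();
            finish_wait(&tctx->wait, &wait);
    } while (1);
    io_uring_clean_tctx(tctx);
    atomic_dec(&tctx->in_idle);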
3172 struct io_uring_task *tctx = current->io_uring; in SYSCALL_DEFINE6() local
3174 if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX)) in SYSCALL_DEFINE6()
3177 f.file = tctx->registered_rings[fd]; in SYSCALL_DEFINE6()
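3172-3177 is io_uring_enter()'s registered-ring path: with IORING_ENTER_REGISTERED_RING set, fd is an index into tctx->registered_rings rather than a descriptor, skipping fdget() on every syscall. Userspace opts in via io_uring_register(); a minimal liburing sketch, assuming liburing 2.2+:

    #include <liburing.h>

    struct io_uring ring;

    io_uring_queue_init(8, &ring, 0);
    /* Register the ring fd in tctx->registered_rings; from here on,
     * liburing passes the registered index plus
     * IORING_ENTER_REGISTERED_RING on each io_uring_enter() call. */
    io_uring_register_ring_fd(&ring);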
3757 struct io_uring_task *tctx = current->io_uring; in io_register_iowq_aff() local
3761 if (!tctx || !tctx->io_wq) in io_register_iowq_aff()
3784 ret = io_wq_cpu_affinity(tctx->io_wq, new_mask); in io_register_iowq_aff()
3791 struct io_uring_task *tctx = current->io_uring; in io_unregister_iowq_aff() local
3793 if (!tctx || !tctx->io_wq) in io_unregister_iowq_aff()
3796 return io_wq_cpu_affinity(tctx->io_wq, NULL); in io_unregister_iowq_aff()
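3757-3796 pair up: io_register_iowq_aff() pins the calling task's io-wq workers to a cpumask, and io_unregister_iowq_aff() resets them by passing a NULL mask at 3796; both bail out at 3761/3793 if the task has never created an io-wq. The matching liburing calls, sketched against the ring initialized in the previous sketch:

    #include <liburing.h>
    #include <sched.h>

    cpu_set_t mask;

    CPU_ZERO(&mask);
    CPU_SET(0, &mask);      /* confine io-wq workers to CPU 0 */
    io_uring_register_iowq_aff(&ring, sizeof(mask), &mask);
    /* ... submit work ... */
    io_uring_unregister_iowq_aff(&ring);    /* back to the default mask */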
3804 struct io_uring_task *tctx = NULL; in io_register_iowq_max_workers() local
3828 tctx = sqd->thread->io_uring; in io_register_iowq_max_workers()
3831 tctx = current->io_uring; in io_register_iowq_max_workers()
3841 if (tctx && tctx->io_wq) { in io_register_iowq_max_workers()
3842 ret = io_wq_max_workers(tctx->io_wq, new_count); in io_register_iowq_max_workers()
3863 struct io_uring_task *tctx = node->task->io_uring; in io_register_iowq_max_workers() local
3865 if (WARN_ON_ONCE(!tctx->io_wq)) in io_register_iowq_max_workers()
3871 (void)io_wq_max_workers(tctx->io_wq, new_count); in io_register_iowq_max_workers()
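The final cluster, 3804-3871, is io_register_iowq_max_workers(): it picks whose tctx to adjust (the SQPOLL thread's at 3828, otherwise the caller's at 3831), applies the limits through io_wq_max_workers() at 3842, then at 3863-3871 fans them out to every other task attached to the ring, deliberately ignoring those return values. On the userspace side the two slots cap bounded (e.g. regular-file) and unbounded workers respectively, and a 0 leaves a limit unchanged while the old value is reported back; a liburing sketch reusing the ring from the sketches above:

    #include <liburing.h>

    /* ring: the initialized struct io_uring from the sketches above */
    unsigned int values[2] = { 4, 0 }; /* 4 bounded workers; leave unbounded as-is */

    io_uring_register_iowq_max_workers(&ring, values);
    /* on return, values[] holds the previous limits */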