Lines Matching refs:tctx (cross-reference hits for struct io_uring_task, the per-task io_uring context; by the function names and line numbers, apparently io_uring/io_uring.c in a mainline kernel)
402 struct io_uring_task *tctx = req->task->io_uring; in io_clean_op() local
404 atomic_dec(&tctx->inflight_tracked); in io_clean_op()
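The io_clean_op() hits are the teardown side of tracked-inflight accounting: when a request marked REQ_F_INFLIGHT is cleaned up, the per-task counter that the cancel path later reads via tctx_inflight() is decremented. A minimal sketch of that branch, assuming the surrounding mainline context (the REQ_F_INFLIGHT test comes from the upstream function; the rest of io_clean_op() is omitted):

    /* inside io_clean_op(), after per-opcode cleanup */
    if (req->flags & REQ_F_INFLIGHT) {
        struct io_uring_task *tctx = req->task->io_uring;

        /* balances the increment done when the req was marked tracked */
        atomic_dec(&tctx->inflight_tracked);
    }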
509 struct io_uring_task *tctx = req->task->io_uring; in io_queue_iowq() local
511 BUG_ON(!tctx); in io_queue_iowq()
512 BUG_ON(!tctx->io_wq); in io_queue_iowq()
528 io_wq_enqueue(tctx->io_wq, &req->work); in io_queue_iowq()
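io_queue_iowq() punts a request to the owning task's io-wq worker pool; the BUG_ON()s assert the invariant that a task owning requests always has a tctx with a live io_wq. A condensed sketch, assuming the mainline helpers io_prep_async_link() and io_wq_enqueue() (linked-timeout and cancel-on-exit handling trimmed):

    void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts)
    {
        struct io_uring_task *tctx = req->task->io_uring;

        BUG_ON(!tctx);          /* every request owner has a tctx */
        BUG_ON(!tctx->io_wq);   /* and its io-wq was set up at first use */

        /* init ->work of the whole link before punting */
        io_prep_async_link(req);
        io_wq_enqueue(tctx->io_wq, &req->work);
    }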
743 struct io_uring_task *tctx = task->io_uring; in io_put_task_remote() local
745 percpu_counter_sub(&tctx->inflight, 1); in io_put_task_remote()
746 if (unlikely(atomic_read(&tctx->in_cancel))) in io_put_task_remote()
747 wake_up(&tctx->wait); in io_put_task_remote()
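io_put_task_remote() drops one inflight reference on behalf of another task; the in_cancel test exists so a task parked in io_uring_cancel_generic() is woken when the count changes. A sketch close to the upstream body (the trailing put_task_struct() is included on the assumption the function also releases its task reference):

    static void io_put_task_remote(struct task_struct *task)
    {
        struct io_uring_task *tctx = task->io_uring;

        percpu_counter_sub(&tctx->inflight, 1);
        /* a canceling task sleeps on tctx->wait until inflight drains */
        if (unlikely(atomic_read(&tctx->in_cancel)))
            wake_up(&tctx->wait);
        put_task_struct(task);
    }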
766 void io_task_refs_refill(struct io_uring_task *tctx) in io_task_refs_refill() argument
768 unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR; in io_task_refs_refill()
770 percpu_counter_add(&tctx->inflight, refill); in io_task_refs_refill()
772 tctx->cached_refs += refill; in io_task_refs_refill()
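io_task_refs_refill() is the slow path of the cached reference scheme: cached_refs has dropped to zero or below, so it is topped back up past IO_TCTX_REFS_CACHE_NR in one batch, charging the percpu inflight counter once instead of once per request. Sketch, assuming the batched refcount_add() on current->usage matches upstream:

    void io_task_refs_refill(struct io_uring_task *tctx)
    {
        /* cached_refs is <= 0 here, so refill past the cache size */
        unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;

        percpu_counter_add(&tctx->inflight, refill);
        refcount_add(refill, &current->usage);  /* batch the task refs too */
        tctx->cached_refs += refill;
    }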
777 struct io_uring_task *tctx = task->io_uring; in io_uring_drop_tctx_refs() local
778 unsigned int refs = tctx->cached_refs; in io_uring_drop_tctx_refs()
781 tctx->cached_refs = 0; in io_uring_drop_tctx_refs()
782 percpu_counter_sub(&tctx->inflight, refs); in io_uring_drop_tctx_refs()
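io_uring_drop_tctx_refs() is the inverse operation: on cancel/exit, any still-cached references are returned in one go so tctx_inflight() stops over-counting. Sketch, assuming put_task_struct_many() releases the batched task references as upstream does:

    static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
    {
        struct io_uring_task *tctx = task->io_uring;
        unsigned int refs = tctx->cached_refs;

        if (refs) {
            tctx->cached_refs = 0;
            percpu_counter_sub(&tctx->inflight, refs);
            put_task_struct_many(task, refs);
        }
    }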
1245 static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync) in io_fallback_tw() argument
1247 struct llist_node *node = llist_del_all(&tctx->task_list); in io_fallback_tw()
1277 struct io_uring_task *tctx = container_of(cb, struct io_uring_task, in tctx_task_work() local
1285 io_fallback_tw(tctx, true); in tctx_task_work()
1291 node = io_llist_xchg(&tctx->task_list, &fake); in tctx_task_work()
1295 if (READ_ONCE(tctx->task_list.first) != &fake) in tctx_task_work()
1299 if (READ_ONCE(tctx->task_list.first) != &fake) in tctx_task_work()
1302 node = io_llist_cmpxchg(&tctx->task_list, &fake, NULL); in tctx_task_work()
1308 if (unlikely(atomic_read(&tctx->in_cancel))) in tctx_task_work()
1311 trace_io_uring_task_work_run(tctx, count, loops); in tctx_task_work()
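tctx_task_work() (with io_fallback_tw() as its escape hatch for exiting tasks) drains tctx->task_list using a stack-allocated sentinel: swapping the list head to &fake keeps the list non-empty, so concurrent producers see llist_add() return false and skip redundant task_work_add() calls, and the worker only retires once a cmpxchg of &fake back to NULL succeeds with no racing insertions. A condensed sketch, assuming the io_uring-local helpers io_llist_xchg()/io_llist_cmpxchg() and a handle_tw_list() that runs nodes up to the sentinel (completion flushing between the two list checks is omitted):

    void tctx_task_work(struct callback_head *cb)
    {
        struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
                                                  task_work);
        struct llist_node fake = {};
        struct llist_node *node;
        unsigned int loops = 0, count = 0;

        if (unlikely(current->flags & PF_EXITING)) {
            /* task can't run task_work anymore: punt to the fallback wq */
            io_fallback_tw(tctx, true);
            return;
        }

        do {
            loops++;
            /* swap in the sentinel, process everything we captured */
            node = io_llist_xchg(&tctx->task_list, &fake);
            count += handle_tw_list(node, &fake);

            /* skip the expensive cmpxchg if new items arrived meanwhile */
            if (READ_ONCE(tctx->task_list.first) != &fake)
                continue;
            /* list holds only the sentinel: try to swap it out for NULL */
            node = io_llist_cmpxchg(&tctx->task_list, &fake, NULL);
        } while (node != &fake);

        if (unlikely(atomic_read(&tctx->in_cancel)))
            io_uring_drop_tctx_refs(current);

        trace_io_uring_task_work_run(tctx, count, loops);
    }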
1367 struct io_uring_task *tctx = req->task->io_uring; in io_req_normal_work_add() local
1371 if (!llist_add(&req->io_task_work.node, &tctx->task_list)) in io_req_normal_work_add()
1377 if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method))) in io_req_normal_work_add()
1380 io_fallback_tw(tctx, false); in io_req_normal_work_add()
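io_req_normal_work_add() is the producer side of the same list: only the llist_add() that turns the list non-empty pays for a task_work_add(), and if the target task no longer accepts task_work the request is rerouted through io_fallback_tw(). Sketch under the same assumptions (the IORING_SETUP_TASKRUN_FLAG handling is omitted):

    static void io_req_normal_work_add(struct io_kiocb *req)
    {
        struct io_uring_task *tctx = req->task->io_uring;
        struct io_ring_ctx *ctx = req->ctx;

        /* list was already non-empty: task_work is pending, we're done */
        if (!llist_add(&req->io_task_work.node, &tctx->task_list))
            return;

        if (likely(!task_work_add(req->task, &tctx->task_work,
                                  ctx->notify_method)))
            return;

        /* task is exiting and refused the work: punt to the fallback wq */
        io_fallback_tw(tctx, false);
    }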
2516 struct io_uring_task *tctx = current->io_uring; in current_pending_io() local
2518 if (!tctx) in current_pending_io()
2520 return percpu_counter_read_positive(&tctx->inflight); in current_pending_io()
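current_pending_io() asks whether the current task has io_uring requests in flight, using the cheap approximate percpu read rather than a full sum; upstream apparently uses it on the CQ wait path to decide how the sleep is accounted. Reassembled from the hits above:

    static bool current_pending_io(void)
    {
        struct io_uring_task *tctx = current->io_uring;

        if (!tctx)
            return false;
        /* fast, approximate read; exactness doesn't matter here */
        return percpu_counter_read_positive(&tctx->inflight);
    }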
3052 struct io_uring_task *tctx = current->io_uring; in io_tctx_exit_cb() local
3062 if (tctx && !atomic_read(&tctx->in_cancel)) in io_tctx_exit_cb()
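io_tctx_exit_cb() runs as task_work during ring teardown to detach this task's tctx node; the in_cancel check sidesteps a race with concurrent cancellation, which removes the node itself, and tctx can be NULL if the work raced with cancellation off the exec path. Sketch, assuming the mainline struct io_tctx_exit container and io_uring_del_tctx_node():

    static __cold void io_tctx_exit_cb(struct callback_head *cb)
    {
        struct io_uring_task *tctx = current->io_uring;
        struct io_tctx_exit *work;

        work = container_of(cb, struct io_tctx_exit, task_work);
        /* racy to del the node mid-cancel; cancellation drops it itself */
        if (tctx && !atomic_read(&tctx->in_cancel))
            io_uring_del_tctx_node((unsigned long)work->ctx);
        complete(&work->completion);
    }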
3263 struct io_uring_task *tctx = node->task->io_uring; in io_uring_try_cancel_iowq() local
3269 if (!tctx || !tctx->io_wq) in io_uring_try_cancel_iowq()
3271 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true); in io_uring_try_cancel_iowq()
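io_uring_try_cancel_iowq() walks every task attached to the ring (ctx->tctx_list) and asks each task's io-wq to cancel work belonging to this ctx; the NULL checks cover tasks that never spawned an io-wq. Condensed sketch, assuming the io_cancel_ctx_cb() matcher and that holding uring_lock keeps the io-wqs alive, per the upstream comment:

    static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
    {
        struct io_tctx_node *node;
        enum io_wq_cancel cret;
        bool ret = false;

        mutex_lock(&ctx->uring_lock);
        list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
            struct io_uring_task *tctx = node->task->io_uring;

            if (!tctx || !tctx->io_wq)
                continue;
            /* last arg true: cancel all matches, not just the first */
            cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
            ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
        }
        mutex_unlock(&ctx->uring_lock);

        return ret;
    }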
3284 struct io_uring_task *tctx = task ? task->io_uring : NULL; in io_uring_try_cancel_requests() local
3300 } else if (tctx && tctx->io_wq) { in io_uring_try_cancel_requests()
3305 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb, in io_uring_try_cancel_requests()
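In io_uring_try_cancel_requests(), the tctx branch is the task-scoped variant: with a specific task the matcher keys on that task (and cancel_all), and per the upstream comment it sweeps that task's io-wq across all rings, which is fine since the task is in exit/exec. A fragment-level sketch of just this branch, assuming the mainline struct io_task_cancel and io_cancel_task_cb():

    struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
    enum io_wq_cancel cret;

    if (!task) {
        /* no task given: cancel ring-wide across all attached io-wqs */
        ret |= io_uring_try_cancel_iowq(ctx);
    } else if (tctx && tctx->io_wq) {
        /* cancels this task's requests on all rings, not only @ctx */
        cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
                               &cancel, true);
        ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
    }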
3333 static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked) in tctx_inflight() argument
3336 return atomic_read(&tctx->inflight_tracked); in tctx_inflight()
3337 return percpu_counter_sum(&tctx->inflight); in tctx_inflight()
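tctx_inflight() is the counter the cancel loop polls. For a partial cancel (!cancel_all, i.e. exec rather than exit) only REQ_F_INFLIGHT-tracked requests matter, so a plain atomic read suffices; a full cancel needs the exact, more expensive percpu sum. Reassembled from the hits above:

    static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
    {
        if (tracked)
            return atomic_read(&tctx->inflight_tracked);
        return percpu_counter_sum(&tctx->inflight);
    }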
3346 struct io_uring_task *tctx = current->io_uring; in io_uring_cancel_generic() local
3357 if (tctx->io_wq) in io_uring_cancel_generic()
3358 io_wq_exit_start(tctx->io_wq); in io_uring_cancel_generic()
3360 atomic_inc(&tctx->in_cancel); in io_uring_cancel_generic()
3366 inflight = tctx_inflight(tctx, !cancel_all); in io_uring_cancel_generic()
3371 xa_for_each(&tctx->xa, index, node) { in io_uring_cancel_generic()
3390 prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE); in io_uring_cancel_generic()
3393 xa_for_each(&tctx->xa, index, node) { in io_uring_cancel_generic()
3405 if (inflight == tctx_inflight(tctx, !cancel_all)) in io_uring_cancel_generic()
3408 finish_wait(&tctx->wait, &wait); in io_uring_cancel_generic()
3411 io_uring_clean_tctx(tctx); in io_uring_cancel_generic()
3417 atomic_dec(&tctx->in_cancel); in io_uring_cancel_generic()
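Taken together, the io_uring_cancel_generic() hits outline the quiesce protocol: start io-wq shutdown, mark in_cancel, then loop: return cached refs, snapshot inflight, run a cancel pass over every ring in tctx->xa, and sleep on tctx->wait only if the snapshot is unchanged (avoiding a lost wakeup against completions racing with prepare_to_wait()). A compressed sketch; the SQPOLL branch, DEFER_TASKRUN work checks, and the retry-on-progress logic are omitted:

    __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
    {
        struct io_uring_task *tctx = current->io_uring;
        struct io_tctx_node *node;
        unsigned long index;
        s64 inflight;
        DEFINE_WAIT(wait);

        if (tctx->io_wq)
            io_wq_exit_start(tctx->io_wq);  /* begin io-wq teardown early */

        atomic_inc(&tctx->in_cancel);
        do {
            io_uring_drop_tctx_refs(current);
            /* read completions before cancelations */
            inflight = tctx_inflight(tctx, !cancel_all);
            if (!inflight)
                break;

            /* one cancel pass over every ring this task has touched */
            xa_for_each(&tctx->xa, index, node)
                io_uring_try_cancel_requests(node->ctx, current, cancel_all);

            prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
            /* only sleep if nothing completed since the snapshot */
            if (inflight == tctx_inflight(tctx, !cancel_all))
                schedule();
            finish_wait(&tctx->wait, &wait);
        } while (1);

        io_uring_clean_tctx(tctx);
        if (cancel_all)
            atomic_dec(&tctx->in_cancel);
    }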
3619 struct io_uring_task *tctx = current->io_uring; in SYSCALL_DEFINE6() local
3621 if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX)) in SYSCALL_DEFINE6()
3624 f.file = tctx->registered_rings[fd]; in SYSCALL_DEFINE6()
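These io_uring_enter() hits (mirrored by the io_uring_register() pair at 4586-4591 below) are the registered-ring-fd fast path: with IORING_ENTER_REGISTERED_RING, the ring file comes from the tctx's fixed registered_rings[] table instead of fdget(), skipping fd-table refcounting. Sketch of the lookup, assuming the usual array_index_nospec() hardening and struct fd handling:

    if (flags & IORING_ENTER_REGISTERED_RING) {
        struct io_uring_task *tctx = current->io_uring;

        if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
            return -EINVAL;
        fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
        f.file = tctx->registered_rings[fd];  /* no per-call fget/fput */
        f.flags = 0;
        if (unlikely(!f.file))
            return -EBADF;
    } else {
        f = fdget(fd);
        if (unlikely(!f.file))
            return -EBADF;
    }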
3855 struct io_uring_task *tctx; in io_uring_create() local
4034 tctx = current->io_uring; in io_uring_create()
4041 ret = io_ring_add_registered_file(tctx, file, 0, IO_RINGFD_REG_MAX); in io_uring_create()
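The io_uring_create() hit looks like the IORING_SETUP_REGISTERED_FD_ONLY path: the freshly created ring file is parked straight into the creator's registered_rings table (searching slots 0..IO_RINGFD_REG_MAX), so no regular fd is ever installed and the return value is a registered index. A speculative fragment, assuming that flag gates the call:

    tctx = current->io_uring;
    if (ctx->flags & IORING_SETUP_REGISTERED_FD_ONLY) {
        /* park the file in the registered table; no real fd exists */
        ret = io_ring_add_registered_file(tctx, file, 0, IO_RINGFD_REG_MAX);
        if (ret < 0)
            goto err_fput;
        fd = ret;  /* only usable via the *_REGISTERED_RING flags */
    }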
4323 struct io_uring_task *tctx = NULL; in io_register_iowq_max_workers() local
4347 tctx = sqd->thread->io_uring; in io_register_iowq_max_workers()
4350 tctx = current->io_uring; in io_register_iowq_max_workers()
4360 if (tctx && tctx->io_wq) { in io_register_iowq_max_workers()
4361 ret = io_wq_max_workers(tctx->io_wq, new_count); in io_register_iowq_max_workers()
4382 struct io_uring_task *tctx = node->task->io_uring; in io_register_iowq_max_workers() local
4384 if (WARN_ON_ONCE(!tctx->io_wq)) in io_register_iowq_max_workers()
4390 (void)io_wq_max_workers(tctx->io_wq, new_count); in io_register_iowq_max_workers()
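io_register_iowq_max_workers() picks the io-wq to resize based on who submits: the SQPOLL thread's tctx for SQPOLL rings, otherwise the caller's own, and for non-SQPOLL rings it then broadcasts the new limits to every attached task's io-wq. A heavily condensed sketch; locking, copy_{from,to}_user of new_count[2] (bounded/unbounded limits), and the sqd refcounting are all omitted:

    struct io_uring_task *tctx = NULL;
    struct io_tctx_node *node;

    if (ctx->flags & IORING_SETUP_SQPOLL) {
        /* only the SQPOLL thread submits, so resize its io-wq */
        if (sqd && sqd->thread)
            tctx = sqd->thread->io_uring;
    } else {
        tctx = current->io_uring;
    }

    if (tctx && tctx->io_wq) {
        ret = io_wq_max_workers(tctx->io_wq, new_count);
        if (ret)
            goto err;
    }

    /* non-SQPOLL: propagate the limits to every registered user */
    list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
        struct io_uring_task *tctx = node->task->io_uring;

        if (WARN_ON_ONCE(!tctx->io_wq))
            continue;
        /* ignore errors, it always returns zero anyway */
        (void)io_wq_max_workers(tctx->io_wq, new_count);
    }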
4586 struct io_uring_task *tctx = current->io_uring; in SYSCALL_DEFINE4() local
4588 if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX)) in SYSCALL_DEFINE4()
4591 f.file = tctx->registered_rings[fd]; in SYSCALL_DEFINE4()