Lines Matching refs:tctx

Each entry below is one reference to the identifier tctx: the source line number, the matching line, and the enclosing function ("local" marks a local variable declaration, "argument" a function parameter).
1447 struct io_uring_task *tctx = req->task->io_uring; in io_queue_async_work() local
1452 BUG_ON(!tctx); in io_queue_async_work()
1453 BUG_ON(!tctx->io_wq); in io_queue_async_work()
1470 io_wq_enqueue(tctx->io_wq, &req->work); in io_queue_async_work()
1694 struct io_uring_task *tctx = task->io_uring; in io_put_task() local
1697 tctx->cached_refs += nr; in io_put_task()
1699 percpu_counter_sub(&tctx->inflight, nr); in io_put_task()
1700 if (unlikely(atomic_read(&tctx->in_idle))) in io_put_task()
1701 wake_up(&tctx->wait); in io_put_task()
1706 static void io_task_refs_refill(struct io_uring_task *tctx) in io_task_refs_refill() argument
1708 unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR; in io_task_refs_refill()
1710 percpu_counter_add(&tctx->inflight, refill); in io_task_refs_refill()
1712 tctx->cached_refs += refill; in io_task_refs_refill()
1717 struct io_uring_task *tctx = current->io_uring; in io_get_task_refs() local
1719 tctx->cached_refs -= nr; in io_get_task_refs()
1720 if (unlikely(tctx->cached_refs < 0)) in io_get_task_refs()
1721 io_task_refs_refill(tctx); in io_get_task_refs()
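
The three helpers above (io_get_task_refs(), io_task_refs_refill(), io_put_task()) batch request references against the per-task inflight counter: submission takes refs out of tctx->cached_refs and only does a bulk refill of the percpu counter when the cache runs dry, and a put from the same task hands refs straight back to the cache (the percpu_counter_sub()/wake_up() lines cover puts issued from another task while this one is idling). A minimal user-space model of that batching, with a plain long standing in for the percpu_counter, the task_struct refcounting of the real refill left out, and the IO_TCTX_REFS_CACHE_NR value assumed:

    /* Simplified model of the cached task-reference scheme; not kernel code. */
    #include <stdio.h>

    #define IO_TCTX_REFS_CACHE_NR  (1U << 10)   /* assumed batch size */

    struct model_tctx {
        int  cached_refs;   /* refs already charged to this task */
        long inflight;      /* stand-in for the percpu_counter */
    };

    /* io_task_refs_refill(): top the cache back up to a full batch in one go */
    static void task_refs_refill(struct model_tctx *tctx)
    {
        unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;

        tctx->inflight += refill;
        tctx->cached_refs += refill;
    }

    /* io_get_task_refs(): the submission fast path only touches cached_refs */
    static void get_task_refs(struct model_tctx *tctx, int nr)
    {
        tctx->cached_refs -= nr;
        if (tctx->cached_refs < 0)
            task_refs_refill(tctx);
    }

    /* io_put_task(), same-task case: refs go back to the cache, not the counter */
    static void put_task_refs(struct model_tctx *tctx, int nr)
    {
        tctx->cached_refs += nr;
    }

    int main(void)
    {
        struct model_tctx tctx = { 0, 0 };

        get_task_refs(&tctx, 8);    /* first submission triggers one bulk refill */
        put_task_refs(&tctx, 8);
        printf("cached=%d inflight=%ld\n", tctx.cached_refs, tctx.inflight);
        return 0;
    }

The effect is that the hot submit/complete path never touches the shared counter; only a dry cache or a cross-task put does.
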
2126 struct io_uring_task *tctx = container_of(cb, struct io_uring_task, in tctx_task_work() local
2132 if (!tctx->task_list.first && locked && ctx->submit_state.compl_nr) in tctx_task_work()
2135 spin_lock_irq(&tctx->task_lock); in tctx_task_work()
2136 node = tctx->task_list.first; in tctx_task_work()
2137 INIT_WQ_LIST(&tctx->task_list); in tctx_task_work()
2139 tctx->task_running = false; in tctx_task_work()
2140 spin_unlock_irq(&tctx->task_lock); in tctx_task_work()
2169 struct io_uring_task *tctx = tsk->io_uring; in io_req_task_work_add() local
2175 WARN_ON_ONCE(!tctx); in io_req_task_work_add()
2177 spin_lock_irqsave(&tctx->task_lock, flags); in io_req_task_work_add()
2178 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list); in io_req_task_work_add()
2179 running = tctx->task_running; in io_req_task_work_add()
2181 tctx->task_running = true; in io_req_task_work_add()
2182 spin_unlock_irqrestore(&tctx->task_lock, flags); in io_req_task_work_add()
2195 if (!task_work_add(tsk, &tctx->task_work, notify)) { in io_req_task_work_add()
2200 spin_lock_irqsave(&tctx->task_lock, flags); in io_req_task_work_add()
2201 tctx->task_running = false; in io_req_task_work_add()
2202 node = tctx->task_list.first; in io_req_task_work_add()
2203 INIT_WQ_LIST(&tctx->task_list); in io_req_task_work_add()
2204 spin_unlock_irqrestore(&tctx->task_lock, flags); in io_req_task_work_add()
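
tctx_task_work() and io_req_task_work_add() above form a producer/consumer pair around tctx->task_list: producers append under task_lock and call task_work_add() only for the first entry of a batch, with task_running acting as the "already signalled" flag, while the consumer keeps splicing whole batches out and clears the flag once the list comes back empty; if task_work_add() fails because the task is exiting, the producer reclaims the list under the lock (lines 2200-2204). A pthread-based sketch of just that hand-off, with struct work_node, its payload and the printf() standing in for the real task notification all invented for the model:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct work_node {
        struct work_node *next;
        int payload;
    };

    struct model_tctx {
        pthread_mutex_t lock;           /* task_lock */
        struct work_node *head, **tail; /* task_list */
        bool running;                   /* task_running */
    };

    /* io_req_task_work_add(): append, signal only for the first item of a batch */
    static void req_task_work_add(struct model_tctx *t, struct work_node *node)
    {
        bool was_running;

        pthread_mutex_lock(&t->lock);
        node->next = NULL;
        *t->tail = node;
        t->tail = &node->next;
        was_running = t->running;
        t->running = true;
        pthread_mutex_unlock(&t->lock);

        if (!was_running)
            printf("signal task (task_work_add equivalent)\n");
    }

    /* tctx_task_work(): splice batches until empty, then allow a new signal */
    static void task_work_run(struct model_tctx *t)
    {
        for (;;) {
            struct work_node *node;

            pthread_mutex_lock(&t->lock);
            node = t->head;
            t->head = NULL;
            t->tail = &t->head;
            if (!node)
                t->running = false; /* next add must signal again */
            pthread_mutex_unlock(&t->lock);

            if (!node)
                break;
            for (; node; node = node->next)
                printf("run req %d\n", node->payload);
        }
    }

    int main(void)
    {
        struct model_tctx t = {
            .lock = PTHREAD_MUTEX_INITIALIZER,
            .head = NULL, .tail = &t.head, .running = false,
        };
        struct work_node a = { .payload = 1 }, b = { .payload = 2 };

        req_task_work_add(&t, &a);  /* signals */
        req_task_work_add(&t, &b);  /* already signalled, just queued */
        task_work_run(&t);          /* runs both, then clears running */
        return 0;
    }

Batching this way means a burst of completions costs one task_work_add()/wakeup per batch rather than one per request.
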
6229 static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data, in io_async_cancel_one() argument
6236 if (!tctx || !tctx->io_wq) in io_async_cancel_one()
6239 cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false); in io_async_cancel_one()
6308 struct io_uring_task *tctx = node->task->io_uring; in io_async_cancel() local
6310 ret = io_async_cancel_one(tctx, req->cancel.addr, ctx); in io_async_cancel()
6625 struct io_uring_task *tctx = req->task->io_uring; in io_clean_op() local
6627 atomic_dec(&tctx->inflight_tracked); in io_clean_op()
8561 struct io_uring_task *tctx; in io_uring_alloc_task_context() local
8564 tctx = kzalloc(sizeof(*tctx), GFP_KERNEL); in io_uring_alloc_task_context()
8565 if (unlikely(!tctx)) in io_uring_alloc_task_context()
8568 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL); in io_uring_alloc_task_context()
8570 kfree(tctx); in io_uring_alloc_task_context()
8574 tctx->io_wq = io_init_wq_offload(ctx, task); in io_uring_alloc_task_context()
8575 if (IS_ERR(tctx->io_wq)) { in io_uring_alloc_task_context()
8576 ret = PTR_ERR(tctx->io_wq); in io_uring_alloc_task_context()
8577 percpu_counter_destroy(&tctx->inflight); in io_uring_alloc_task_context()
8578 kfree(tctx); in io_uring_alloc_task_context()
8582 xa_init(&tctx->xa); in io_uring_alloc_task_context()
8583 init_waitqueue_head(&tctx->wait); in io_uring_alloc_task_context()
8584 atomic_set(&tctx->in_idle, 0); in io_uring_alloc_task_context()
8585 atomic_set(&tctx->inflight_tracked, 0); in io_uring_alloc_task_context()
8586 task->io_uring = tctx; in io_uring_alloc_task_context()
8587 spin_lock_init(&tctx->task_lock); in io_uring_alloc_task_context()
8588 INIT_WQ_LIST(&tctx->task_list); in io_uring_alloc_task_context()
8589 init_task_work(&tctx->task_work, tctx_task_work); in io_uring_alloc_task_context()
8595 struct io_uring_task *tctx = tsk->io_uring; in __io_uring_free() local
8597 WARN_ON_ONCE(!xa_empty(&tctx->xa)); in __io_uring_free()
8598 WARN_ON_ONCE(tctx->io_wq); in __io_uring_free()
8599 WARN_ON_ONCE(tctx->cached_refs); in __io_uring_free()
8601 percpu_counter_destroy(&tctx->inflight); in __io_uring_free()
8602 kfree(tctx); in __io_uring_free()
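
io_uring_alloc_task_context() above initializes, and __io_uring_free() sanity-checks, every tctx field that appears in this listing, so the shape of struct io_uring_task can be read off the references. The sketch below is that reconstruction rather than a copy of the kernel header: only referenced fields are shown, their order is arbitrary, and the exact declarations of last and task_running are assumptions.

    /* Reconstructed from the references in this listing; not verbatim kernel source. */
    struct io_uring_task {
        int                     cached_refs;       /* batched refs, see io_task_refs_refill() */
        struct xarray           xa;                /* ring ctx -> io_tctx_node registrations */
        struct wait_queue_head  wait;              /* cancel path sleeps here; io_put_task() wakes it */
        struct io_ring_ctx      *last;             /* last ctx submitted to (io_uring_add_tctx_node() fast path) */
        struct io_wq            *io_wq;            /* per-task async offload worker pool */
        struct percpu_counter   inflight;          /* all inflight requests */
        atomic_t                inflight_tracked;  /* subset read by tctx_inflight(tctx, true); dec'd in io_clean_op() */
        atomic_t                in_idle;           /* set while io_uring_cancel_generic() runs */

        spinlock_t              task_lock;         /* protects task_list / task_running */
        struct io_wq_work_list  task_list;         /* requests queued for task_work */
        bool                    task_running;      /* task_work already signalled */
        struct callback_head    task_work;         /* runs tctx_task_work() */
    };

The warnings in __io_uring_free() (lines 8597-8599) also spell out the teardown ordering: every ring registration dropped, io_wq already shut down, and cached refs flushed back before the percpu counter is destroyed and the context freed.
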
9361 struct io_uring_task *tctx = current->io_uring; in io_tctx_exit_cb() local
9369 if (!atomic_read(&tctx->in_idle)) in io_tctx_exit_cb()
9569 struct io_uring_task *tctx = node->task->io_uring; in io_uring_try_cancel_iowq() local
9575 if (!tctx || !tctx->io_wq) in io_uring_try_cancel_iowq()
9577 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true); in io_uring_try_cancel_iowq()
9590 struct io_uring_task *tctx = task ? task->io_uring : NULL; in io_uring_try_cancel_requests() local
9598 } else if (tctx && tctx->io_wq) { in io_uring_try_cancel_requests()
9603 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb, in io_uring_try_cancel_requests()
9630 struct io_uring_task *tctx = current->io_uring; in __io_uring_add_tctx_node() local
9634 if (unlikely(!tctx)) { in __io_uring_add_tctx_node()
9639 tctx = current->io_uring; in __io_uring_add_tctx_node()
9644 ret = io_wq_max_workers(tctx->io_wq, limits); in __io_uring_add_tctx_node()
9649 if (!xa_load(&tctx->xa, (unsigned long)ctx)) { in __io_uring_add_tctx_node()
9656 ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx, in __io_uring_add_tctx_node()
9667 tctx->last = ctx; in __io_uring_add_tctx_node()
9676 struct io_uring_task *tctx = current->io_uring; in io_uring_add_tctx_node() local
9678 if (likely(tctx && tctx->last == ctx)) in io_uring_add_tctx_node()
9688 struct io_uring_task *tctx = current->io_uring; in io_uring_del_tctx_node() local
9691 if (!tctx) in io_uring_del_tctx_node()
9693 node = xa_erase(&tctx->xa, index); in io_uring_del_tctx_node()
9704 if (tctx->last == node->ctx) in io_uring_del_tctx_node()
9705 tctx->last = NULL; in io_uring_del_tctx_node()
9709 static void io_uring_clean_tctx(struct io_uring_task *tctx) in io_uring_clean_tctx() argument
9711 struct io_wq *wq = tctx->io_wq; in io_uring_clean_tctx()
9715 xa_for_each(&tctx->xa, index, node) { in io_uring_clean_tctx()
9725 tctx->io_wq = NULL; in io_uring_clean_tctx()
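
The node helpers above keep, per task, an xarray of every ring the task has submitted to (keyed by the ctx pointer, one io_tctx_node each) plus a one-entry cache in tctx->last, so io_uring_add_tctx_node() can skip the lookup while a task keeps using the same ring; on first use the slow path also allocates the task context itself and applies any previously registered io-wq worker limits (lines 9634-9644). A rough user-space model of that last-ctx fast path, with a fixed array in place of the xarray and node allocation, and MAX_RINGS plus the function names invented:

    #include <stddef.h>

    #define MAX_RINGS 16

    struct model_tctx {
        const void *rings[MAX_RINGS];   /* stands in for the xa keyed by ctx */
        size_t nr_rings;
        const void *last;               /* tctx->last */
    };

    /* __io_uring_add_tctx_node(): slow path, register the ring if unseen */
    static int register_ring_slow(struct model_tctx *t, const void *ctx)
    {
        for (size_t i = 0; i < t->nr_rings; i++)
            if (t->rings[i] == ctx)
                goto done;
        if (t->nr_rings == MAX_RINGS)
            return -1;
        t->rings[t->nr_rings++] = ctx;
    done:
        t->last = ctx;
        return 0;
    }

    /* io_uring_add_tctx_node(): hit the cache when the same ring is reused */
    static int register_ring(struct model_tctx *t, const void *ctx)
    {
        if (t->last == ctx)
            return 0;
        return register_ring_slow(t, ctx);
    }

    int main(void)
    {
        struct model_tctx t = { .nr_rings = 0, .last = NULL };
        int ring_a, ring_b;     /* only their addresses matter here */

        register_ring(&t, &ring_a);  /* slow path: registered and cached */
        register_ring(&t, &ring_a);  /* fast path: tctx->last hit */
        register_ring(&t, &ring_b);  /* different ring: slow path again */
        return 0;
    }

io_uring_del_tctx_node() is the inverse: the entry is erased from the xarray and the cache is dropped if it pointed at that ring (lines 9693-9705).
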
9729 static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked) in tctx_inflight() argument
9732 return atomic_read(&tctx->inflight_tracked); in tctx_inflight()
9733 return percpu_counter_sum(&tctx->inflight); in tctx_inflight()
9738 struct io_uring_task *tctx = task->io_uring; in io_uring_drop_tctx_refs() local
9739 unsigned int refs = tctx->cached_refs; in io_uring_drop_tctx_refs()
9742 tctx->cached_refs = 0; in io_uring_drop_tctx_refs()
9743 percpu_counter_sub(&tctx->inflight, refs); in io_uring_drop_tctx_refs()
9754 struct io_uring_task *tctx = current->io_uring; in io_uring_cancel_generic() local
9763 if (tctx->io_wq) in io_uring_cancel_generic()
9764 io_wq_exit_start(tctx->io_wq); in io_uring_cancel_generic()
9766 atomic_inc(&tctx->in_idle); in io_uring_cancel_generic()
9770 inflight = tctx_inflight(tctx, !cancel_all); in io_uring_cancel_generic()
9778 xa_for_each(&tctx->xa, index, node) { in io_uring_cancel_generic()
9791 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE); in io_uring_cancel_generic()
9798 if (inflight == tctx_inflight(tctx, !cancel_all)) in io_uring_cancel_generic()
9800 finish_wait(&tctx->wait, &wait); in io_uring_cancel_generic()
9802 atomic_dec(&tctx->in_idle); in io_uring_cancel_generic()
9804 io_uring_clean_tctx(tctx); in io_uring_cancel_generic()
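
io_uring_cancel_generic() above is the task-exit/exec cancellation loop: start shutting down the task's io-wq, mark the task idle (which makes io_put_task() wake tctx->wait), then repeatedly give back unused cached refs, count what is still inflight (tracked requests only unless cancel_all), ask each registered ring to cancel this task's requests, and sleep only when nothing has completed since the last count, looping until inflight hits zero before tearing the context down. The condensed reconstruction below keeps the lines present in the listing; everything between them (the !inflight break, where io_uring_drop_tctx_refs() is called, the schedule() call) is filled in from context and should be read as assumption, not verbatim kernel source.

    static void cancel_generic_sketch(struct io_uring_task *tctx, bool cancel_all)
    {
        DEFINE_WAIT(wait);
        s64 inflight;

        if (tctx->io_wq)
            io_wq_exit_start(tctx->io_wq);      /* io-wq stops taking new work */

        atomic_inc(&tctx->in_idle);             /* completions now wake tctx->wait */
        do {
            io_uring_drop_tctx_refs(current);   /* drop unused cached refs before counting */
            inflight = tctx_inflight(tctx, !cancel_all);
            if (!inflight)
                break;

            /* xa_for_each(&tctx->xa, ...): cancel this task's requests on each ring */

            prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
            /* sleep only if nothing completed since the snapshot, else re-count */
            if (inflight == tctx_inflight(tctx, !cancel_all))
                schedule();
            finish_wait(&tctx->wait, &wait);
        } while (1);
        atomic_dec(&tctx->in_idle);

        io_uring_clean_tctx(tctx);              /* tear down io_wq and the xa nodes */
    }
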
10616 struct io_uring_task *tctx = current->io_uring; in io_register_iowq_aff() local
10620 if (!tctx || !tctx->io_wq) in io_register_iowq_aff()
10635 ret = io_wq_cpu_affinity(tctx->io_wq, new_mask); in io_register_iowq_aff()
10642 struct io_uring_task *tctx = current->io_uring; in io_unregister_iowq_aff() local
10644 if (!tctx || !tctx->io_wq) in io_unregister_iowq_aff()
10647 return io_wq_cpu_affinity(tctx->io_wq, NULL); in io_unregister_iowq_aff()
10655 struct io_uring_task *tctx = NULL; in io_register_iowq_max_workers() local
10679 tctx = sqd->thread->io_uring; in io_register_iowq_max_workers()
10682 tctx = current->io_uring; in io_register_iowq_max_workers()
10691 if (tctx && tctx->io_wq) { in io_register_iowq_max_workers()
10692 ret = io_wq_max_workers(tctx->io_wq, new_count); in io_register_iowq_max_workers()
10713 struct io_uring_task *tctx = node->task->io_uring; in io_register_iowq_max_workers() local
10715 if (WARN_ON_ONCE(!tctx->io_wq)) in io_register_iowq_max_workers()
10721 (void)io_wq_max_workers(tctx->io_wq, new_count); in io_register_iowq_max_workers()
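
These last helpers are the kernel side of the IORING_REGISTER_IOWQ_AFF and IORING_REGISTER_IOWQ_MAX_WORKERS registration opcodes: the affinity applies to the current task's io-wq (lines 10616-10647), while the worker limits target either the current task's or, for SQPOLL rings, the SQ thread's io-wq, and are then propagated to every task registered with the ring (lines 10713-10721). For the caller's side, a short liburing example, assuming liburing >= 2.1 for io_uring_register_iowq_max_workers(); a zero in either slot leaves that limit unchanged, and the previous limits are written back into the array:

    #include <liburing.h>
    #include <stdio.h>

    int main(void)
    {
        struct io_uring ring;
        /* counts[0] caps bounded io-wq workers, counts[1] caps unbounded ones */
        unsigned int counts[2] = { 4, 16 };
        int ret;

        ret = io_uring_queue_init(8, &ring, 0);
        if (ret < 0)
            return 1;

        ret = io_uring_register_iowq_max_workers(&ring, counts);
        if (ret < 0)
            fprintf(stderr, "register_iowq_max_workers: %d\n", ret);
        else
            printf("previous limits: bounded=%u unbounded=%u\n",
                   counts[0], counts[1]);

        io_uring_queue_exit(&ring);
        return 0;
    }
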