Lines Matching refs:tctx
(All references to the tctx local/argument, the per-task struct io_uring_task context, in what appears to be fs/io_uring.c from a v5.10-era kernel; the left-hand number is the kernel source line and the trailing annotation names the enclosing function.)
1100 struct io_uring_task *tctx = current->io_uring; in io_req_init_async() local
1108 req->work.identity = tctx->identity; in io_req_init_async()
1109 if (tctx->identity != &tctx->__identity) in io_req_init_async()
1216 static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req) in io_put_identity() argument
1218 if (req->work.identity == &tctx->__identity) in io_put_identity()
1266 struct io_uring_task *tctx = current->io_uring; in io_identity_cow() local
1293 if (tctx->identity != &tctx->__identity && in io_identity_cow()
1294 refcount_dec_and_test(&tctx->identity->count)) in io_identity_cow()
1295 kfree(tctx->identity); in io_identity_cow()
1296 if (req->work.identity != &tctx->__identity && in io_identity_cow()
1301 tctx->identity = id; in io_identity_cow()
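
The three functions above (io_req_init_async, io_put_identity, io_identity_cow) implement a copy-on-write scheme for the task's credential identity: tctx->identity normally points at the embedded tctx->__identity, and only when a request needs a private copy is a refcounted heap identity swapped in. A minimal sketch of that COW step, reconstructed from the fragments listed here; the name identity_cow_sketch is hypothetical, and the error paths and IO_WQ_WORK_* bookkeeping of the real io_identity_cow() are elided:

static bool identity_cow_sketch(struct io_kiocb *req)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_identity *id;

	/* Private, refcounted copy of the identity the request saw. */
	id = kmemdup(req->work.identity, sizeof(*id), GFP_KERNEL);
	if (unlikely(!id))
		return false;
	refcount_set(&id->count, 1);

	/* Drop the old dynamic identities (lines 1293-1296); the
	 * embedded __identity is never freed. */
	if (tctx->identity != &tctx->__identity &&
	    refcount_dec_and_test(&tctx->identity->count))
		kfree(tctx->identity);
	if (req->work.identity != &tctx->__identity &&
	    refcount_dec_and_test(&req->work.identity->count))
		kfree(req->work.identity);

	req->work.identity = id;
	tctx->identity = id;	/* line 1301 */
	return true;
}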
1868 struct io_uring_task *tctx = req->task->io_uring; in __io_free_req() local
1873 percpu_counter_dec(&tctx->inflight); in __io_free_req()
1874 if (atomic_read(&tctx->in_idle)) in __io_free_req()
1875 wake_up(&tctx->wait); in __io_free_req()
2132 struct io_uring_task *tctx = rb->task->io_uring; in io_req_free_batch_finish() local
2134 percpu_counter_sub(&tctx->inflight, rb->task_refs); in io_req_free_batch_finish()
2151 struct io_uring_task *tctx = rb->task->io_uring; in io_req_free_batch() local
2153 percpu_counter_sub(&tctx->inflight, rb->task_refs); in io_req_free_batch()
6604 struct io_uring_task *tctx = current->io_uring; in io_submit_sqes() local
6608 percpu_counter_sub(&tctx->inflight, unused); in io_submit_sqes()
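
These four sites are the consumer side of the per-task inflight accounting: every freed request decrements (or batch-subtracts from) tctx->inflight, and if the owning task is currently draining (in_idle set), the waiter parked on tctx->wait gets kicked. A sketch of the single-request path, essentially lines 1868-1875 under a hypothetical name:

static void put_inflight_sketch(struct io_kiocb *req)
{
	struct io_uring_task *tctx = req->task->io_uring;

	/* One request done; a cancelling task may be waiting for
	 * this counter to drain, so wake it if it is idling. */
	percpu_counter_dec(&tctx->inflight);
	if (atomic_read(&tctx->in_idle))
		wake_up(&tctx->wait);
}

The batched variants at lines 2132-2153 do the same with percpu_counter_sub() over rb->task_refs requests at once, and io_submit_sqes() at line 6608 gives back the references for SQEs that were counted but never submitted.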
7740 struct io_uring_task *tctx; in io_uring_alloc_task_context() local
7743 tctx = kmalloc(sizeof(*tctx), GFP_KERNEL); in io_uring_alloc_task_context()
7744 if (unlikely(!tctx)) in io_uring_alloc_task_context()
7747 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL); in io_uring_alloc_task_context()
7749 kfree(tctx); in io_uring_alloc_task_context()
7753 xa_init(&tctx->xa); in io_uring_alloc_task_context()
7754 init_waitqueue_head(&tctx->wait); in io_uring_alloc_task_context()
7755 tctx->last = NULL; in io_uring_alloc_task_context()
7756 atomic_set(&tctx->in_idle, 0); in io_uring_alloc_task_context()
7757 tctx->sqpoll = false; in io_uring_alloc_task_context()
7758 io_init_identity(&tctx->__identity); in io_uring_alloc_task_context()
7759 tctx->identity = &tctx->__identity; in io_uring_alloc_task_context()
7760 task->io_uring = tctx; in io_uring_alloc_task_context()
7766 struct io_uring_task *tctx = tsk->io_uring; in __io_uring_free() local
7768 WARN_ON_ONCE(!xa_empty(&tctx->xa)); in __io_uring_free()
7769 WARN_ON_ONCE(refcount_read(&tctx->identity->count) != 1); in __io_uring_free()
7770 if (tctx->identity != &tctx->__identity) in __io_uring_free()
7771 kfree(tctx->identity); in __io_uring_free()
7772 percpu_counter_destroy(&tctx->inflight); in __io_uring_free()
7773 kfree(tctx); in __io_uring_free()
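
io_uring_alloc_task_context() initializes every field of struct io_uring_task, so the structure's shape can be read straight off lines 7747-7759. A reconstruction inferred from those accesses; the real v5.10-era definition lives in include/linux/io_uring.h and may order or group the fields differently:

struct io_uring_task {
	struct xarray		xa;		/* ring files this task submitted to */
	struct wait_queue_head	wait;		/* cancel path sleeps here */
	struct file		*last;		/* one-slot cache for xa lookups */
	struct percpu_counter	inflight;	/* requests in flight for this task */
	struct io_identity	__identity;	/* embedded default identity */
	struct io_identity	*identity;	/* current identity, maybe a heap copy */
	atomic_t		in_idle;	/* task is draining/cancelling */
	bool			sqpoll;		/* task has used an SQPOLL ring */
};

__io_uring_free() (lines 7766-7773) undoes this in reverse: it frees a dynamic identity if one replaced the embedded one, destroys the percpu counter, and frees the context, warning if ring files are still registered or identity references remain.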
8694 struct io_uring_task *tctx = current->io_uring; in io_uring_add_task_file() local
8696 if (unlikely(!tctx)) { in io_uring_add_task_file()
8702 tctx = current->io_uring; in io_uring_add_task_file()
8704 if (tctx->last != file) { in io_uring_add_task_file()
8705 void *old = xa_load(&tctx->xa, (unsigned long)file); in io_uring_add_task_file()
8709 xa_store(&tctx->xa, (unsigned long)file, file, GFP_KERNEL); in io_uring_add_task_file()
8711 tctx->last = file; in io_uring_add_task_file()
8719 if (!tctx->sqpoll && (ctx->flags & IORING_SETUP_SQPOLL)) in io_uring_add_task_file()
8720 tctx->sqpoll = true; in io_uring_add_task_file()
8730 struct io_uring_task *tctx = current->io_uring; in io_uring_del_task_file() local
8732 if (tctx->last == file) in io_uring_del_task_file()
8733 tctx->last = NULL; in io_uring_del_task_file()
8734 file = xa_erase(&tctx->xa, (unsigned long)file); in io_uring_del_task_file()
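
io_uring_add_task_file() and io_uring_del_task_file() maintain the per-task registry of ring files: the xarray is keyed by the struct file pointer itself, and tctx->last is a one-slot cache that lets repeated submits against the same ring skip the xa_load(). A sketch of the add path under those assumptions; the name is hypothetical, and the file reference counting and xa_store() error handling of the real function are elided:

static int add_task_file_sketch(struct io_ring_ctx *ctx, struct file *file)
{
	struct io_uring_task *tctx = current->io_uring;

	if (tctx->last != file) {
		void *old = xa_load(&tctx->xa, (unsigned long)file);

		if (!old)	/* first submit against this ring */
			xa_store(&tctx->xa, (unsigned long)file, file,
				 GFP_KERNEL);
		tctx->last = file;
	}

	/* Sticky flag: this task has touched an SQPOLL ring, so
	 * tctx_inflight() must also scan other rings (line 8719). */
	if (!tctx->sqpoll && (ctx->flags & IORING_SETUP_SQPOLL))
		tctx->sqpoll = true;
	return 0;
}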
8758 struct io_uring_task *tctx = current->io_uring; in __io_uring_files_cancel() local
8763 atomic_inc(&tctx->in_idle); in __io_uring_files_cancel()
8765 xa_for_each(&tctx->xa, index, file) { in __io_uring_files_cancel()
8773 atomic_dec(&tctx->in_idle); in __io_uring_files_cancel()
8776 static s64 tctx_inflight(struct io_uring_task *tctx) in tctx_inflight() argument
8782 inflight = percpu_counter_sum(&tctx->inflight); in tctx_inflight()
8783 if (!tctx->sqpoll) in tctx_inflight()
8790 xa_for_each(&tctx->xa, index, file) { in tctx_inflight()
8809 struct io_uring_task *tctx = current->io_uring; in __io_uring_task_cancel() local
8814 atomic_inc(&tctx->in_idle); in __io_uring_task_cancel()
8818 inflight = tctx_inflight(tctx); in __io_uring_task_cancel()
8823 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE); in __io_uring_task_cancel()
8829 if (inflight != tctx_inflight(tctx)) in __io_uring_task_cancel()
8834 finish_wait(&tctx->wait, &wait); in __io_uring_task_cancel()
8835 atomic_dec(&tctx->in_idle); in __io_uring_task_cancel()
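
The cancel path ties the pieces together: __io_uring_task_cancel() marks the task idle, then repeatedly samples tctx_inflight() (which sums the percpu counter and, for sqpoll tasks, walks the xarray to add the SQPOLL rings' counts) and sleeps on tctx->wait until everything has drained; the wake_up() in the free paths shown earlier is what ends each sleep. A sketch of that drain loop, with the actual request cancellation call elided and a hypothetical name:

static void task_cancel_sketch(void)
{
	struct io_uring_task *tctx = current->io_uring;
	DEFINE_WAIT(wait);
	s64 inflight;

	atomic_inc(&tctx->in_idle);
	do {
		inflight = tctx_inflight(tctx);
		if (!inflight)
			break;
		prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
		/* Only sleep if no completion raced in between the
		 * sample above and prepare_to_wait() (line 8829). */
		if (inflight == tctx_inflight(tctx))
			schedule();
		finish_wait(&tctx->wait, &wait);
	} while (1);
	atomic_dec(&tctx->in_idle);
}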