Lines Matching refs:sqd

27 void io_sq_thread_unpark(struct io_sq_data *sqd) in io_sq_thread_unpark() argument
28 __releases(&sqd->lock) in io_sq_thread_unpark()
30 WARN_ON_ONCE(sqd->thread == current); in io_sq_thread_unpark()
36 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); in io_sq_thread_unpark()
37 if (atomic_dec_return(&sqd->park_pending)) in io_sq_thread_unpark()
38 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); in io_sq_thread_unpark()
39 mutex_unlock(&sqd->lock); in io_sq_thread_unpark()
42 void io_sq_thread_park(struct io_sq_data *sqd) in io_sq_thread_park() argument
43 __acquires(&sqd->lock) in io_sq_thread_park()
45 WARN_ON_ONCE(sqd->thread == current); in io_sq_thread_park()
47 atomic_inc(&sqd->park_pending); in io_sq_thread_park()
48 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); in io_sq_thread_park()
49 mutex_lock(&sqd->lock); in io_sq_thread_park()
50 if (sqd->thread) in io_sq_thread_park()
51 wake_up_process(sqd->thread); in io_sq_thread_park()
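
Taken together, these matches appear to come from the SQPOLL support code in io_uring/sqpoll.c in the Linux kernel. The first two groups are the park/unpark pair; reassembled, they read roughly as below (braces, blank lines and comments are mine, only the matched lines are verbatim):

	void io_sq_thread_park(struct io_sq_data *sqd)
		__acquires(&sqd->lock)
	{
		WARN_ON_ONCE(sqd->thread == current);

		atomic_inc(&sqd->park_pending);
		set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
		mutex_lock(&sqd->lock);
		if (sqd->thread)
			wake_up_process(sqd->thread);	/* make sure the thread notices SHOULD_PARK */
	}

	void io_sq_thread_unpark(struct io_sq_data *sqd)
		__releases(&sqd->lock)
	{
		WARN_ON_ONCE(sqd->thread == current);

		/* keep SHOULD_PARK set while other parkers are still pending */
		clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
		if (atomic_dec_return(&sqd->park_pending))
			set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
		mutex_unlock(&sqd->lock);
	}

io_sq_thread_park() returns with sqd->lock held and the SQPOLL thread quiesced; io_sq_thread_unpark() drops the lock again, and the park_pending counter keeps the bit set for as long as any concurrent parker is still inside its critical section.
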
54 void io_sq_thread_stop(struct io_sq_data *sqd) in io_sq_thread_stop() argument
56 WARN_ON_ONCE(sqd->thread == current); in io_sq_thread_stop()
57 WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state)); in io_sq_thread_stop()
59 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state); in io_sq_thread_stop()
60 mutex_lock(&sqd->lock); in io_sq_thread_stop()
61 if (sqd->thread) in io_sq_thread_stop()
62 wake_up_process(sqd->thread); in io_sq_thread_stop()
63 mutex_unlock(&sqd->lock); in io_sq_thread_stop()
64 wait_for_completion(&sqd->exited); in io_sq_thread_stop()
67 void io_put_sq_data(struct io_sq_data *sqd) in io_put_sq_data() argument
69 if (refcount_dec_and_test(&sqd->refs)) { in io_put_sq_data()
70 WARN_ON_ONCE(atomic_read(&sqd->park_pending)); in io_put_sq_data()
72 io_sq_thread_stop(sqd); in io_put_sq_data()
73 kfree(sqd); in io_put_sq_data()
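
The stop/teardown pair fits together the same way; a sketch assembled from the matched lines (comments mine):

	void io_sq_thread_stop(struct io_sq_data *sqd)
	{
		WARN_ON_ONCE(sqd->thread == current);
		WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));

		set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
		mutex_lock(&sqd->lock);
		if (sqd->thread)
			wake_up_process(sqd->thread);
		mutex_unlock(&sqd->lock);
		wait_for_completion(&sqd->exited);	/* completed at the tail of io_sq_thread() */
	}

	void io_put_sq_data(struct io_sq_data *sqd)
	{
		if (refcount_dec_and_test(&sqd->refs)) {
			WARN_ON_ONCE(atomic_read(&sqd->park_pending));

			io_sq_thread_stop(sqd);
			kfree(sqd);
		}
	}

Only the last reference drop stops the thread; io_sq_thread_stop() is synchronous and blocks on sqd->exited until io_sq_thread() has run to completion.
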
77 static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd) in io_sqd_update_thread_idle() argument
82 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) in io_sqd_update_thread_idle()
84 sqd->sq_thread_idle = sq_thread_idle; in io_sqd_update_thread_idle()
89 struct io_sq_data *sqd = ctx->sq_data; in io_sq_thread_finish() local
91 if (sqd) { in io_sq_thread_finish()
92 io_sq_thread_park(sqd); in io_sq_thread_finish()
94 io_sqd_update_thread_idle(sqd); in io_sq_thread_finish()
95 io_sq_thread_unpark(sqd); in io_sq_thread_finish()
97 io_put_sq_data(sqd); in io_sq_thread_finish()
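
io_sqd_update_thread_idle() recomputes the shared idle window across all attached rings, and io_sq_thread_finish() shows the canonical caller pattern: every change to sqd state is bracketed by park/unpark, followed by a reference drop. A sketch, with lines that did not match refs:sqd filled in from context (treat those as an approximation):

	static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd)
	{
		struct io_ring_ctx *ctx;
		unsigned sq_thread_idle = 0;

		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
			sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
		sqd->sq_thread_idle = sq_thread_idle;
	}

	void io_sq_thread_finish(struct io_ring_ctx *ctx)
	{
		struct io_sq_data *sqd = ctx->sq_data;

		if (sqd) {
			io_sq_thread_park(sqd);
			list_del_init(&ctx->sqd_list);	/* detach this ring from the shared sqd */
			io_sqd_update_thread_idle(sqd);
			io_sq_thread_unpark(sqd);

			io_put_sq_data(sqd);
			ctx->sq_data = NULL;
		}
	}
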
105 struct io_sq_data *sqd; in io_attach_sq_data() local
117 sqd = ctx_attach->sq_data; in io_attach_sq_data()
118 if (!sqd) { in io_attach_sq_data()
122 if (sqd->task_tgid != current->tgid) { in io_attach_sq_data()
127 refcount_inc(&sqd->refs); in io_attach_sq_data()
129 return sqd; in io_attach_sq_data()
135 struct io_sq_data *sqd; in io_get_sq_data() local
139 sqd = io_attach_sq_data(p); in io_get_sq_data()
140 if (!IS_ERR(sqd)) { in io_get_sq_data()
142 return sqd; in io_get_sq_data()
145 if (PTR_ERR(sqd) != -EPERM) in io_get_sq_data()
146 return sqd; in io_get_sq_data()
149 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL); in io_get_sq_data()
150 if (!sqd) in io_get_sq_data()
153 atomic_set(&sqd->park_pending, 0); in io_get_sq_data()
154 refcount_set(&sqd->refs, 1); in io_get_sq_data()
155 INIT_LIST_HEAD(&sqd->ctx_list); in io_get_sq_data()
156 mutex_init(&sqd->lock); in io_get_sq_data()
157 init_waitqueue_head(&sqd->wait); in io_get_sq_data()
158 init_completion(&sqd->exited); in io_get_sq_data()
159 return sqd; in io_get_sq_data()
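
io_attach_sq_data() resolves p->wq_fd to another ring and borrows its sq_data, but only within the same thread group (the task_tgid check, otherwise -EPERM); io_get_sq_data() prefers attaching and falls back to allocating a fresh structure. A sketch of the latter, with the non-matching lines (flag test, *attached handling, error returns) reconstructed from context:

	static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
						 bool *attached)
	{
		struct io_sq_data *sqd;

		*attached = false;
		if (p->flags & IORING_SETUP_ATTACH_WQ) {
			sqd = io_attach_sq_data(p);	/* takes a reference on the donor's sqd */
			if (!IS_ERR(sqd)) {
				*attached = true;
				return sqd;
			}
			/* only -EPERM (foreign tgid) falls back to a private sqd */
			if (PTR_ERR(sqd) != -EPERM)
				return sqd;
		}

		sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
		if (!sqd)
			return ERR_PTR(-ENOMEM);

		atomic_set(&sqd->park_pending, 0);
		refcount_set(&sqd->refs, 1);
		INIT_LIST_HEAD(&sqd->ctx_list);
		mutex_init(&sqd->lock);
		init_waitqueue_head(&sqd->wait);
		init_completion(&sqd->exited);
		return sqd;
	}
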
162 static inline bool io_sqd_events_pending(struct io_sq_data *sqd) in io_sqd_events_pending() argument
164 return READ_ONCE(sqd->state); in io_sqd_events_pending()
205 static bool io_sqd_handle_event(struct io_sq_data *sqd) in io_sqd_handle_event() argument
210 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) || in io_sqd_handle_event()
212 mutex_unlock(&sqd->lock); in io_sqd_handle_event()
216 mutex_lock(&sqd->lock); in io_sqd_handle_event()
218 return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state); in io_sqd_handle_event()
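
io_sqd_events_pending() is just a lockless peek at sqd->state; io_sqd_handle_event() is where the SQPOLL thread actually honours park and stop requests. A sketch, with the signal-handling middle elided because those lines do not reference sqd:

	static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
	{
		return READ_ONCE(sqd->state);
	}

	/* called by the SQPOLL thread with sqd->lock held */
	static bool io_sqd_handle_event(struct io_sq_data *sqd)
	{
		bool did_sig = false;

		if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
		    signal_pending(current)) {
			mutex_unlock(&sqd->lock);
			/* elided: signal delivery sets did_sig; while the lock is
			 * dropped, a task blocked in io_sq_thread_park() gets to
			 * run its critical section */
			mutex_lock(&sqd->lock);
		}
		return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
	}

The unlock/relock here is the other half of the parking handshake: io_sq_thread_park() sets SHOULD_PARK and then blocks in mutex_lock(&sqd->lock), so the SQPOLL thread dropping the lock is what lets the parker proceed.
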
223 struct io_sq_data *sqd = data; in io_sq_thread() local
229 snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid); in io_sq_thread()
232 if (sqd->sq_cpu != -1) in io_sq_thread()
233 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu)); in io_sq_thread()
237 mutex_lock(&sqd->lock); in io_sq_thread()
241 if (io_sqd_events_pending(sqd) || signal_pending(current)) { in io_sq_thread()
242 if (io_sqd_handle_event(sqd)) in io_sq_thread()
244 timeout = jiffies + sqd->sq_thread_idle; in io_sq_thread()
247 cap_entries = !list_is_singular(&sqd->ctx_list); in io_sq_thread()
248 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { in io_sq_thread()
259 timeout = jiffies + sqd->sq_thread_idle; in io_sq_thread()
261 mutex_unlock(&sqd->lock); in io_sq_thread()
263 mutex_lock(&sqd->lock); in io_sq_thread()
268 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE); in io_sq_thread()
269 if (!io_sqd_events_pending(sqd) && !task_work_pending(current)) { in io_sq_thread()
272 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { in io_sq_thread()
294 mutex_unlock(&sqd->lock); in io_sq_thread()
296 mutex_lock(&sqd->lock); in io_sq_thread()
298 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) in io_sq_thread()
303 finish_wait(&sqd->wait, &wait); in io_sq_thread()
304 timeout = jiffies + sqd->sq_thread_idle; in io_sq_thread()
307 io_uring_cancel_generic(true, sqd); in io_sq_thread()
308 sqd->thread = NULL; in io_sq_thread()
309 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) in io_sq_thread()
312 mutex_unlock(&sqd->lock); in io_sq_thread()
314 complete(&sqd->exited); in io_sq_thread()
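
The io_sq_thread() matches outline the thread's whole life cycle: set its comm name and CPU affinity, loop under sqd->lock submitting for every attached ring until a stop event, then tear down. A heavily condensed sketch of that shape (per-ctx submission, spin/idle accounting and ring-flag handling are elided or reduced to comments, so this is approximate):

	static int io_sq_thread(void *data)
	{
		struct io_sq_data *sqd = data;
		struct io_ring_ctx *ctx;
		unsigned long timeout = 0;
		char buf[TASK_COMM_LEN];
		DEFINE_WAIT(wait);

		snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
		set_task_comm(current, buf);

		if (sqd->sq_cpu != -1)
			set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));

		mutex_lock(&sqd->lock);
		while (1) {
			bool cap_entries;

			if (io_sqd_events_pending(sqd) || signal_pending(current)) {
				if (io_sqd_handle_event(sqd))
					break;		/* stop was requested */
				timeout = jiffies + sqd->sq_thread_idle;
			}

			/* submit on every attached ring, splitting the budget
			 * when more than one ctx shares this thread */
			cap_entries = !list_is_singular(&sqd->ctx_list);
			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
				/* elided: per-ctx SQE submission, capped via cap_entries */
			}

			/* elided: if progress was made, or the idle window has not
			 * expired, optionally drop sqd->lock to reschedule, then
			 * continue the loop */

			prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
			if (!io_sqd_events_pending(sqd) && !task_work_pending(current)) {
				list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
					/* elided: advertise the need-wakeup flag on the ring */
				}
				mutex_unlock(&sqd->lock);
				schedule();	/* sleep until userspace or a parker wakes us */
				mutex_lock(&sqd->lock);
				list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
					/* elided: clear the need-wakeup flag again */
				}
			}
			finish_wait(&sqd->wait, &wait);
			timeout = jiffies + sqd->sq_thread_idle;
		}

		io_uring_cancel_generic(true, sqd);
		sqd->thread = NULL;
		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
			/* elided: wake any userspace waiters on each ring */
		}
		mutex_unlock(&sqd->lock);

		complete(&sqd->exited);		/* releases io_sq_thread_stop() */
		do_exit(0);
	}
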
356 struct io_sq_data *sqd; in io_sq_offload_create() local
363 sqd = io_get_sq_data(p, &attached); in io_sq_offload_create()
364 if (IS_ERR(sqd)) { in io_sq_offload_create()
365 ret = PTR_ERR(sqd); in io_sq_offload_create()
370 ctx->sq_data = sqd; in io_sq_offload_create()
375 io_sq_thread_park(sqd); in io_sq_offload_create()
376 list_add(&ctx->sqd_list, &sqd->ctx_list); in io_sq_offload_create()
377 io_sqd_update_thread_idle(sqd); in io_sq_offload_create()
379 ret = (attached && !sqd->thread) ? -ENXIO : 0; in io_sq_offload_create()
380 io_sq_thread_unpark(sqd); in io_sq_offload_create()
393 sqd->sq_cpu = cpu; in io_sq_offload_create()
395 sqd->sq_cpu = -1; in io_sq_offload_create()
398 sqd->task_pid = current->pid; in io_sq_offload_create()
399 sqd->task_tgid = current->tgid; in io_sq_offload_create()
400 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE); in io_sq_offload_create()
406 sqd->thread = tsk; in io_sq_offload_create()
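
io_sq_offload_create() ties it all together when a ring is created with IORING_SETUP_SQPOLL: obtain (or share) an io_sq_data, attach this ctx under park, and spawn the thread only when the sqd is newly created. A condensed sketch of that branch (validation, credentials and error unwinding elided, so the control flow is approximate):

	__cold int io_sq_offload_create(struct io_ring_ctx *ctx,
					struct io_uring_params *p)
	{
		struct task_struct *tsk;
		struct io_sq_data *sqd;
		bool attached;
		int ret;

		sqd = io_get_sq_data(p, &attached);
		if (IS_ERR(sqd))
			return PTR_ERR(sqd);

		ctx->sq_data = sqd;
		/* elided: ctx->sq_thread_idle is derived from p->sq_thread_idle */

		io_sq_thread_park(sqd);
		list_add(&ctx->sqd_list, &sqd->ctx_list);
		io_sqd_update_thread_idle(sqd);
		/* refuse to attach to an sqd whose thread has already exited */
		ret = (attached && !sqd->thread) ? -ENXIO : 0;
		io_sq_thread_unpark(sqd);
		if (ret)
			return ret;
		if (attached)
			return 0;	/* reusing another ring's SQPOLL thread */

		if (p->flags & IORING_SETUP_SQ_AFF) {
			int cpu = p->sq_thread_cpu;

			/* elided: reject out-of-range or offline CPUs */
			sqd->sq_cpu = cpu;
		} else {
			sqd->sq_cpu = -1;
		}

		sqd->task_pid = current->pid;
		sqd->task_tgid = current->tgid;
		tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
		if (IS_ERR(tsk))
			return PTR_ERR(tsk);

		sqd->thread = tsk;
		/* elided: allocate the task's io_uring context and wake tsk */
		return 0;
	}
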
428 struct io_sq_data *sqd = ctx->sq_data; in io_sqpoll_wq_cpu_affinity() local
431 if (sqd) { in io_sqpoll_wq_cpu_affinity()
432 io_sq_thread_park(sqd); in io_sqpoll_wq_cpu_affinity()
434 if (sqd->thread) in io_sqpoll_wq_cpu_affinity()
435 ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask); in io_sqpoll_wq_cpu_affinity()
436 io_sq_thread_unpark(sqd); in io_sqpoll_wq_cpu_affinity()
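
io_sqpoll_wq_cpu_affinity() is one more instance of the park/unpark bracket: quiesce the thread, check it is still alive, then adjust its io-wq affinity. Sketch with the non-matching lines (return type, locals, final return) filled in from context:

	__cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx,
					     cpumask_var_t mask)
	{
		struct io_sq_data *sqd = ctx->sq_data;
		int ret = -EINVAL;

		if (sqd) {
			io_sq_thread_park(sqd);
			/* only touch the io-wq of a still-live SQPOLL thread */
			if (sqd->thread)
				ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
			io_sq_thread_unpark(sqd);
		}
		return ret;
	}
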