Lines Matching refs:worker
144 static void io_wqe_dec_running(struct io_worker *worker);
151 static bool io_worker_get(struct io_worker *worker) in io_worker_get() argument
153 return refcount_inc_not_zero(&worker->ref); in io_worker_get()
156 static void io_worker_release(struct io_worker *worker) in io_worker_release() argument
158 if (refcount_dec_and_test(&worker->ref)) in io_worker_release()
159 complete(&worker->ref_done); in io_worker_release()
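
Taken together, these two helpers are the lookup-side reference count for a worker: a get only succeeds while the count is non-zero, and the final put signals a completion so the teardown path can wait for any stragglers. A minimal self-contained sketch of the same pattern, using hypothetical obj/obj_get/obj_put names rather than the io-wq API:

#include <linux/refcount.h>
#include <linux/completion.h>

struct obj {
	refcount_t ref;			/* starts at 1, owned by the object itself */
	struct completion ref_done;	/* completed when the last ref is dropped */
};

static bool obj_get(struct obj *o)
{
	/* fails once teardown has dropped the last reference */
	return refcount_inc_not_zero(&o->ref);
}

static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->ref))
		complete(&o->ref_done);
}
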
173 static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker) in io_wqe_get_acct() argument
175 return io_get_acct(worker->wqe, worker->flags & IO_WORKER_F_BOUND); in io_wqe_get_acct()
184 static void io_worker_cancel_cb(struct io_worker *worker) in io_worker_cancel_cb() argument
186 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in io_worker_cancel_cb()
187 struct io_wqe *wqe = worker->wqe; in io_worker_cancel_cb()
191 raw_spin_lock(&worker->wqe->lock); in io_worker_cancel_cb()
193 raw_spin_unlock(&worker->wqe->lock); in io_worker_cancel_cb()
195 clear_bit_unlock(0, &worker->create_state); in io_worker_cancel_cb()
196 io_worker_release(worker); in io_worker_cancel_cb()
201 struct io_worker *worker; in io_task_worker_match() local
205 worker = container_of(cb, struct io_worker, create_work); in io_task_worker_match()
206 return worker == data; in io_task_worker_match()
209 static void io_worker_exit(struct io_worker *worker) in io_worker_exit() argument
211 struct io_wqe *wqe = worker->wqe; in io_worker_exit()
216 io_task_worker_match, worker); in io_worker_exit()
220 io_worker_cancel_cb(worker); in io_worker_exit()
223 io_worker_release(worker); in io_worker_exit()
224 wait_for_completion(&worker->ref_done); in io_worker_exit()
227 if (worker->flags & IO_WORKER_F_FREE) in io_worker_exit()
228 hlist_nulls_del_rcu(&worker->nulls_node); in io_worker_exit()
229 list_del_rcu(&worker->all_list); in io_worker_exit()
231 io_wqe_dec_running(worker); in io_worker_exit()
232 worker->flags = 0; in io_worker_exit()
237 kfree_rcu(worker, rcu); in io_worker_exit()
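
io_worker_exit() is the consumer of that completion: it cancels any pending create request, drops the worker's own reference, and only unlinks and frees the worker once every transient reference has been released. A hedged sketch of the ordering, continuing the hypothetical obj example and assuming obj also embeds a struct list_head list and a struct rcu_head rcu:

static void obj_exit(struct obj *o)
{
	obj_put(o);				/* drop the initial self reference */
	wait_for_completion(&o->ref_done);	/* wait for all obj_get() holders */

	/* no new lookups can succeed now; unlink and free via RCU so
	 * lockless list walkers that raced with us stay safe */
	list_del_rcu(&o->list);
	kfree_rcu(o, rcu);
}
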
264 struct io_worker *worker; in io_wqe_activate_free_worker() local
271 hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) { in io_wqe_activate_free_worker()
272 if (!io_worker_get(worker)) in io_wqe_activate_free_worker()
274 if (io_wqe_get_acct(worker) != acct) { in io_wqe_activate_free_worker()
275 io_worker_release(worker); in io_wqe_activate_free_worker()
278 if (wake_up_process(worker->task)) { in io_wqe_activate_free_worker()
279 io_worker_release(worker); in io_wqe_activate_free_worker()
282 io_worker_release(worker); in io_wqe_activate_free_worker()
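
io_wqe_activate_free_worker() shows the reference count in action on the RCU free list: each candidate is pinned with io_worker_get() before being inspected or woken, and released again on every path. Based on the fragments above, the walk plausibly looks like this; the control flow and return values are inferred, so treat it as a sketch rather than the exact upstream body:

struct hlist_nulls_node *n;
struct io_worker *worker;

/* caller is expected to hold the RCU read lock */
hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) {
	if (!io_worker_get(worker))
		continue;		/* worker is exiting, skip it */
	if (io_wqe_get_acct(worker) != acct) {
		io_worker_release(worker);
		continue;		/* wrong bounded/unbounded class */
	}
	if (wake_up_process(worker->task)) {
		io_worker_release(worker);
		return true;		/* found and woke an idle worker */
	}
	io_worker_release(worker);
}
return false;
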
313 static void io_wqe_inc_running(struct io_worker *worker) in io_wqe_inc_running() argument
315 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in io_wqe_inc_running()
322 struct io_worker *worker; in create_worker_cb() local
328 worker = container_of(cb, struct io_worker, create_work); in create_worker_cb()
329 wqe = worker->wqe; in create_worker_cb()
331 acct = &wqe->acct[worker->create_index]; in create_worker_cb()
339 create_io_worker(wq, wqe, worker->create_index); in create_worker_cb()
344 clear_bit_unlock(0, &worker->create_state); in create_worker_cb()
345 io_worker_release(worker); in create_worker_cb()
348 static bool io_queue_worker_create(struct io_worker *worker, in io_queue_worker_create() argument
352 struct io_wqe *wqe = worker->wqe; in io_queue_worker_create()
358 if (!io_worker_get(worker)) in io_queue_worker_create()
366 if (test_bit(0, &worker->create_state) || in io_queue_worker_create()
367 test_and_set_bit_lock(0, &worker->create_state)) in io_queue_worker_create()
371 init_task_work(&worker->create_work, func); in io_queue_worker_create()
372 worker->create_index = acct->index; in io_queue_worker_create()
373 if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) { in io_queue_worker_create()
386 clear_bit_unlock(0, &worker->create_state); in io_queue_worker_create()
388 io_worker_release(worker); in io_queue_worker_create()
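
io_queue_worker_create() guards the deferred "create another worker" request with bit 0 of worker->create_state, used as a bit lock, so at most one create_work per worker is queued to the io_uring task at a time. A simplified, hedged reconstruction of the body from the fragments above; the error-path structure and omitted accounting are inferred:

if (!io_worker_get(worker))
	goto fail;
/* only one create request per worker may be in flight */
if (test_bit(0, &worker->create_state) ||
    test_and_set_bit_lock(0, &worker->create_state))
	goto fail_release;

init_task_work(&worker->create_work, func);
worker->create_index = acct->index;
if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
	/* func runs later in the context of wq->task and finishes creation */
	return true;
}

clear_bit_unlock(0, &worker->create_state);
fail_release:
	io_worker_release(worker);
fail:
	/* the real code also unwinds its running/worker accounting here */
	return false;
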
395 static void io_wqe_dec_running(struct io_worker *worker) in io_wqe_dec_running() argument
397 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in io_wqe_dec_running()
398 struct io_wqe *wqe = worker->wqe; in io_wqe_dec_running()
400 if (!(worker->flags & IO_WORKER_F_UP)) in io_wqe_dec_running()
410 io_queue_worker_create(worker, acct, create_worker_cb); in io_wqe_dec_running()
417 static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker) in __io_worker_busy() argument
419 if (worker->flags & IO_WORKER_F_FREE) { in __io_worker_busy()
420 worker->flags &= ~IO_WORKER_F_FREE; in __io_worker_busy()
422 hlist_nulls_del_init_rcu(&worker->nulls_node); in __io_worker_busy()
434 static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker) in __io_worker_idle() argument
437 if (!(worker->flags & IO_WORKER_F_FREE)) { in __io_worker_idle()
438 worker->flags |= IO_WORKER_F_FREE; in __io_worker_idle()
439 hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list); in __io_worker_idle()
467 struct io_worker *worker) in io_get_next_work() argument
473 struct io_wqe *wqe = worker->wqe; in io_get_next_work()
523 static void io_assign_current_work(struct io_worker *worker, in io_assign_current_work() argument
531 raw_spin_lock(&worker->lock); in io_assign_current_work()
532 worker->cur_work = work; in io_assign_current_work()
533 worker->next_work = NULL; in io_assign_current_work()
534 raw_spin_unlock(&worker->lock); in io_assign_current_work()
539 static void io_worker_handle_work(struct io_worker *worker) in io_worker_handle_work() argument
541 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in io_worker_handle_work()
542 struct io_wqe *wqe = worker->wqe; in io_worker_handle_work()
557 work = io_get_next_work(acct, worker); in io_worker_handle_work()
560 __io_worker_busy(wqe, worker); in io_worker_handle_work()
569 raw_spin_lock(&worker->lock); in io_worker_handle_work()
570 worker->next_work = work; in io_worker_handle_work()
571 raw_spin_unlock(&worker->lock); in io_worker_handle_work()
575 io_assign_current_work(worker, work); in io_worker_handle_work()
588 io_assign_current_work(worker, NULL); in io_worker_handle_work()
596 io_assign_current_work(worker, work); in io_worker_handle_work()
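
The fragments from io_worker_handle_work() outline the worker's main loop: take the next item off the queue, mark the worker busy, publish the item as next_work (and then cur_work) under worker->lock so cancellation can find it, and clear cur_work when done. A simplified skeleton with the hashed/linked-work handling elided; the queue lock name (acct->lock) is an assumption here:

for (;;) {
	struct io_wq_work *work;

	raw_spin_lock(&acct->lock);
	work = io_get_next_work(acct, worker);
	raw_spin_unlock(&acct->lock);
	if (!work)
		break;

	__io_worker_busy(wqe, worker);

	/* make the item visible to cancellation before it runs */
	raw_spin_lock(&worker->lock);
	worker->next_work = work;
	raw_spin_unlock(&worker->lock);

	io_assign_current_work(worker, work);
	/* ... execute the work, following hashed and linked items ... */
	io_assign_current_work(worker, NULL);
}
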
615 struct io_worker *worker = data; in io_wqe_worker() local
616 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in io_wqe_worker()
617 struct io_wqe *wqe = worker->wqe; in io_wqe_worker()
622 worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING); in io_wqe_worker()
632 io_worker_handle_work(worker); in io_wqe_worker()
643 __io_worker_idle(wqe, worker); in io_wqe_worker()
659 io_worker_handle_work(worker); in io_wqe_worker()
661 io_worker_exit(worker); in io_wqe_worker()
670 struct io_worker *worker = tsk->worker_private; in io_wq_worker_running() local
672 if (!worker) in io_wq_worker_running()
674 if (!(worker->flags & IO_WORKER_F_UP)) in io_wq_worker_running()
676 if (worker->flags & IO_WORKER_F_RUNNING) in io_wq_worker_running()
678 worker->flags |= IO_WORKER_F_RUNNING; in io_wq_worker_running()
679 io_wqe_inc_running(worker); in io_wq_worker_running()
688 struct io_worker *worker = tsk->worker_private; in io_wq_worker_sleeping() local
690 if (!worker) in io_wq_worker_sleeping()
692 if (!(worker->flags & IO_WORKER_F_UP)) in io_wq_worker_sleeping()
694 if (!(worker->flags & IO_WORKER_F_RUNNING)) in io_wq_worker_sleeping()
697 worker->flags &= ~IO_WORKER_F_RUNNING; in io_wq_worker_sleeping()
698 io_wqe_dec_running(worker); in io_wq_worker_sleeping()
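
io_wq_worker_running() and io_wq_worker_sleeping() are the scheduler-side hooks: they flip IO_WORKER_F_RUNNING and adjust the running count as the worker task wakes or blocks, which is what lets io_wqe_dec_running() queue creation of a replacement worker when the last runnable one goes to sleep. Reassembled from the fragments above, the sleeping side reads roughly:

void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->worker_private;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;
	io_wqe_dec_running(worker);
}
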
701 static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker, in io_init_new_worker() argument
704 tsk->worker_private = worker; in io_init_new_worker()
705 worker->task = tsk; in io_init_new_worker()
710 hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list); in io_init_new_worker()
711 list_add_tail_rcu(&worker->all_list, &wqe->all_list); in io_init_new_worker()
712 worker->flags |= IO_WORKER_F_FREE; in io_init_new_worker()
744 struct io_worker *worker; in create_worker_cont() local
748 worker = container_of(cb, struct io_worker, create_work); in create_worker_cont()
749 clear_bit_unlock(0, &worker->create_state); in create_worker_cont()
750 wqe = worker->wqe; in create_worker_cont()
751 tsk = create_io_thread(io_wqe_worker, worker, wqe->node); in create_worker_cont()
753 io_init_new_worker(wqe, worker, tsk); in create_worker_cont()
754 io_worker_release(worker); in create_worker_cont()
757 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in create_worker_cont()
775 kfree(worker); in create_worker_cont()
780 io_worker_release(worker); in create_worker_cont()
781 schedule_work(&worker->work); in create_worker_cont()
786 struct io_worker *worker = container_of(work, struct io_worker, work); in io_workqueue_create() local
787 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in io_workqueue_create()
789 if (!io_queue_worker_create(worker, acct, create_worker_cont)) in io_workqueue_create()
790 kfree(worker); in io_workqueue_create()
796 struct io_worker *worker; in create_io_worker() local
801 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node); in create_io_worker()
802 if (!worker) { in create_io_worker()
812 refcount_set(&worker->ref, 1); in create_io_worker()
813 worker->wqe = wqe; in create_io_worker()
814 raw_spin_lock_init(&worker->lock); in create_io_worker()
815 init_completion(&worker->ref_done); in create_io_worker()
818 worker->flags |= IO_WORKER_F_BOUND; in create_io_worker()
820 tsk = create_io_thread(io_wqe_worker, worker, wqe->node); in create_io_worker()
822 io_init_new_worker(wqe, worker, tsk); in create_io_worker()
824 kfree(worker); in create_io_worker()
827 INIT_WORK(&worker->work, io_workqueue_create); in create_io_worker()
828 schedule_work(&worker->work); in create_io_worker()
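
create_io_worker() allocates and initialises the worker (initial reference of 1, its lock, the ref_done completion), then tries to spawn the thread; if create_io_thread() fails with a retryable error it defers another attempt to a workqueue via io_workqueue_create() instead of giving up. A trimmed sketch of that fallback, with error accounting omitted and io_should_retry_thread() assumed from the surrounding source:

refcount_set(&worker->ref, 1);
worker->wqe = wqe;
raw_spin_lock_init(&worker->lock);
init_completion(&worker->ref_done);

tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
if (!IS_ERR(tsk)) {
	io_init_new_worker(wqe, worker, tsk);	/* thread started, publish it */
} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
	kfree(worker);				/* hard failure, drop it */
} else {
	INIT_WORK(&worker->work, io_workqueue_create);
	schedule_work(&worker->work);		/* retry from a workqueue */
}
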
842 struct io_worker *worker; in io_wq_for_each_worker() local
845 list_for_each_entry_rcu(worker, &wqe->all_list, all_list) { in io_wq_for_each_worker()
846 if (io_worker_get(worker)) { in io_wq_for_each_worker()
848 if (worker->task) in io_wq_for_each_worker()
849 ret = func(worker, data); in io_wq_for_each_worker()
850 io_worker_release(worker); in io_wq_for_each_worker()
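
io_wq_for_each_worker() is the generic iterator that the wake, cancel and affinity callbacks below run through: it walks all_list under RCU, pins each worker with io_worker_get(), and skips workers whose task has not been set up yet. A hedged reconstruction of the loop body:

bool ret = false;

list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
	if (io_worker_get(worker)) {
		/* worker->task may not be set yet during creation */
		if (worker->task)
			ret = func(worker, data);
		io_worker_release(worker);
		if (ret)
			break;
	}
}
return ret;
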
859 static bool io_wq_worker_wake(struct io_worker *worker, void *data) in io_wq_worker_wake() argument
861 __set_notify_signal(worker->task); in io_wq_worker_wake()
862 wake_up_process(worker->task); in io_wq_worker_wake()
975 static bool __io_wq_worker_cancel(struct io_worker *worker, in __io_wq_worker_cancel() argument
981 __set_notify_signal(worker->task); in __io_wq_worker_cancel()
988 static bool io_wq_worker_cancel(struct io_worker *worker, void *data) in io_wq_worker_cancel() argument
996 raw_spin_lock(&worker->lock); in io_wq_worker_cancel()
997 if (__io_wq_worker_cancel(worker, match, worker->cur_work) || in io_wq_worker_cancel()
998 __io_wq_worker_cancel(worker, match, worker->next_work)) in io_wq_worker_cancel()
1000 raw_spin_unlock(&worker->lock); in io_wq_worker_cancel()
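
io_wq_worker_cancel() is one such callback; it takes worker->lock so that cur_work and next_work cannot change or go out of scope while the cancel match runs against them, mirroring the publication done in io_worker_handle_work(). The essential locking shape, with the match bookkeeping simplified to an illustrative flag:

raw_spin_lock(&worker->lock);
if (__io_wq_worker_cancel(worker, match, worker->cur_work) ||
    __io_wq_worker_cancel(worker, match, worker->next_work))
	found = true;	/* 'found' stands in for the real match accounting */
raw_spin_unlock(&worker->lock);
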
1211 struct io_worker *worker; in io_task_work_match() local
1215 worker = container_of(cb, struct io_worker, create_work); in io_task_work_match()
1216 return worker->wqe->wq == data; in io_task_work_match()
1229 struct io_worker *worker; in io_wq_cancel_tw_create() local
1231 worker = container_of(cb, struct io_worker, create_work); in io_wq_cancel_tw_create()
1232 io_worker_cancel_cb(worker); in io_wq_cancel_tw_create()
1297 static bool io_wq_worker_affinity(struct io_worker *worker, void *data) in io_wq_worker_affinity() argument
1302 cpumask_set_cpu(od->cpu, worker->wqe->cpu_mask); in io_wq_worker_affinity()
1304 cpumask_clear_cpu(od->cpu, worker->wqe->cpu_mask); in io_wq_worker_affinity()
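
Finally, io_wq_worker_affinity() is the per-worker callback used on CPU hotplug: it adds or removes the hotplugged CPU from the wqe's allowed mask depending on whether the CPU came online. A sketch assuming a small helper struct (called online_data here) carrying the CPU number and its new state:

struct online_data {
	unsigned int cpu;
	bool online;
};

static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{
	struct online_data *od = data;

	if (od->online)
		cpumask_set_cpu(od->cpu, worker->wqe->cpu_mask);
	else
		cpumask_clear_cpu(od->cpu, worker->wqe->cpu_mask);
	return false;
}
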