Lines matching refs: worker (Linux kernel, io_uring/io-wq.c)

Each entry below shows the source line number, the matching line, and the
enclosing function; the trailing "argument"/"local" tag marks how "worker"
is used on that line.

137 static void io_wq_dec_running(struct io_worker *worker);
144 static bool io_worker_get(struct io_worker *worker) in io_worker_get() argument
146 return refcount_inc_not_zero(&worker->ref); in io_worker_get()
149 static void io_worker_release(struct io_worker *worker) in io_worker_release() argument
151 if (refcount_dec_and_test(&worker->ref)) in io_worker_release()
152 complete(&worker->ref_done); in io_worker_release()
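
io_worker_get()/io_worker_release() are the classic try-get refcount pair: a lookup may only take a reference while the count is still non-zero, and the final put completes ref_done so the exiting worker can make progress. A minimal userspace sketch of the same pattern, using C11 atomics (the names, and the atomic_bool standing in for struct completion, are illustrative, not the kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct worker {
        atomic_int ref;
        atomic_bool ref_done;           /* stands in for struct completion */
    };

    /* Take a reference only while the worker is still alive (ref > 0). */
    static bool worker_get(struct worker *w)
    {
        int old = atomic_load(&w->ref);

        while (old > 0)
            if (atomic_compare_exchange_weak(&w->ref, &old, old + 1))
                return true;
        return false;                   /* count already hit zero: exiting */
    }

    /* Drop a reference; the final put signals the exiting worker. */
    static void worker_release(struct worker *w)
    {
        if (atomic_fetch_sub(&w->ref, 1) == 1)
            atomic_store(&w->ref_done, true);   /* complete(&ref_done) */
    }
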
166 static inline struct io_wq_acct *io_wq_get_acct(struct io_worker *worker) in io_wq_get_acct() argument
168 return io_get_acct(worker->wq, worker->flags & IO_WORKER_F_BOUND); in io_wq_get_acct()
179 struct io_worker *worker = current->worker_private; in io_wq_worker_stopped() local
184 return test_bit(IO_WQ_BIT_EXIT, &worker->wq->state); in io_wq_worker_stopped()
187 static void io_worker_cancel_cb(struct io_worker *worker) in io_worker_cancel_cb() argument
189 struct io_wq_acct *acct = io_wq_get_acct(worker); in io_worker_cancel_cb()
190 struct io_wq *wq = worker->wq; in io_worker_cancel_cb()
197 clear_bit_unlock(0, &worker->create_state); in io_worker_cancel_cb()
198 io_worker_release(worker); in io_worker_cancel_cb()
203 struct io_worker *worker; in io_task_worker_match() local
207 worker = container_of(cb, struct io_worker, create_work); in io_task_worker_match()
208 return worker == data; in io_task_worker_match()
211 static void io_worker_exit(struct io_worker *worker) in io_worker_exit() argument
213 struct io_wq *wq = worker->wq; in io_worker_exit()
217 io_task_worker_match, worker); in io_worker_exit()
221 io_worker_cancel_cb(worker); in io_worker_exit()
224 io_worker_release(worker); in io_worker_exit()
225 wait_for_completion(&worker->ref_done); in io_worker_exit()
228 if (worker->flags & IO_WORKER_F_FREE) in io_worker_exit()
229 hlist_nulls_del_rcu(&worker->nulls_node); in io_worker_exit()
230 list_del_rcu(&worker->all_list); in io_worker_exit()
232 io_wq_dec_running(worker); in io_worker_exit()
240 kfree_rcu(worker, rcu); in io_worker_exit()
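
io_worker_exit() is the teardown half of that refcount: the worker drops its own reference, blocks in wait_for_completion(&worker->ref_done) until every concurrent holder has released, unlinks itself from the free and all-worker lists, and frees via kfree_rcu() so lockless walkers stay safe. A crude sketch of the ordering, continuing the userspace analogue above (the yield loop stands in for wait_for_completion()):

    #include <sched.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct worker {
        atomic_int ref;
        atomic_bool ref_done;
    };

    static void worker_release(struct worker *w)
    {
        if (atomic_fetch_sub(&w->ref, 1) == 1)
            atomic_store(&w->ref_done, true);
    }

    static void worker_exit(struct worker *w)
    {
        worker_release(w);              /* drop the worker's own reference */
        while (!atomic_load(&w->ref_done))
            sched_yield();              /* wait_for_completion(&ref_done) */
        /* no other reference exists now: safe to unlink and free */
        free(w);                        /* the kernel defers this via kfree_rcu() */
    }
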
275 struct io_worker *worker; in io_wq_activate_free_worker() local
282 hlist_nulls_for_each_entry_rcu(worker, n, &wq->free_list, nulls_node) { in io_wq_activate_free_worker()
283 if (!io_worker_get(worker)) in io_wq_activate_free_worker()
285 if (io_wq_get_acct(worker) != acct) { in io_wq_activate_free_worker()
286 io_worker_release(worker); in io_wq_activate_free_worker()
294 wake_up_process(worker->task); in io_wq_activate_free_worker()
295 io_worker_release(worker); in io_wq_activate_free_worker()
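
io_wq_activate_free_worker() is the canonical lookup against that refcount: walk the RCU-protected free list, skip workers whose reference can no longer be taken (they are exiting), skip workers bound to a different accounting class, and wake the first match. A rough userspace sketch of the scan; a plain singly linked list stands in for the hlist_nulls, and the caller is assumed to hold whatever lock replaces the RCU read section:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct acct { int index; };

    struct worker {
        atomic_int ref;
        struct acct *acct;
        struct worker *next;            /* free-list link */
        void (*wake)(struct worker *);  /* stands in for wake_up_process() */
    };

    static bool worker_get(struct worker *w)    /* try-get, as sketched above */
    {
        int old = atomic_load(&w->ref);

        while (old > 0)
            if (atomic_compare_exchange_weak(&w->ref, &old, old + 1))
                return true;
        return false;
    }

    static void worker_release(struct worker *w)
    {
        atomic_fetch_sub(&w->ref, 1);   /* final-put handling elided here */
    }

    static bool activate_free_worker(struct worker *free_list, struct acct *acct)
    {
        for (struct worker *w = free_list; w; w = w->next) {
            if (!worker_get(w))
                continue;               /* worker is already exiting */
            if (w->acct != acct) {
                worker_release(w);      /* wrong class, keep looking */
                continue;
            }
            w->wake(w);
            worker_release(w);
            return true;
        }
        return false;
    }
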
327 static void io_wq_inc_running(struct io_worker *worker) in io_wq_inc_running() argument
329 struct io_wq_acct *acct = io_wq_get_acct(worker); in io_wq_inc_running()
336 struct io_worker *worker; in create_worker_cb() local
342 worker = container_of(cb, struct io_worker, create_work); in create_worker_cb()
343 wq = worker->wq; in create_worker_cb()
344 acct = &wq->acct[worker->create_index]; in create_worker_cb()
353 create_io_worker(wq, worker->create_index); in create_worker_cb()
358 clear_bit_unlock(0, &worker->create_state); in create_worker_cb()
359 io_worker_release(worker); in create_worker_cb()
362 static bool io_queue_worker_create(struct io_worker *worker, in io_queue_worker_create() argument
366 struct io_wq *wq = worker->wq; in io_queue_worker_create()
371 if (!io_worker_get(worker)) in io_queue_worker_create()
379 if (test_bit(0, &worker->create_state) || in io_queue_worker_create()
380 test_and_set_bit_lock(0, &worker->create_state)) in io_queue_worker_create()
384 init_task_work(&worker->create_work, func); in io_queue_worker_create()
385 worker->create_index = acct->index; in io_queue_worker_create()
386 if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) { in io_queue_worker_create()
399 clear_bit_unlock(0, &worker->create_state); in io_queue_worker_create()
401 io_worker_release(worker); in io_queue_worker_create()
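
io_queue_worker_create() guards deferred worker creation with bit 0 of create_state used as a try-lock: test_bit() is a cheap pre-check, test_and_set_bit_lock() actually takes the bit, and clear_bit_unlock() (in the callback, or on the failure path) drops it, so at most one creation request per worker is ever queued via task_work_add(). A minimal sketch of that single-slot guard, with atomic_flag standing in for the bit lock and a direct call standing in for the task_work deferral:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct worker {
        atomic_flag create_pending;     /* bit 0 of create_state */
    };

    static bool queue_worker_create(struct worker *w,
                                    void (*func)(struct worker *))
    {
        /* A request is already queued, or we lost the race for the bit. */
        if (atomic_flag_test_and_set(&w->create_pending))
            return false;
        func(w);                        /* kernel defers via task_work_add() */
        return true;
    }

    /* Called from the deferred callback once creation has been handled. */
    static void create_done(struct worker *w)
    {
        atomic_flag_clear(&w->create_pending);  /* clear_bit_unlock() */
    }
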
408 static void io_wq_dec_running(struct io_worker *worker) in io_wq_dec_running() argument
410 struct io_wq_acct *acct = io_wq_get_acct(worker); in io_wq_dec_running()
411 struct io_wq *wq = worker->wq; in io_wq_dec_running()
413 if (!(worker->flags & IO_WORKER_F_UP)) in io_wq_dec_running()
424 io_queue_worker_create(worker, acct, create_worker_cb); in io_wq_dec_running()
431 static void __io_worker_busy(struct io_wq *wq, struct io_worker *worker) in __io_worker_busy() argument
433 if (worker->flags & IO_WORKER_F_FREE) { in __io_worker_busy()
434 worker->flags &= ~IO_WORKER_F_FREE; in __io_worker_busy()
436 hlist_nulls_del_init_rcu(&worker->nulls_node); in __io_worker_busy()
444 static void __io_worker_idle(struct io_wq *wq, struct io_worker *worker) in __io_worker_idle() argument
447 if (!(worker->flags & IO_WORKER_F_FREE)) { in __io_worker_idle()
448 worker->flags |= IO_WORKER_F_FREE; in __io_worker_idle()
449 hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list); in __io_worker_idle()
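
__io_worker_busy()/__io_worker_idle() keep the IO_WORKER_F_FREE flag and free-list membership in lockstep: picking up work removes the worker from the free list, running out of work parks it back so io_wq_activate_free_worker() can find it. A small sketch of that invariant; a plain list under a caller-held lock stands in for the RCU nulls list:

    enum { WORKER_F_FREE = 1 << 0 };

    struct worker {
        unsigned int flags;
        struct worker *next;
    };

    struct wq { struct worker *free_list; };

    /* The worker picked up work: leave the free list. */
    static void worker_busy(struct wq *wq, struct worker *w)
    {
        if (!(w->flags & WORKER_F_FREE))
            return;
        w->flags &= ~WORKER_F_FREE;
        for (struct worker **p = &wq->free_list; *p; p = &(*p)->next) {
            if (*p == w) {
                *p = w->next;
                break;
            }
        }
    }

    /* No work left: park on the free list so wakeups can find us. */
    static void worker_idle(struct wq *wq, struct worker *w)
    {
        if (w->flags & WORKER_F_FREE)
            return;
        w->flags |= WORKER_F_FREE;
        w->next = wq->free_list;
        wq->free_list = w;
    }
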
476 struct io_worker *worker) in io_get_next_work() argument
482 struct io_wq *wq = worker->wq; in io_get_next_work()
532 static void io_assign_current_work(struct io_worker *worker, in io_assign_current_work() argument
540 raw_spin_lock(&worker->lock); in io_assign_current_work()
541 worker->cur_work = work; in io_assign_current_work()
542 worker->next_work = NULL; in io_assign_current_work()
543 raw_spin_unlock(&worker->lock); in io_assign_current_work()
550 struct io_worker *worker) in io_worker_handle_work() argument
553 struct io_wq *wq = worker->wq; in io_worker_handle_work()
566 work = io_get_next_work(acct, worker); in io_worker_handle_work()
569 __io_worker_busy(wq, worker); in io_worker_handle_work()
578 raw_spin_lock(&worker->lock); in io_worker_handle_work()
579 worker->next_work = work; in io_worker_handle_work()
580 raw_spin_unlock(&worker->lock); in io_worker_handle_work()
584 io_assign_current_work(worker, work); in io_worker_handle_work()
597 io_assign_current_work(worker, NULL); in io_worker_handle_work()
605 io_assign_current_work(worker, work); in io_worker_handle_work()
628 struct io_worker *worker = data; in io_wq_worker() local
629 struct io_wq_acct *acct = io_wq_get_acct(worker); in io_wq_worker()
630 struct io_wq *wq = worker->wq; in io_wq_worker()
634 worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING); in io_wq_worker()
649 io_worker_handle_work(acct, worker); in io_wq_worker()
663 __io_worker_idle(wq, worker); in io_wq_worker()
683 io_worker_handle_work(acct, worker); in io_wq_worker()
685 io_worker_exit(worker); in io_wq_worker()
694 struct io_worker *worker = tsk->worker_private; in io_wq_worker_running() local
696 if (!worker) in io_wq_worker_running()
698 if (!(worker->flags & IO_WORKER_F_UP)) in io_wq_worker_running()
700 if (worker->flags & IO_WORKER_F_RUNNING) in io_wq_worker_running()
702 worker->flags |= IO_WORKER_F_RUNNING; in io_wq_worker_running()
703 io_wq_inc_running(worker); in io_wq_worker_running()
712 struct io_worker *worker = tsk->worker_private; in io_wq_worker_sleeping() local
714 if (!worker) in io_wq_worker_sleeping()
716 if (!(worker->flags & IO_WORKER_F_UP)) in io_wq_worker_sleeping()
718 if (!(worker->flags & IO_WORKER_F_RUNNING)) in io_wq_worker_sleeping()
721 worker->flags &= ~IO_WORKER_F_RUNNING; in io_wq_worker_sleeping()
722 io_wq_dec_running(worker); in io_wq_worker_sleeping()
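
io_wq_worker_running() and io_wq_worker_sleeping() are invoked from the scheduler as the worker task wakes and blocks; they toggle IO_WORKER_F_RUNNING and adjust the per-class running count, and a decrement that hits zero while work is still queued is what makes io_wq_dec_running() queue create_worker_cb() to spawn a replacement. A hedged sketch of that bookkeeping (flag names and types are illustrative):

    #include <stdatomic.h>

    enum { WORKER_F_UP = 1 << 1, WORKER_F_RUNNING = 1 << 2 };

    struct acct { atomic_int nr_running; };
    struct worker { unsigned int flags; struct acct *acct; };

    /* Scheduler says the worker task is runnable again. */
    static void worker_running(struct worker *w)
    {
        if (!(w->flags & WORKER_F_UP) || (w->flags & WORKER_F_RUNNING))
            return;
        w->flags |= WORKER_F_RUNNING;
        atomic_fetch_add(&w->acct->nr_running, 1);
    }

    /* Scheduler says the worker task is about to block. */
    static void worker_sleeping(struct worker *w)
    {
        if (!(w->flags & WORKER_F_UP) || !(w->flags & WORKER_F_RUNNING))
            return;
        w->flags &= ~WORKER_F_RUNNING;
        if (atomic_fetch_sub(&w->acct->nr_running, 1) == 1) {
            /* Last runner just blocked: this is where the kernel queues
             * create_worker_cb() if work is still pending. */
        }
    }
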
725 static void io_init_new_worker(struct io_wq *wq, struct io_worker *worker, in io_init_new_worker() argument
728 tsk->worker_private = worker; in io_init_new_worker()
729 worker->task = tsk; in io_init_new_worker()
733 hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list); in io_init_new_worker()
734 list_add_tail_rcu(&worker->all_list, &wq->all_list); in io_init_new_worker()
735 worker->flags |= IO_WORKER_F_FREE; in io_init_new_worker()
767 struct io_worker *worker; in create_worker_cont() local
771 worker = container_of(cb, struct io_worker, create_work); in create_worker_cont()
772 clear_bit_unlock(0, &worker->create_state); in create_worker_cont()
773 wq = worker->wq; in create_worker_cont()
774 tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE); in create_worker_cont()
776 io_init_new_worker(wq, worker, tsk); in create_worker_cont()
777 io_worker_release(worker); in create_worker_cont()
780 struct io_wq_acct *acct = io_wq_get_acct(worker); in create_worker_cont()
798 kfree(worker); in create_worker_cont()
803 io_worker_release(worker); in create_worker_cont()
804 schedule_work(&worker->work); in create_worker_cont()
809 struct io_worker *worker = container_of(work, struct io_worker, work); in io_workqueue_create() local
810 struct io_wq_acct *acct = io_wq_get_acct(worker); in io_workqueue_create()
812 if (!io_queue_worker_create(worker, acct, create_worker_cont)) in io_workqueue_create()
813 kfree(worker); in io_workqueue_create()
819 struct io_worker *worker; in create_io_worker() local
824 worker = kzalloc(sizeof(*worker), GFP_KERNEL); in create_io_worker()
825 if (!worker) { in create_io_worker()
835 refcount_set(&worker->ref, 1); in create_io_worker()
836 worker->wq = wq; in create_io_worker()
837 raw_spin_lock_init(&worker->lock); in create_io_worker()
838 init_completion(&worker->ref_done); in create_io_worker()
841 worker->flags |= IO_WORKER_F_BOUND; in create_io_worker()
843 tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE); in create_io_worker()
845 io_init_new_worker(wq, worker, tsk); in create_io_worker()
847 kfree(worker); in create_io_worker()
850 INIT_WORK(&worker->work, io_workqueue_create); in create_io_worker()
851 schedule_work(&worker->work); in create_io_worker()
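
create_io_worker() allocates the worker, sets its refcount to 1 (the worker's own reference), initializes its lock and completion, and spawns the thread; if create_io_thread() fails non-fatally, creation is retried from a workqueue via io_workqueue_create(). A bare-bones userspace analogue, with pthread_create() standing in for create_io_thread() and the retry path reduced to a comment:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct worker {
        atomic_int ref;
        pthread_t task;
    };

    static void *worker_fn(void *data)
    {
        /* the worker loop: pull work, run it, go idle, exit (see above) */
        return data;
    }

    static struct worker *create_worker(void)
    {
        struct worker *w = calloc(1, sizeof(*w));

        if (!w)
            return NULL;
        atomic_store(&w->ref, 1);       /* the worker's own reference */
        if (pthread_create(&w->task, NULL, worker_fn, w) != 0) {
            /* The kernel frees only on fatal errors; otherwise it
             * schedules io_workqueue_create() to retry later. */
            free(w);
            return NULL;
        }
        return w;
    }
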
865 struct io_worker *worker; in io_wq_for_each_worker() local
868 list_for_each_entry_rcu(worker, &wq->all_list, all_list) { in io_wq_for_each_worker()
869 if (io_worker_get(worker)) { in io_wq_for_each_worker()
871 if (worker->task) in io_wq_for_each_worker()
872 ret = func(worker, data); in io_wq_for_each_worker()
873 io_worker_release(worker); in io_wq_for_each_worker()
882 static bool io_wq_worker_wake(struct io_worker *worker, void *data) in io_wq_worker_wake() argument
884 __set_notify_signal(worker->task); in io_wq_worker_wake()
885 wake_up_process(worker->task); in io_wq_worker_wake()
986 static bool __io_wq_worker_cancel(struct io_worker *worker, in __io_wq_worker_cancel() argument
992 __set_notify_signal(worker->task); in __io_wq_worker_cancel()
999 static bool io_wq_worker_cancel(struct io_worker *worker, void *data) in io_wq_worker_cancel() argument
1007 raw_spin_lock(&worker->lock); in io_wq_worker_cancel()
1008 if (__io_wq_worker_cancel(worker, match, worker->cur_work) || in io_wq_worker_cancel()
1009 __io_wq_worker_cancel(worker, match, worker->next_work)) in io_wq_worker_cancel()
1011 raw_spin_unlock(&worker->lock); in io_wq_worker_cancel()
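
io_wq_worker_cancel() holds worker->lock so cur_work and next_work cannot change underneath it, runs the match callback against both slots, and on a hit flags the work cancelled and pokes the task with __set_notify_signal(). A simplified sketch of the locked two-slot match (struct names are illustrative):

    #include <pthread.h>
    #include <stdbool.h>

    struct work { bool cancelled; };

    struct match {
        bool (*fn)(struct work *, void *);
        void *data;
    };

    struct worker {
        pthread_mutex_t lock;
        struct work *cur_work;          /* what the worker runs right now */
        struct work *next_work;         /* what it has already claimed next */
    };

    static bool match_one(struct match *m, struct work *work)
    {
        if (!work || !m->fn(work, m->data))
            return false;
        work->cancelled = true;         /* mirrors IO_WQ_WORK_CANCEL; the kernel
                                         * also __set_notify_signal()s the task */
        return true;
    }

    static bool worker_cancel(struct worker *w, struct match *m)
    {
        bool hit;

        pthread_mutex_lock(&w->lock);
        hit = match_one(m, w->cur_work) || match_one(m, w->next_work);
        pthread_mutex_unlock(&w->lock);
        return hit;
    }
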
1200 struct io_worker *worker; in io_task_work_match() local
1204 worker = container_of(cb, struct io_worker, create_work); in io_task_work_match()
1205 return worker->wq == data; in io_task_work_match()
1218 struct io_worker *worker; in io_wq_cancel_tw_create() local
1220 worker = container_of(cb, struct io_worker, create_work); in io_wq_cancel_tw_create()
1221 io_worker_cancel_cb(worker); in io_wq_cancel_tw_create()
1227 kfree(worker); in io_wq_cancel_tw_create()
1279 static bool io_wq_worker_affinity(struct io_worker *worker, void *data) in io_wq_worker_affinity() argument
1284 cpumask_set_cpu(od->cpu, worker->wq->cpu_mask); in io_wq_worker_affinity()
1286 cpumask_clear_cpu(od->cpu, worker->wq->cpu_mask); in io_wq_worker_affinity()