Lines Matching refs:worker (kernel/workqueue.c)
186 struct worker *manager; /* L: purely informational */
298 struct worker *rescuer; /* MD: rescue worker */
489 #define for_each_pool_worker(worker, pool) \ argument
490 list_for_each_entry((worker), &(pool)->workers, node) \
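for_each_pool_worker() at line 489 is a thin wrapper over list_for_each_entry(), walking pool->workers through each worker's embedded ->node member. Below is a minimal userspace sketch of that intrusive-list idiom; list_head, container_of and the iteration macro are re-derived here rather than taken from linux/list.h, and the two-worker demo is purely illustrative.

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* simplified: takes the type explicitly instead of using typeof() */
#define list_for_each_entry(pos, head, type, member)              \
        for (pos = container_of((head)->next, type, member);      \
             &pos->member != (head);                              \
             pos = container_of(pos->member.next, type, member))

struct worker { int id; struct list_head node; };

static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

int main(void)
{
        struct list_head workers = { &workers, &workers };
        struct worker a = { .id = 0 }, b = { .id = 1 };
        struct worker *w;

        list_add_tail(&a.node, &workers);
        list_add_tail(&b.node, &workers);

        /* the for_each_pool_worker(worker, pool) equivalent */
        list_for_each_entry(w, &workers, struct worker, node)
                printf("worker %d\n", w->id);
        return 0;
}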
867 static inline void worker_set_flags(struct worker *worker, unsigned int flags) in worker_set_flags() argument
869 struct worker_pool *pool = worker->pool; in worker_set_flags()
875 !(worker->flags & WORKER_NOT_RUNNING)) { in worker_set_flags()
879 worker->flags |= flags; in worker_set_flags()
889 static inline void worker_clr_flags(struct worker *worker, unsigned int flags) in worker_clr_flags() argument
891 struct worker_pool *pool = worker->pool; in worker_clr_flags()
892 unsigned int oflags = worker->flags; in worker_clr_flags()
896 worker->flags &= ~flags; in worker_clr_flags()
904 if (!(worker->flags & WORKER_NOT_RUNNING)) in worker_clr_flags()
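worker_set_flags()/worker_clr_flags() keep pool->nr_running in step with the WORKER_NOT_RUNNING flag mask: setting the first such flag on a running worker decrements the count, and clearing the last one (detected via the oflags snapshot at line 892) increments it back. A self-contained model with made-up flag values; only the transition logic mirrors the listing.

#include <assert.h>

#define WORKER_PREP           0x1
#define WORKER_CPU_INTENSIVE  0x2
#define WORKER_NOT_RUNNING    (WORKER_PREP | WORKER_CPU_INTENSIVE)

struct pool   { int nr_running; };
struct worker { unsigned int flags; struct pool *pool; };

static void set_flags(struct worker *w, unsigned int flags)
{
        /* first NOT_RUNNING flag set on a running worker: stop counting it */
        if ((flags & WORKER_NOT_RUNNING) && !(w->flags & WORKER_NOT_RUNNING))
                w->pool->nr_running--;
        w->flags |= flags;
}

static void clr_flags(struct worker *w, unsigned int flags)
{
        unsigned int oflags = w->flags;

        w->flags &= ~flags;
        /* last NOT_RUNNING flag cleared: the worker counts as running again */
        if ((oflags & WORKER_NOT_RUNNING) && !(w->flags & WORKER_NOT_RUNNING))
                w->pool->nr_running++;
}

int main(void)
{
        struct pool p = { .nr_running = 1 };
        struct worker w = { .flags = 0, .pool = &p };

        set_flags(&w, WORKER_PREP);             /* 1 -> 0 */
        set_flags(&w, WORKER_CPU_INTENSIVE);    /* still 0: already not running */
        clr_flags(&w, WORKER_PREP);             /* still 0: CPU_INTENSIVE remains */
        clr_flags(&w, WORKER_CPU_INTENSIVE);    /* 0 -> 1 */
        assert(p.nr_running == 1);
        return 0;
}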
909 static struct worker *first_idle_worker(struct worker_pool *pool) in first_idle_worker()
914 return list_first_entry(&pool->idle_list, struct worker, entry); in first_idle_worker()
927 static void worker_enter_idle(struct worker *worker) in worker_enter_idle() argument
929 struct worker_pool *pool = worker->pool; in worker_enter_idle()
931 if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) || in worker_enter_idle()
932 WARN_ON_ONCE(!list_empty(&worker->entry) && in worker_enter_idle()
933 (worker->hentry.next || worker->hentry.pprev))) in worker_enter_idle()
937 worker->flags |= WORKER_IDLE; in worker_enter_idle()
939 worker->last_active = jiffies; in worker_enter_idle()
942 list_add(&worker->entry, &pool->idle_list); in worker_enter_idle()
960 static void worker_leave_idle(struct worker *worker) in worker_leave_idle() argument
962 struct worker_pool *pool = worker->pool; in worker_leave_idle()
964 if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE))) in worker_leave_idle()
966 worker_clr_flags(worker, WORKER_IDLE); in worker_leave_idle()
968 list_del_init(&worker->entry); in worker_leave_idle()
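worker_enter_idle() stamps last_active and list_add()s at the head of pool->idle_list, so first_idle_worker() (line 914) picks the most recently idled worker, while the least recently active one drifts to the tail, which is exactly where idle_worker_timeout() and idle_cull_fn() look via idle_list.prev (lines 2326 and 2368). A userspace sketch of that LIFO discipline, using <sys/queue.h> as a stand-in for the kernel's list.h.

#include <stdio.h>
#include <time.h>
#include <sys/queue.h>

struct worker {
        int id;
        time_t last_active;
        TAILQ_ENTRY(worker) entry;
};

TAILQ_HEAD(idle_list, worker);

static void enter_idle(struct idle_list *idle, struct worker *w)
{
        w->last_active = time(NULL);          /* cf. line 939 */
        TAILQ_INSERT_HEAD(idle, w, entry);    /* LIFO: hottest at the head */
}

int main(void)
{
        struct idle_list idle;
        struct worker a = { .id = 0 }, b = { .id = 1 };

        TAILQ_INIT(&idle);
        enter_idle(&idle, &a);
        enter_idle(&idle, &b);

        /* first_idle_worker() analogue: most recently idled */
        printf("next to wake: %d\n", TAILQ_FIRST(&idle)->id);
        /* idle_worker_timeout() analogue: least recently active (tail) */
        printf("cull candidate: %d\n", TAILQ_LAST(&idle, idle_list)->id);
        return 0;
}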
1004 static struct worker *find_worker_executing_work(struct worker_pool *pool, in find_worker_executing_work()
1007 struct worker *worker; in find_worker_executing_work() local
1009 hash_for_each_possible(pool->busy_hash, worker, hentry, in find_worker_executing_work()
1011 if (worker->current_work == work && in find_worker_executing_work()
1012 worker->current_func == work->func) in find_worker_executing_work()
1013 return worker; in find_worker_executing_work()
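find_worker_executing_work() hashes on the work pointer but must also compare current_func (lines 1011-1012), because a work item can be freed and the same address recycled for an unrelated item; matching both fields is what backs the per-pool non-reentrancy guarantee. A hypothetical flat-array model of the probe, with a plain linear scan standing in for hash_for_each_possible() over busy_hash.

#include <stddef.h>

typedef void (*work_func_t)(void *);

struct work   { work_func_t func; };
struct worker { struct work *current_work; work_func_t current_func; };

/* linear scan standing in for hash_for_each_possible() on busy_hash */
static struct worker *
find_worker_executing_work(struct worker *busy, size_t n, struct work *work)
{
        for (size_t i = 0; i < n; i++)
                if (busy[i].current_work == work &&
                    busy[i].current_func == work->func)
                        return &busy[i];
        return NULL;
}

static void dummy_fn(void *arg) { (void)arg; }

int main(void)
{
        struct work w = { .func = dummy_fn };
        struct worker busy[1] = { { .current_work = &w, .current_func = dummy_fn } };

        return find_worker_executing_work(busy, 1, &w) ? 0 : 1;
}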
1072 static bool assign_work(struct work_struct *work, struct worker *worker, in assign_work() argument
1075 struct worker_pool *pool = worker->pool; in assign_work()
1076 struct worker *collision; in assign_work()
1094 move_linked_works(work, &worker->scheduled, nextp); in assign_work()
1107 struct worker *worker = first_idle_worker(pool); in kick_pool() local
1112 if (!need_more_worker(pool) || !worker) in kick_pool()
1115 p = worker->task; in kick_pool()
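assign_work() (line 1072) re-runs the collision check: if another worker in the same pool is already executing the item, the work is moved onto that worker's scheduled list instead and the caller backs off; otherwise it lands on the caller's own scheduled list. kick_pool() then only wakes first_idle_worker() when need_more_worker() holds (lines 1107-1115). A condensed model of that handoff; find_collision() and move_to_scheduled() are trivial made-up stubs, and only the control flow is taken from the listing.

#include <stdbool.h>
#include <stdio.h>

struct work   { int id; };
struct worker { int id; };

/* trivial stubs for the real pool lookup and list move */
static struct worker *find_collision(struct worker *w, struct work *work)
{ (void)w; (void)work; return NULL; }
static void move_to_scheduled(struct worker *w, struct work *work)
{ printf("work %d -> worker %d\n", work->id, w->id); }

static bool assign_work(struct work *work, struct worker *worker)
{
        struct worker *collision = find_collision(worker, work);

        if (collision && collision != worker) {
                /* already executing in this pool: queue it behind that
                 * worker so the item never runs concurrently with itself */
                move_to_scheduled(collision, work);
                return false;          /* caller must not process it */
        }
        move_to_scheduled(worker, work);
        return true;
}

int main(void)
{
        struct work item = { .id = 7 };
        struct worker me = { .id = 0 };

        return assign_work(&item, &me) ? 0 : 1;
}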
1246 struct worker *worker = kthread_data(task); in wq_worker_running() local
1248 if (!READ_ONCE(worker->sleeping)) in wq_worker_running()
1258 if (!(worker->flags & WORKER_NOT_RUNNING)) in wq_worker_running()
1259 worker->pool->nr_running++; in wq_worker_running()
1266 worker->current_at = worker->task->se.sum_exec_runtime; in wq_worker_running()
1268 WRITE_ONCE(worker->sleeping, 0); in wq_worker_running()
1280 struct worker *worker = kthread_data(task); in wq_worker_sleeping() local
1288 if (worker->flags & WORKER_NOT_RUNNING) in wq_worker_sleeping()
1291 pool = worker->pool; in wq_worker_sleeping()
1294 if (READ_ONCE(worker->sleeping)) in wq_worker_sleeping()
1297 WRITE_ONCE(worker->sleeping, 1); in wq_worker_sleeping()
1305 if (worker->flags & WORKER_NOT_RUNNING) { in wq_worker_sleeping()
1312 worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++; in wq_worker_sleeping()
1326 struct worker *worker = kthread_data(task); in wq_worker_tick() local
1327 struct pool_workqueue *pwq = worker->current_pwq; in wq_worker_tick()
1328 struct worker_pool *pool = worker->pool; in wq_worker_tick()
1350 if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) || in wq_worker_tick()
1351 worker->task->se.sum_exec_runtime - worker->current_at < in wq_worker_tick()
1357 worker_set_flags(worker, WORKER_CPU_INTENSIVE); in wq_worker_tick()
1358 wq_cpu_intensive_report(worker->current_func); in wq_worker_tick()
1393 struct worker *worker = kthread_data(task); in wq_worker_last_func() local
1395 return worker->last_func; in wq_worker_last_func()
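wq_worker_tick() auto-detects CPU hogs: if the worker is still counted as running, not sleeping, and the runtime consumed since the current_at snapshot (taken at lines 1266 and 2565) exceeds the threshold, it is marked WORKER_CPU_INTENSIVE and wq_cpu_intensive_report() records the offending function (lines 1350-1358). A toy model of just the threshold test; THRESHOLD_NS is an invented constant standing in for the kernel's configurable threshold.

#include <stdbool.h>
#include <stdint.h>

#define THRESHOLD_NS (10 * 1000 * 1000ULL)   /* hypothetical 10ms budget */

struct worker {
        uint64_t current_at;      /* runtime snapshot when the item started */
        bool     cpu_intensive;
};

static void tick(struct worker *w, uint64_t sum_exec_runtime)
{
        if (w->cpu_intensive ||
            sum_exec_runtime - w->current_at < THRESHOLD_NS)
                return;
        /* the item has monopolized the CPU past its budget: flag the
         * worker so the pool stops counting it and can wake another */
        w->cpu_intensive = true;
}

int main(void)
{
        struct worker w = { .current_at = 0, .cpu_intensive = false };

        tick(&w, 5 * 1000 * 1000ULL);    /* under budget: stays normal */
        tick(&w, 20 * 1000 * 1000ULL);   /* over budget: flagged       */
        return w.cpu_intensive ? 0 : 1;
}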
1661 struct worker *worker; in is_chained_work() local
1663 worker = current_wq_worker(); in is_chained_work()
1668 return worker && worker->current_pwq->wq == wq; in is_chained_work()
1747 struct worker *worker; in __queue_work() local
1751 worker = find_worker_executing_work(last_pool, work); in __queue_work()
1753 if (worker && worker->current_pwq->wq == wq) { in __queue_work()
1754 pwq = worker->current_pwq; in __queue_work()
2067 static struct worker *alloc_worker(int node) in alloc_worker()
2069 struct worker *worker; in alloc_worker() local
2071 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node); in alloc_worker()
2072 if (worker) { in alloc_worker()
2073 INIT_LIST_HEAD(&worker->entry); in alloc_worker()
2074 INIT_LIST_HEAD(&worker->scheduled); in alloc_worker()
2075 INIT_LIST_HEAD(&worker->node); in alloc_worker()
2077 worker->flags = WORKER_PREP; in alloc_worker()
2079 return worker; in alloc_worker()
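alloc_worker() zero-allocates on the given NUMA node and immediately self-initializes the three embedded list heads, which is what makes later list_empty()/list_del_init() calls (lines 932, 968, 2747) safe even if the worker was never linked anywhere. A userspace sketch of the same pattern, with calloc() standing in for kzalloc_node().

#include <stdbool.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static bool list_empty(const struct list_head *h) { return h->next == h; }

struct worker {
        struct list_head entry;      /* idle/dying list linkage */
        struct list_head scheduled;  /* works to execute */
        struct list_head node;       /* pool membership */
        unsigned int flags;
};

static struct worker *alloc_worker(void)
{
        struct worker *w = calloc(1, sizeof(*w));

        if (w) {
                INIT_LIST_HEAD(&w->entry);
                INIT_LIST_HEAD(&w->scheduled);
                INIT_LIST_HEAD(&w->node);
                /* starts in PREP state, not yet counted as running */
        }
        return w;
}

int main(void)
{
        struct worker *w = alloc_worker();
        int ret = (w && list_empty(&w->scheduled)) ? 0 : 1;

        free(w);
        return ret;
}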
2099 static void worker_attach_to_pool(struct worker *worker, in worker_attach_to_pool() argument
2110 worker->flags |= WORKER_UNBOUND; in worker_attach_to_pool()
2112 kthread_set_per_cpu(worker->task, pool->cpu); in worker_attach_to_pool()
2114 if (worker->rescue_wq) in worker_attach_to_pool()
2115 set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool)); in worker_attach_to_pool()
2117 list_add_tail(&worker->node, &pool->workers); in worker_attach_to_pool()
2118 worker->pool = pool; in worker_attach_to_pool()
2131 static void worker_detach_from_pool(struct worker *worker) in worker_detach_from_pool() argument
2133 struct worker_pool *pool = worker->pool; in worker_detach_from_pool()
2138 kthread_set_per_cpu(worker->task, -1); in worker_detach_from_pool()
2139 list_del(&worker->node); in worker_detach_from_pool()
2140 worker->pool = NULL; in worker_detach_from_pool()
2147 worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND); in worker_detach_from_pool()
2165 static struct worker *create_worker(struct worker_pool *pool) in create_worker()
2167 struct worker *worker; in create_worker() local
2179 worker = alloc_worker(pool->node); in create_worker()
2180 if (!worker) { in create_worker()
2185 worker->id = id; in create_worker()
2193 worker->task = kthread_create_on_node(worker_thread, worker, pool->node, in create_worker()
2195 if (IS_ERR(worker->task)) { in create_worker()
2196 if (PTR_ERR(worker->task) == -EINTR) { in create_worker()
2201 worker->task); in create_worker()
2206 set_user_nice(worker->task, pool->attrs->nice); in create_worker()
2207 kthread_bind_mask(worker->task, pool_allowed_cpus(pool)); in create_worker()
2210 worker_attach_to_pool(worker, pool); in create_worker()
2215 worker->pool->nr_workers++; in create_worker()
2216 worker_enter_idle(worker); in create_worker()
2224 wake_up_process(worker->task); in create_worker()
2228 return worker; in create_worker()
2232 kfree(worker); in create_worker()
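create_worker() (lines 2165-2232) strings the steps together: allocate, reserve an ID, spawn the kthread, set niceness and affinity, attach to the pool, bump nr_workers, enter idle, wake. The IS_ERR/PTR_ERR checks at lines 2195-2196 work because kernel thread creation returns an errno encoded into the pointer itself. A userspace re-derivation of that idiom; ERR_PTR/PTR_ERR/IS_ERR are re-implemented here rather than included from the kernel, and spawn_thread() is a hypothetical stand-in for kthread_create_on_node().

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err)      { return (void *)err; }
static inline long  PTR_ERR(const void *p) { return (long)p; }
static inline int   IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* hypothetical spawner that fails the way kthread_create_on_node() can */
static void *spawn_thread(int fail)
{
        static int dummy_task;
        return fail ? ERR_PTR(-EINTR) : (void *)&dummy_task;
}

int main(void)
{
        void *task = spawn_thread(1);

        if (IS_ERR(task)) {
                if (PTR_ERR(task) == -EINTR)   /* same test as line 2196 */
                        fprintf(stderr, "spawn interrupted, unwinding\n");
                return 1;
        }
        return 0;
}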
2236 static void unbind_worker(struct worker *worker) in unbind_worker() argument
2240 kthread_set_per_cpu(worker->task, -1); in unbind_worker()
2242 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0); in unbind_worker()
2244 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0); in unbind_worker()
2249 struct worker *worker, *tmp; in wake_dying_workers() local
2251 list_for_each_entry_safe(worker, tmp, cull_list, entry) { in wake_dying_workers()
2252 list_del_init(&worker->entry); in wake_dying_workers()
2253 unbind_worker(worker); in wake_dying_workers()
2264 wake_up_process(worker->task); in wake_dying_workers()
2279 static void set_worker_dying(struct worker *worker, struct list_head *list) in set_worker_dying() argument
2281 struct worker_pool *pool = worker->pool; in set_worker_dying()
2287 if (WARN_ON(worker->current_work) || in set_worker_dying()
2288 WARN_ON(!list_empty(&worker->scheduled)) || in set_worker_dying()
2289 WARN_ON(!(worker->flags & WORKER_IDLE))) in set_worker_dying()
2295 worker->flags |= WORKER_DIE; in set_worker_dying()
2297 list_move(&worker->entry, list); in set_worker_dying()
2298 list_move(&worker->node, &pool->dying_workers); in set_worker_dying()
2322 struct worker *worker; in idle_worker_timeout() local
2326 worker = list_entry(pool->idle_list.prev, struct worker, entry); in idle_worker_timeout()
2327 expires = worker->last_active + IDLE_WORKER_TIMEOUT; in idle_worker_timeout()
2365 struct worker *worker; in idle_cull_fn() local
2368 worker = list_entry(pool->idle_list.prev, struct worker, entry); in idle_cull_fn()
2369 expires = worker->last_active + IDLE_WORKER_TIMEOUT; in idle_cull_fn()
2376 set_worker_dying(worker, &cull_list); in idle_cull_fn()
2504 static bool manage_workers(struct worker *worker) in manage_workers() argument
2506 struct worker_pool *pool = worker->pool; in manage_workers()
2512 pool->manager = worker; in manage_workers()
2536 static void process_one_work(struct worker *worker, struct work_struct *work) in process_one_work() argument
2541 struct worker_pool *pool = worker->pool; in process_one_work()
2561 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); in process_one_work()
2562 worker->current_work = work; in process_one_work()
2563 worker->current_func = work->func; in process_one_work()
2564 worker->current_pwq = pwq; in process_one_work()
2565 worker->current_at = worker->task->se.sum_exec_runtime; in process_one_work()
2567 worker->current_color = get_work_color(work_data); in process_one_work()
2573 strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN); in process_one_work()
2584 worker_set_flags(worker, WORKER_CPU_INTENSIVE); in process_one_work()
2630 worker->current_func(work); in process_one_work()
2635 trace_workqueue_execute_end(work, worker->current_func); in process_one_work()
2644 worker->current_func); in process_one_work()
2666 worker_clr_flags(worker, WORKER_CPU_INTENSIVE); in process_one_work()
2669 worker->last_func = worker->current_func; in process_one_work()
2672 hash_del(&worker->hentry); in process_one_work()
2673 worker->current_work = NULL; in process_one_work()
2674 worker->current_func = NULL; in process_one_work()
2675 worker->current_pwq = NULL; in process_one_work()
2676 worker->current_color = INT_MAX; in process_one_work()
2692 static void process_scheduled_works(struct worker *worker) in process_scheduled_works() argument
2697 while ((work = list_first_entry_or_null(&worker->scheduled, in process_scheduled_works()
2700 worker->pool->watchdog_ts = jiffies; in process_scheduled_works()
2703 process_one_work(worker, work); in process_scheduled_works()
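process_scheduled_works() (line 2692) is a plain drain loop over worker->scheduled, and process_one_work() brackets each callback by publishing, then clearing, the current_* fields that everything else (busy_hash lookups, flushing, debugging) reads. A single-threaded model that drops locking, busy_hash and flush colors, with <sys/queue.h> in place of the kernel's list.h.

#include <stdio.h>
#include <sys/queue.h>

struct work {
        void (*func)(struct work *);
        STAILQ_ENTRY(work) entry;
};

struct worker {
        STAILQ_HEAD(, work) scheduled;
        struct work *current_work;             /* published while executing */
        void (*current_func)(struct work *);
};

static void process_one_work(struct worker *w, struct work *work)
{
        w->current_work = work;                /* cf. lines 2562-2563 */
        w->current_func = work->func;
        work->func(work);                      /* cf. line 2630 */
        w->current_work = NULL;                /* cf. lines 2673-2675 */
        w->current_func = NULL;
}

static void process_scheduled_works(struct worker *w)
{
        struct work *work;

        /* drain worker->scheduled one entry at a time, as at line 2697 */
        while ((work = STAILQ_FIRST(&w->scheduled)) != NULL) {
                STAILQ_REMOVE_HEAD(&w->scheduled, entry);
                process_one_work(w, work);
        }
}

static void hello(struct work *unused) { (void)unused; puts("work ran"); }

int main(void)
{
        struct worker w = { .current_work = NULL };
        struct work item = { .func = hello };

        STAILQ_INIT(&w.scheduled);
        STAILQ_INSERT_TAIL(&w.scheduled, &item, entry);
        process_scheduled_works(&w);
        return 0;
}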
2731 struct worker *worker = __worker; in worker_thread() local
2732 struct worker_pool *pool = worker->pool; in worker_thread()
2740 if (unlikely(worker->flags & WORKER_DIE)) { in worker_thread()
2744 set_task_comm(worker->task, "kworker/dying"); in worker_thread()
2745 ida_free(&pool->worker_ida, worker->id); in worker_thread()
2746 worker_detach_from_pool(worker); in worker_thread()
2747 WARN_ON_ONCE(!list_empty(&worker->entry)); in worker_thread()
2748 kfree(worker); in worker_thread()
2752 worker_leave_idle(worker); in worker_thread()
2759 if (unlikely(!may_start_working(pool)) && manage_workers(worker)) in worker_thread()
2767 WARN_ON_ONCE(!list_empty(&worker->scheduled)); in worker_thread()
2776 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND); in worker_thread()
2783 if (assign_work(work, worker, NULL)) in worker_thread()
2784 process_scheduled_works(worker); in worker_thread()
2787 worker_set_flags(worker, WORKER_PREP); in worker_thread()
2796 worker_enter_idle(worker); in worker_thread()
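Taken together, lines 2731-2796 are the worker's whole lifecycle in one loop: check WORKER_DIE, leave idle, make sure the pool may start working (else manage_workers()), shed WORKER_PREP, execute assigned works, re-enter idle and sleep. A condensed control-flow paraphrase with every helper stubbed out; only the ordering of the states is taken from the listing, not the kernel code itself.

#include <stdbool.h>

struct worker { int rounds; };

/* trivial stand-ins for the pool state tests and helpers */
static bool should_die(struct worker *w)           { return w->rounds-- <= 0; }
static void leave_idle(struct worker *w)           { (void)w; }
static bool may_start_working(struct worker *w)    { (void)w; return true; }
static bool pool_has_work(struct worker *w)        { (void)w; return false; }
static void process_scheduled(struct worker *w)    { (void)w; }
static void enter_idle_and_sleep(struct worker *w) { (void)w; }

static void worker_thread(struct worker *w)
{
        for (;;) {
                if (should_die(w))            /* WORKER_DIE: detach, free, exit */
                        return;
                leave_idle(w);                /* cf. line 2752 */
                if (!may_start_working(w))    /* no idle worker left in pool:  */
                        continue;             /* manage_workers(), then retry  */
                while (pool_has_work(w))      /* assign_work(), then...        */
                        process_scheduled(w); /* process_scheduled_works()     */
                enter_idle_and_sleep(w);      /* cf. line 2796 */
        }
}

int main(void)
{
        struct worker w = { .rounds = 3 };

        worker_thread(&w);
        return 0;
}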
2826 struct worker *rescuer = __rescuer; in rescuer_thread()
2953 struct worker *worker; in check_flush_dependency() local
2958 worker = current_wq_worker(); in check_flush_dependency()
2963 WARN_ONCE(worker && ((worker->current_pwq->wq->flags & in check_flush_dependency()
2966 worker->current_pwq->wq->name, worker->current_func, in check_flush_dependency()
3008 struct work_struct *target, struct worker *worker) in insert_wq_barrier() argument
3034 if (worker) { in insert_wq_barrier()
3035 head = worker->scheduled.next; in insert_wq_barrier()
3036 work_color = worker->current_color; in insert_wq_barrier()
3340 struct worker *worker = NULL; in start_flush_work() local
3360 worker = find_worker_executing_work(pool, work); in start_flush_work()
3361 if (!worker) in start_flush_work()
3363 pwq = worker->current_pwq; in start_flush_work()
3368 insert_wq_barrier(pwq, barr, work, worker); in start_flush_work()
3961 struct worker *worker; in put_unbound_pool() local
4007 while ((worker = first_idle_worker(pool))) in put_unbound_pool()
4008 set_worker_dying(worker, &cull_list); in put_unbound_pool()
4641 struct worker *rescuer; in init_rescuer()
4807 struct worker *rescuer = wq->rescuer; in destroy_workqueue()
4910 struct worker *worker = current_wq_worker(); in current_work() local
4912 return worker ? worker->current_work : NULL; in current_work()
4926 struct worker *worker = current_wq_worker(); in current_is_workqueue_rescuer() local
4928 return worker && worker->rescue_wq; in current_is_workqueue_rescuer()
5017 struct worker *worker = current_wq_worker(); in set_worker_desc() local
5020 if (worker) { in set_worker_desc()
5022 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args); in set_worker_desc()
5048 struct worker *worker; in print_worker_info() local
5057 worker = kthread_probe_data(task); in print_worker_info()
5063 copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn)); in print_worker_info()
5064 copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq)); in print_worker_info()
5067 copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1); in print_worker_info()
5134 struct worker *worker; in show_pwq() local
5145 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_pwq()
5146 if (worker->current_pwq == pwq) { in show_pwq()
5155 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_pwq()
5156 if (worker->current_pwq != pwq) in show_pwq()
5160 task_pid_nr(worker->task), in show_pwq()
5161 worker->rescue_wq ? "(RESCUER)" : "", in show_pwq()
5162 worker->current_func); in show_pwq()
5163 list_for_each_entry(work, &worker->scheduled, entry) in show_pwq()
5255 struct worker *worker; in show_one_worker_pool() local
5280 list_for_each_entry(worker, &pool->idle_list, entry) { in show_one_worker_pool()
5282 task_pid_nr(worker->task)); in show_one_worker_pool()
5359 struct worker *worker = kthread_data(task); in wq_worker_comm() local
5360 struct worker_pool *pool = worker->pool; in wq_worker_comm()
5369 if (worker->desc[0] != '\0') { in wq_worker_comm()
5370 if (worker->current_work) in wq_worker_comm()
5372 worker->desc); in wq_worker_comm()
5375 worker->desc); in wq_worker_comm()
5404 struct worker *worker; in unbind_workers() local
5418 for_each_pool_worker(worker, pool) in unbind_workers()
5419 worker->flags |= WORKER_UNBOUND; in unbind_workers()
5442 for_each_pool_worker(worker, pool) in unbind_workers()
5443 unbind_worker(worker); in unbind_workers()
5457 struct worker *worker; in rebind_workers() local
5468 for_each_pool_worker(worker, pool) { in rebind_workers()
5469 kthread_set_per_cpu(worker->task, pool->cpu); in rebind_workers()
5470 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, in rebind_workers()
5478 for_each_pool_worker(worker, pool) { in rebind_workers()
5479 unsigned int worker_flags = worker->flags; in rebind_workers()
5499 WRITE_ONCE(worker->flags, worker_flags); in rebind_workers()
5518 struct worker *worker; in restore_unbound_workers_cpumask() local
5529 for_each_pool_worker(worker, pool) in restore_unbound_workers_cpumask()
5530 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0); in restore_unbound_workers_cpumask()
6337 struct worker *worker; in show_cpu_pool_hog() local
6343 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_cpu_pool_hog()
6344 if (task_is_running(worker->task)) { in show_cpu_pool_hog()
6353 sched_show_task(worker->task); in show_cpu_pool_hog()