Lines matching refs: pool (identifier cross-reference output; the excerpts below appear to come from the kernel workqueue implementation, kernel/workqueue.c)

200 	struct worker_pool	*pool;		/* I: the associated pool */  member
378 #define for_each_cpu_worker_pool(pool, cpu) \ argument
379 for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
380 (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
381 (pool)++)
395 #define for_each_pool(pool, pi) \ argument
396 idr_for_each_entry(&worker_pool_idr, pool, pi) \
410 #define for_each_pool_worker(worker, pool) \ argument
411 list_for_each_entry((worker), &(pool)->workers, node) \
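
The macros above iterate the standard per-CPU worker pools, all pools by ID, and every worker attached to a pool. A small, self-contained userspace sketch of the per-CPU iteration pattern (NR_CPUS, NR_POOLS_PER_CPU and the field names are illustrative, not the kernel's):

#include <stdio.h>

#define NR_CPUS           4   /* illustrative */
#define NR_POOLS_PER_CPU  2   /* the kernel keeps a normal and a highpri pool per CPU */

struct pool { int nice; };

static struct pool cpu_pools[NR_CPUS][NR_POOLS_PER_CPU];

/* Walk the fixed per-CPU array by pointer, as for_each_cpu_worker_pool() does. */
#define for_each_cpu_pool(p, cpu) \
	for ((p) = &cpu_pools[cpu][0]; \
	     (p) < &cpu_pools[cpu][NR_POOLS_PER_CPU]; (p)++)

int main(void)
{
	struct pool *p;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		for_each_cpu_pool(p, cpu)
			printf("cpu %d pool nice=%d\n", cpu, p->nice);
	return 0;
}
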
536 static int worker_pool_assign_id(struct worker_pool *pool) in worker_pool_assign_id() argument
542 ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE, in worker_pool_assign_id()
545 pool->id = ret; in worker_pool_assign_id()
719 (data & WORK_STRUCT_WQ_DATA_MASK))->pool; in get_work_pool()
741 (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id; in get_work_pool_id()
767 static bool __need_more_worker(struct worker_pool *pool) in __need_more_worker() argument
769 return !atomic_read(&pool->nr_running); in __need_more_worker()
780 static bool need_more_worker(struct worker_pool *pool) in need_more_worker() argument
782 return !list_empty(&pool->worklist) && __need_more_worker(pool); in need_more_worker()
786 static bool may_start_working(struct worker_pool *pool) in may_start_working() argument
788 return pool->nr_idle; in may_start_working()
792 static bool keep_working(struct worker_pool *pool) in keep_working() argument
794 return !list_empty(&pool->worklist) && in keep_working()
795 atomic_read(&pool->nr_running) <= 1; in keep_working()
799 static bool need_to_create_worker(struct worker_pool *pool) in need_to_create_worker() argument
801 return need_more_worker(pool) && !may_start_working(pool); in need_to_create_worker()
805 static bool too_many_workers(struct worker_pool *pool) in too_many_workers() argument
807 bool managing = pool->flags & POOL_MANAGER_ACTIVE; in too_many_workers()
808 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ in too_many_workers()
809 int nr_busy = pool->nr_workers - nr_idle; in too_many_workers()
819 static struct worker *first_idle_worker(struct worker_pool *pool) in first_idle_worker() argument
821 if (unlikely(list_empty(&pool->idle_list))) in first_idle_worker()
824 return list_first_entry(&pool->idle_list, struct worker, entry); in first_idle_worker()
836 static void wake_up_worker(struct worker_pool *pool) in wake_up_worker() argument
838 struct worker *worker = first_idle_worker(pool); in wake_up_worker()
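
The predicates listed above encode the pool's concurrency policy: work is handed to an existing idle worker when possible, new workers are created only when work is pending and nobody is idle, and surplus idle workers are eventually reaped. A minimal, compilable userspace sketch of that logic (the struct and macro names are illustrative; the ratio in too_many_workers() is an assumption, since the listing cuts off before the return statement):

#include <stdbool.h>

struct pool_stats {
	int nr_running;    /* workers currently on a CPU */
	int nr_idle;       /* workers parked on the idle list */
	int nr_workers;    /* total workers attached to the pool */
	int worklist_len;  /* pending work items */
	bool manager_active;
};

/* Work is pending and no worker is running: someone must pick it up. */
static bool need_more_worker(const struct pool_stats *p)
{
	return p->worklist_len > 0 && p->nr_running == 0;
}

/* An idle worker exists, so waking one is enough. */
static bool may_start_working(const struct pool_stats *p)
{
	return p->nr_idle > 0;
}

/* Keep the current worker going while work remains and concurrency <= 1. */
static bool keep_working(const struct pool_stats *p)
{
	return p->worklist_len > 0 && p->nr_running <= 1;
}

/* Work is pending but nobody is idle: a new worker has to be created. */
static bool need_to_create_worker(const struct pool_stats *p)
{
	return need_more_worker(p) && !may_start_working(p);
}

/* Assumed idle/busy ratio; the actual return expression is not in this listing. */
#define MAX_IDLE_RATIO 4

static bool too_many_workers(const struct pool_stats *p)
{
	int nr_idle = p->nr_idle + (p->manager_active ? 1 : 0);
	int nr_busy = p->nr_workers - nr_idle;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_RATIO >= nr_busy;
}

int main(void)
{
	/* all workers blocked, work queued, nobody idle: create a worker */
	struct pool_stats p = { .nr_workers = 3, .worklist_len = 5 };

	(void)keep_working(&p);
	(void)too_many_workers(&p);
	return need_to_create_worker(&p) ? 0 : 1;
}
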
857 atomic_inc(&worker->pool->nr_running); in wq_worker_running()
871 struct worker_pool *pool; in wq_worker_sleeping() local
881 pool = worker->pool; in wq_worker_sleeping()
887 spin_lock_irq(&pool->lock); in wq_worker_sleeping()
900 if (atomic_dec_and_test(&pool->nr_running) && in wq_worker_sleeping()
901 !list_empty(&pool->worklist)) { in wq_worker_sleeping()
902 next = first_idle_worker(pool); in wq_worker_sleeping()
906 spin_unlock_irq(&pool->lock); in wq_worker_sleeping()
952 struct worker_pool *pool = worker->pool; in worker_set_flags() local
959 atomic_dec(&pool->nr_running); in worker_set_flags()
977 struct worker_pool *pool = worker->pool; in worker_clr_flags() local
991 atomic_inc(&pool->nr_running); in worker_clr_flags()
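
The scheduler hooks wq_worker_running()/wq_worker_sleeping() and the flag helpers above keep pool->nr_running in step with how many workers are actually on a CPU; when the last running worker goes to sleep while work is still queued, the first idle worker is woken so the pool keeps making progress. A hedged userspace sketch of that bookkeeping (names and types are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct sketch_pool {
	atomic_int nr_running;   /* workers currently on a CPU */
	int worklist_len;        /* pending work items */
};

static void hook_worker_running(struct sketch_pool *p)
{
	atomic_fetch_add(&p->nr_running, 1);
}

/*
 * Called when a worker is about to sleep.  Returns true when the caller
 * should wake the first idle worker: we were the last runner and work is
 * still pending, so concurrency would otherwise drop to zero.
 */
static bool hook_worker_sleeping(struct sketch_pool *p)
{
	return atomic_fetch_sub(&p->nr_running, 1) == 1 && p->worklist_len > 0;
}

int main(void)
{
	struct sketch_pool pool = { .worklist_len = 2 };

	hook_worker_running(&pool);
	printf("wake next idle worker: %d\n", hook_worker_sleeping(&pool));
	return 0;
}
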
1027 static struct worker *find_worker_executing_work(struct worker_pool *pool, in find_worker_executing_work() argument
1032 hash_for_each_possible(pool->busy_hash, worker, hentry, in find_worker_executing_work()
1091 lockdep_assert_held(&pwq->pool->lock); in get_pwq()
1105 lockdep_assert_held(&pwq->pool->lock); in put_pwq()
1134 spin_lock_irq(&pwq->pool->lock); in put_pwq_unlocked()
1136 spin_unlock_irq(&pwq->pool->lock); in put_pwq_unlocked()
1145 if (list_empty(&pwq->pool->worklist)) in pwq_activate_delayed_work()
1146 pwq->pool->watchdog_ts = jiffies; in pwq_activate_delayed_work()
1147 move_linked_works(work, &pwq->pool->worklist, NULL); in pwq_activate_delayed_work()
1237 struct worker_pool *pool; in try_to_grab_pending() local
1264 pool = get_work_pool(work); in try_to_grab_pending()
1265 if (!pool) in try_to_grab_pending()
1268 spin_lock(&pool->lock); in try_to_grab_pending()
1278 if (pwq && pwq->pool == pool) { in try_to_grab_pending()
1295 set_work_pool_and_keep_pending(work, pool->id); in try_to_grab_pending()
1297 spin_unlock(&pool->lock); in try_to_grab_pending()
1301 spin_unlock(&pool->lock); in try_to_grab_pending()
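
try_to_grab_pending() above follows a lock-then-revalidate pattern: the pool is derived from the work item without holding its lock, pool->lock is taken, and the pwq->pool == pool check confirms the item was not requeued elsewhere in the meantime. A pthreads sketch of the same idea under hypothetical names (in real concurrent code the unlocked owner read would also need an atomic/READ_ONCE-style access):

#include <pthread.h>
#include <stdbool.h>

struct bucket {
	pthread_mutex_t lock;
};

struct item {
	struct bucket *owner;   /* may be changed by other threads */
};

static bool grab_item(struct item *it)
{
	struct bucket *b = it->owner;   /* unlocked snapshot */

	if (!b)
		return false;

	pthread_mutex_lock(&b->lock);
	if (it->owner != b) {
		/* the item moved to another bucket while we were locking */
		pthread_mutex_unlock(&b->lock);
		return false;
	}
	/* ... safe to operate on the item under b->lock here ... */
	pthread_mutex_unlock(&b->lock);
	return true;
}

int main(void)
{
	static struct bucket b = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct item it = { .owner = &b };

	return grab_item(&it) ? 0 : 1;
}
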
1327 struct worker_pool *pool = pwq->pool; in insert_work() local
1341 if (__need_more_worker(pool)) in insert_work()
1342 wake_up_worker(pool); in insert_work()
1434 if (last_pool && last_pool != pwq->pool) { in __queue_work()
1446 spin_lock(&pwq->pool->lock); in __queue_work()
1449 spin_lock(&pwq->pool->lock); in __queue_work()
1462 spin_unlock(&pwq->pool->lock); in __queue_work()
1483 worklist = &pwq->pool->worklist; in __queue_work()
1485 pwq->pool->watchdog_ts = jiffies; in __queue_work()
1494 spin_unlock(&pwq->pool->lock); in __queue_work()
1767 struct worker_pool *pool = worker->pool; in worker_enter_idle() local
1776 pool->nr_idle++; in worker_enter_idle()
1780 list_add(&worker->entry, &pool->idle_list); in worker_enter_idle()
1782 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) in worker_enter_idle()
1783 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); in worker_enter_idle()
1791 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && in worker_enter_idle()
1792 pool->nr_workers == pool->nr_idle && in worker_enter_idle()
1793 atomic_read(&pool->nr_running)); in worker_enter_idle()
1807 struct worker_pool *pool = worker->pool; in worker_leave_idle() local
1812 pool->nr_idle--; in worker_leave_idle()
1841 struct worker_pool *pool) in worker_attach_to_pool() argument
1849 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); in worker_attach_to_pool()
1856 if (pool->flags & POOL_DISASSOCIATED) in worker_attach_to_pool()
1859 list_add_tail(&worker->node, &pool->workers); in worker_attach_to_pool()
1860 worker->pool = pool; in worker_attach_to_pool()
1875 struct worker_pool *pool = worker->pool; in worker_detach_from_pool() local
1881 worker->pool = NULL; in worker_detach_from_pool()
1883 if (list_empty(&pool->workers)) in worker_detach_from_pool()
1884 detach_completion = pool->detach_completion; in worker_detach_from_pool()
1906 static struct worker *create_worker(struct worker_pool *pool) in create_worker() argument
1913 id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL); in create_worker()
1917 worker = alloc_worker(pool->node); in create_worker()
1923 if (pool->cpu >= 0) in create_worker()
1924 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id, in create_worker()
1925 pool->attrs->nice < 0 ? "H" : ""); in create_worker()
1927 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id); in create_worker()
1929 worker->task = kthread_create_on_node(worker_thread, worker, pool->node, in create_worker()
1934 set_user_nice(worker->task, pool->attrs->nice); in create_worker()
1935 kthread_bind_mask(worker->task, pool->attrs->cpumask); in create_worker()
1938 worker_attach_to_pool(worker, pool); in create_worker()
1941 spin_lock_irq(&pool->lock); in create_worker()
1942 worker->pool->nr_workers++; in create_worker()
1945 spin_unlock_irq(&pool->lock); in create_worker()
1951 ida_simple_remove(&pool->worker_ida, id); in create_worker()
1968 struct worker_pool *pool = worker->pool; in destroy_worker() local
1970 lockdep_assert_held(&pool->lock); in destroy_worker()
1978 pool->nr_workers--; in destroy_worker()
1979 pool->nr_idle--; in destroy_worker()
1988 struct worker_pool *pool = from_timer(pool, t, idle_timer); in idle_worker_timeout() local
1990 spin_lock_irq(&pool->lock); in idle_worker_timeout()
1992 while (too_many_workers(pool)) { in idle_worker_timeout()
1997 worker = list_entry(pool->idle_list.prev, struct worker, entry); in idle_worker_timeout()
2001 mod_timer(&pool->idle_timer, expires); in idle_worker_timeout()
2008 spin_unlock_irq(&pool->lock); in idle_worker_timeout()
2036 struct worker_pool *pool = from_timer(pool, t, mayday_timer); in pool_mayday_timeout() local
2039 spin_lock_irq(&pool->lock); in pool_mayday_timeout()
2042 if (need_to_create_worker(pool)) { in pool_mayday_timeout()
2049 list_for_each_entry(work, &pool->worklist, entry) in pool_mayday_timeout()
2054 spin_unlock_irq(&pool->lock); in pool_mayday_timeout()
2056 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); in pool_mayday_timeout()
2077 static void maybe_create_worker(struct worker_pool *pool) in maybe_create_worker() argument
2078 __releases(&pool->lock) in maybe_create_worker()
2079 __acquires(&pool->lock) in maybe_create_worker()
2082 spin_unlock_irq(&pool->lock); in maybe_create_worker()
2085 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); in maybe_create_worker()
2088 if (create_worker(pool) || !need_to_create_worker(pool)) in maybe_create_worker()
2093 if (!need_to_create_worker(pool)) in maybe_create_worker()
2097 del_timer_sync(&pool->mayday_timer); in maybe_create_worker()
2098 spin_lock_irq(&pool->lock); in maybe_create_worker()
2104 if (need_to_create_worker(pool)) in maybe_create_worker()
2132 struct worker_pool *pool = worker->pool; in manage_workers() local
2134 if (pool->flags & POOL_MANAGER_ACTIVE) in manage_workers()
2137 pool->flags |= POOL_MANAGER_ACTIVE; in manage_workers()
2138 pool->manager = worker; in manage_workers()
2140 maybe_create_worker(pool); in manage_workers()
2142 pool->manager = NULL; in manage_workers()
2143 pool->flags &= ~POOL_MANAGER_ACTIVE; in manage_workers()
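
manage_workers()/maybe_create_worker() above let one worker at a time claim the manager role (POOL_MANAGER_ACTIVE) and create workers until the pool no longer needs one. A loosely analogous userspace sketch under those assumptions, with locking, the mayday timer and creation-failure backoff omitted (all names are illustrative):

#include <stdbool.h>

struct mpool {
	bool manager_active;
	int nr_idle;
	int pending;      /* queued work items */
	int nr_workers;
};

static bool need_to_create_worker(struct mpool *p)
{
	return p->pending > 0 && p->nr_idle == 0;
}

static bool create_worker(struct mpool *p)
{
	p->nr_workers++;
	p->nr_idle++;     /* a new worker starts out idle */
	return true;
}

static bool manage_workers(struct mpool *p)
{
	if (p->manager_active)
		return false;          /* another worker is already managing */

	p->manager_active = true;
	while (need_to_create_worker(p))
		if (!create_worker(p))
			break;
	p->manager_active = false;
	return true;
}

int main(void)
{
	struct mpool p = { .pending = 3 };

	/* one new worker is enough: it becomes the idle worker that can run the work */
	return manage_workers(&p) && p.nr_workers == 1 ? 0 : 1;
}
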
2163 __releases(&pool->lock) in process_one_work()
2164 __acquires(&pool->lock) in process_one_work()
2167 struct worker_pool *pool = worker->pool; in process_one_work() local
2184 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && in process_one_work()
2185 raw_smp_processor_id() != pool->cpu); in process_one_work()
2193 collision = find_worker_executing_work(pool, work); in process_one_work()
2201 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); in process_one_work()
2231 if (need_more_worker(pool)) in process_one_work()
2232 wake_up_worker(pool); in process_one_work()
2240 set_work_pool_and_clear_pending(work, pool->id); in process_one_work()
2242 spin_unlock_irq(&pool->lock); in process_one_work()
2297 spin_lock_irq(&pool->lock); in process_one_work()
2360 struct worker_pool *pool = worker->pool; in worker_thread() local
2365 spin_lock_irq(&pool->lock); in worker_thread()
2369 spin_unlock_irq(&pool->lock); in worker_thread()
2374 ida_simple_remove(&pool->worker_ida, worker->id); in worker_thread()
2383 if (!need_more_worker(pool)) in worker_thread()
2387 if (unlikely(!may_start_working(pool)) && manage_workers(worker)) in worker_thread()
2408 list_first_entry(&pool->worklist, in worker_thread()
2411 pool->watchdog_ts = jiffies; in worker_thread()
2422 } while (keep_working(pool)); in worker_thread()
2435 spin_unlock_irq(&pool->lock); in worker_thread()
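
The worker_thread() lines above outline the main loop: if no more workers are needed the worker goes idle, if work is pending but nobody is idle it may take the manager role, and otherwise it pops items off pool->worklist and processes them for as long as keep_working() holds. A heavily simplified, single-threaded userspace model of that drain loop (no locking, flags or manager path; all names are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct work {
	void (*fn)(struct work *);
	struct work *next;
};

struct loop_pool {
	struct work *worklist;   /* singly linked, head is the next item to run */
	int nr_running;
};

static bool keep_working(struct loop_pool *p)
{
	return p->worklist && p->nr_running <= 1;
}

/* One pass of the simplified worker loop: drain work, then "go idle". */
static void worker_run(struct loop_pool *p)
{
	if (!p->worklist)
		return;                      /* nothing pending: stay idle */

	p->nr_running++;
	do {
		struct work *w = p->worklist;
		p->worklist = w->next;       /* pop the first entry */
		w->fn(w);                    /* process_one_work() analogue */
	} while (keep_working(p));
	p->nr_running--;
}

static void say_hello(struct work *w)
{
	(void)w;
	puts("work item ran");
}

int main(void)
{
	struct work b = { .fn = say_hello, .next = NULL };
	struct work a = { .fn = say_hello, .next = &b };
	struct loop_pool pool = { .worklist = &a };

	worker_run(&pool);
	return pool.worklist ? 1 : 0;
}
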
2494 struct worker_pool *pool = pwq->pool; in rescuer_thread() local
2503 worker_attach_to_pool(rescuer, pool); in rescuer_thread()
2505 spin_lock_irq(&pool->lock); in rescuer_thread()
2512 list_for_each_entry_safe(work, n, &pool->worklist, entry) { in rescuer_thread()
2515 pool->watchdog_ts = jiffies; in rescuer_thread()
2533 if (need_to_create_worker(pool)) { in rescuer_thread()
2552 if (need_more_worker(pool)) in rescuer_thread()
2553 wake_up_worker(pool); in rescuer_thread()
2555 spin_unlock_irq(&pool->lock); in rescuer_thread()
2727 struct worker_pool *pool = pwq->pool; in flush_workqueue_prep_pwqs() local
2729 spin_lock_irq(&pool->lock); in flush_workqueue_prep_pwqs()
2746 spin_unlock_irq(&pool->lock); in flush_workqueue_prep_pwqs()
2946 spin_lock_irq(&pwq->pool->lock); in drain_workqueue()
2948 spin_unlock_irq(&pwq->pool->lock); in drain_workqueue()
2972 struct worker_pool *pool; in start_flush_work() local
2978 pool = get_work_pool(work); in start_flush_work()
2979 if (!pool) { in start_flush_work()
2984 spin_lock_irq(&pool->lock); in start_flush_work()
2988 if (unlikely(pwq->pool != pool)) in start_flush_work()
2991 worker = find_worker_executing_work(pool, work); in start_flush_work()
3000 spin_unlock_irq(&pool->lock); in start_flush_work()
3019 spin_unlock_irq(&pool->lock); in start_flush_work()
3410 static int init_worker_pool(struct worker_pool *pool) in init_worker_pool() argument
3412 spin_lock_init(&pool->lock); in init_worker_pool()
3413 pool->id = -1; in init_worker_pool()
3414 pool->cpu = -1; in init_worker_pool()
3415 pool->node = NUMA_NO_NODE; in init_worker_pool()
3416 pool->flags |= POOL_DISASSOCIATED; in init_worker_pool()
3417 pool->watchdog_ts = jiffies; in init_worker_pool()
3418 INIT_LIST_HEAD(&pool->worklist); in init_worker_pool()
3419 INIT_LIST_HEAD(&pool->idle_list); in init_worker_pool()
3420 hash_init(pool->busy_hash); in init_worker_pool()
3422 timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE); in init_worker_pool()
3424 timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0); in init_worker_pool()
3426 INIT_LIST_HEAD(&pool->workers); in init_worker_pool()
3428 ida_init(&pool->worker_ida); in init_worker_pool()
3429 INIT_HLIST_NODE(&pool->hash_node); in init_worker_pool()
3430 pool->refcnt = 1; in init_worker_pool()
3433 pool->attrs = alloc_workqueue_attrs(); in init_worker_pool()
3434 if (!pool->attrs) in init_worker_pool()
3495 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu); in rcu_free_pool() local
3497 ida_destroy(&pool->worker_ida); in rcu_free_pool()
3498 free_workqueue_attrs(pool->attrs); in rcu_free_pool()
3499 kfree(pool); in rcu_free_pool()
3513 static void put_unbound_pool(struct worker_pool *pool) in put_unbound_pool() argument
3520 if (--pool->refcnt) in put_unbound_pool()
3524 if (WARN_ON(!(pool->cpu < 0)) || in put_unbound_pool()
3525 WARN_ON(!list_empty(&pool->worklist))) in put_unbound_pool()
3529 if (pool->id >= 0) in put_unbound_pool()
3530 idr_remove(&worker_pool_idr, pool->id); in put_unbound_pool()
3531 hash_del(&pool->hash_node); in put_unbound_pool()
3538 spin_lock_irq(&pool->lock); in put_unbound_pool()
3540 !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock); in put_unbound_pool()
3541 pool->flags |= POOL_MANAGER_ACTIVE; in put_unbound_pool()
3543 while ((worker = first_idle_worker(pool))) in put_unbound_pool()
3545 WARN_ON(pool->nr_workers || pool->nr_idle); in put_unbound_pool()
3546 spin_unlock_irq(&pool->lock); in put_unbound_pool()
3549 if (!list_empty(&pool->workers)) in put_unbound_pool()
3550 pool->detach_completion = &detach_completion; in put_unbound_pool()
3553 if (pool->detach_completion) in put_unbound_pool()
3554 wait_for_completion(pool->detach_completion); in put_unbound_pool()
3557 del_timer_sync(&pool->idle_timer); in put_unbound_pool()
3558 del_timer_sync(&pool->mayday_timer); in put_unbound_pool()
3561 call_rcu(&pool->rcu, rcu_free_pool); in put_unbound_pool()
3581 struct worker_pool *pool; in get_unbound_pool() local
3588 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) { in get_unbound_pool()
3589 if (wqattrs_equal(pool->attrs, attrs)) { in get_unbound_pool()
3590 pool->refcnt++; in get_unbound_pool()
3591 return pool; in get_unbound_pool()
3607 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node); in get_unbound_pool()
3608 if (!pool || init_worker_pool(pool) < 0) in get_unbound_pool()
3611 lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */ in get_unbound_pool()
3612 copy_workqueue_attrs(pool->attrs, attrs); in get_unbound_pool()
3613 pool->node = target_node; in get_unbound_pool()
3619 pool->attrs->no_numa = false; in get_unbound_pool()
3621 if (worker_pool_assign_id(pool) < 0) in get_unbound_pool()
3625 if (wq_online && !create_worker(pool)) in get_unbound_pool()
3629 hash_add(unbound_pool_hash, &pool->hash_node, hash); in get_unbound_pool()
3631 return pool; in get_unbound_pool()
3633 if (pool) in get_unbound_pool()
3634 put_unbound_pool(pool); in get_unbound_pool()
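
get_unbound_pool()/put_unbound_pool() above implement a keyed, reference-counted pool cache: a pool with matching attributes is reused and its refcount bumped, otherwise a new one is allocated, initialized and published in the hash, and the last put unhashes and frees it (via RCU in the kernel). A minimal single-threaded userspace sketch of that lookup-or-create/refcount pattern (a plain linked list stands in for the hash; all names are illustrative):

#include <stdlib.h>
#include <string.h>

struct attrs { int nice; };

struct upool {
	struct attrs attrs;
	int refcnt;
	struct upool *next;   /* single "hash bucket", for brevity */
};

static struct upool *pool_list;

static struct upool *get_pool(const struct attrs *attrs)
{
	struct upool *p;

	for (p = pool_list; p; p = p->next) {
		if (memcmp(&p->attrs, attrs, sizeof(*attrs)) == 0) {
			p->refcnt++;
			return p;          /* reuse an existing pool */
		}
	}

	p = calloc(1, sizeof(*p));
	if (!p)
		return NULL;
	p->attrs = *attrs;
	p->refcnt = 1;
	p->next = pool_list;       /* publish for later lookups */
	pool_list = p;
	return p;
}

static void put_pool(struct upool *p)
{
	if (--p->refcnt)
		return;

	/* last reference: unlink from the list and free */
	for (struct upool **pp = &pool_list; *pp; pp = &(*pp)->next) {
		if (*pp == p) {
			*pp = p->next;
			break;
		}
	}
	free(p);
}

int main(void)
{
	struct attrs a = { .nice = -20 };
	struct upool *p1 = get_pool(&a);
	struct upool *p2 = get_pool(&a);   /* same attrs: reuse, refcnt == 2 */
	int same = (p1 == p2);

	put_pool(p2);
	put_pool(p1);                      /* last reference: pool is freed */
	return same ? 0 : 1;
}
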
3653 struct worker_pool *pool = pwq->pool; in pwq_unbound_release_workfn() local
3665 put_unbound_pool(pool); in pwq_unbound_release_workfn()
3702 spin_lock_irqsave(&pwq->pool->lock, flags); in pwq_adjust_max_active()
3720 wake_up_worker(pwq->pool); in pwq_adjust_max_active()
3725 spin_unlock_irqrestore(&pwq->pool->lock, flags); in pwq_adjust_max_active()
3730 struct worker_pool *pool) in init_pwq() argument
3736 pwq->pool = pool; in init_pwq()
3771 struct worker_pool *pool; in alloc_unbound_pwq() local
3776 pool = get_unbound_pool(attrs); in alloc_unbound_pwq()
3777 if (!pool) in alloc_unbound_pwq()
3780 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); in alloc_unbound_pwq()
3782 put_unbound_pool(pool); in alloc_unbound_pwq()
3786 init_pwq(pwq, wq, pool); in alloc_unbound_pwq()
4105 if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) { in wq_update_unbound_numa()
4106 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask)) in wq_update_unbound_numa()
4127 spin_lock_irq(&wq->dfl_pwq->pool->lock); in wq_update_unbound_numa()
4129 spin_unlock_irq(&wq->dfl_pwq->pool->lock); in wq_update_unbound_numa()
4515 struct worker_pool *pool; in work_busy() local
4523 pool = get_work_pool(work); in work_busy()
4524 if (pool) { in work_busy()
4525 spin_lock_irqsave(&pool->lock, flags); in work_busy()
4526 if (find_worker_executing_work(pool, work)) in work_busy()
4528 spin_unlock_irqrestore(&pool->lock, flags); in work_busy()
4608 static void pr_cont_pool_info(struct worker_pool *pool) in pr_cont_pool_info() argument
4610 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask); in pr_cont_pool_info()
4611 if (pool->node != NUMA_NO_NODE) in pr_cont_pool_info()
4612 pr_cont(" node=%d", pool->node); in pr_cont_pool_info()
4613 pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice); in pr_cont_pool_info()
4632 struct worker_pool *pool = pwq->pool; in show_pwq() local
4638 pr_info(" pwq %d:", pool->id); in show_pwq()
4639 pr_cont_pool_info(pool); in show_pwq()
4644 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_pwq()
4654 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_pwq()
4669 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
4679 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
4710 struct worker_pool *pool; in show_workqueue_state() local
4734 spin_lock_irqsave(&pwq->pool->lock, flags); in show_workqueue_state()
4737 spin_unlock_irqrestore(&pwq->pool->lock, flags); in show_workqueue_state()
4747 for_each_pool(pool, pi) { in show_workqueue_state()
4751 spin_lock_irqsave(&pool->lock, flags); in show_workqueue_state()
4752 if (pool->nr_workers == pool->nr_idle) in show_workqueue_state()
4755 pr_info("pool %d:", pool->id); in show_workqueue_state()
4756 pr_cont_pool_info(pool); in show_workqueue_state()
4758 jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000, in show_workqueue_state()
4759 pool->nr_workers); in show_workqueue_state()
4760 if (pool->manager) in show_workqueue_state()
4762 task_pid_nr(pool->manager->task)); in show_workqueue_state()
4763 list_for_each_entry(worker, &pool->idle_list, entry) { in show_workqueue_state()
4770 spin_unlock_irqrestore(&pool->lock, flags); in show_workqueue_state()
4797 struct worker_pool *pool = worker->pool; in wq_worker_comm() local
4799 if (pool) { in wq_worker_comm()
4800 spin_lock_irq(&pool->lock); in wq_worker_comm()
4814 spin_unlock_irq(&pool->lock); in wq_worker_comm()
4840 struct worker_pool *pool; in unbind_workers() local
4843 for_each_cpu_worker_pool(pool, cpu) { in unbind_workers()
4845 spin_lock_irq(&pool->lock); in unbind_workers()
4854 for_each_pool_worker(worker, pool) in unbind_workers()
4857 pool->flags |= POOL_DISASSOCIATED; in unbind_workers()
4859 spin_unlock_irq(&pool->lock); in unbind_workers()
4878 atomic_set(&pool->nr_running, 0); in unbind_workers()
4885 spin_lock_irq(&pool->lock); in unbind_workers()
4886 wake_up_worker(pool); in unbind_workers()
4887 spin_unlock_irq(&pool->lock); in unbind_workers()
4897 static void rebind_workers(struct worker_pool *pool) in rebind_workers() argument
4910 for_each_pool_worker(worker, pool) in rebind_workers()
4912 pool->attrs->cpumask) < 0); in rebind_workers()
4914 spin_lock_irq(&pool->lock); in rebind_workers()
4916 pool->flags &= ~POOL_DISASSOCIATED; in rebind_workers()
4918 for_each_pool_worker(worker, pool) { in rebind_workers()
4953 spin_unlock_irq(&pool->lock); in rebind_workers()
4966 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) in restore_unbound_workers_cpumask() argument
4974 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask)) in restore_unbound_workers_cpumask()
4977 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask); in restore_unbound_workers_cpumask()
4980 for_each_pool_worker(worker, pool) in restore_unbound_workers_cpumask()
4986 struct worker_pool *pool; in workqueue_prepare_cpu() local
4988 for_each_cpu_worker_pool(pool, cpu) { in workqueue_prepare_cpu()
4989 if (pool->nr_workers) in workqueue_prepare_cpu()
4991 if (!create_worker(pool)) in workqueue_prepare_cpu()
4999 struct worker_pool *pool; in workqueue_online_cpu() local
5005 for_each_pool(pool, pi) { in workqueue_online_cpu()
5008 if (pool->cpu == cpu) in workqueue_online_cpu()
5009 rebind_workers(pool); in workqueue_online_cpu()
5010 else if (pool->cpu < 0) in workqueue_online_cpu()
5011 restore_unbound_workers_cpumask(pool, cpu); in workqueue_online_cpu()
5375 unbound_pwq_by_node(wq, node)->pool->id); in wq_pool_ids_show()
5706 struct worker_pool *pool; in wq_watchdog_timer_fn() local
5714 for_each_pool(pool, pi) { in wq_watchdog_timer_fn()
5717 if (list_empty(&pool->worklist)) in wq_watchdog_timer_fn()
5721 pool_ts = READ_ONCE(pool->watchdog_ts); in wq_watchdog_timer_fn()
5729 if (pool->cpu >= 0) { in wq_watchdog_timer_fn()
5732 pool->cpu)); in wq_watchdog_timer_fn()
5741 pr_cont_pool_info(pool); in wq_watchdog_timer_fn()
5881 struct worker_pool *pool; in workqueue_init_early() local
5884 for_each_cpu_worker_pool(pool, cpu) { in workqueue_init_early()
5885 BUG_ON(init_worker_pool(pool)); in workqueue_init_early()
5886 pool->cpu = cpu; in workqueue_init_early()
5887 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); in workqueue_init_early()
5888 pool->attrs->nice = std_nice[i++]; in workqueue_init_early()
5889 pool->node = cpu_to_node(cpu); in workqueue_init_early()
5893 BUG_ON(worker_pool_assign_id(pool)); in workqueue_init_early()
5949 struct worker_pool *pool; in workqueue_init() local
5966 for_each_cpu_worker_pool(pool, cpu) { in workqueue_init()
5967 pool->node = cpu_to_node(cpu); in workqueue_init()
5982 for_each_cpu_worker_pool(pool, cpu) { in workqueue_init()
5983 pool->flags &= ~POOL_DISASSOCIATED; in workqueue_init()
5984 BUG_ON(!create_worker(pool)); in workqueue_init()
5988 hash_for_each(unbound_pool_hash, bkt, pool, hash_node) in workqueue_init()
5989 BUG_ON(!create_worker(pool)); in workqueue_init()