Lines Matching refs:pool
199 struct worker_pool *pool; /* I: the associated pool */ member
375 #define for_each_cpu_worker_pool(pool, cpu) \ argument
376 for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
377 (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
378 (pool)++)
392 #define for_each_pool(pool, pi) \ argument
393 idr_for_each_entry(&worker_pool_idr, pool, pi) \
407 #define for_each_pool_worker(worker, pool) \ argument
408 list_for_each_entry((worker), &(pool)->workers, node) \
533 static int worker_pool_assign_id(struct worker_pool *pool) in worker_pool_assign_id() argument
539 ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE, in worker_pool_assign_id()
542 pool->id = ret; in worker_pool_assign_id()
716 (data & WORK_STRUCT_WQ_DATA_MASK))->pool; in get_work_pool()
738 (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id; in get_work_pool_id()
764 static bool __need_more_worker(struct worker_pool *pool) in __need_more_worker() argument
766 return !atomic_read(&pool->nr_running); in __need_more_worker()
777 static bool need_more_worker(struct worker_pool *pool) in need_more_worker() argument
779 return !list_empty(&pool->worklist) && __need_more_worker(pool); in need_more_worker()
783 static bool may_start_working(struct worker_pool *pool) in may_start_working() argument
785 return pool->nr_idle; in may_start_working()
789 static bool keep_working(struct worker_pool *pool) in keep_working() argument
791 return !list_empty(&pool->worklist) && in keep_working()
792 atomic_read(&pool->nr_running) <= 1; in keep_working()
796 static bool need_to_create_worker(struct worker_pool *pool) in need_to_create_worker() argument
798 return need_more_worker(pool) && !may_start_working(pool); in need_to_create_worker()
802 static bool too_many_workers(struct worker_pool *pool) in too_many_workers() argument
804 bool managing = pool->flags & POOL_MANAGER_ACTIVE; in too_many_workers()
805 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ in too_many_workers()
806 int nr_busy = pool->nr_workers - nr_idle; in too_many_workers()
816 static struct worker *first_idle_worker(struct worker_pool *pool) in first_idle_worker() argument
818 if (unlikely(list_empty(&pool->idle_list))) in first_idle_worker()
821 return list_first_entry(&pool->idle_list, struct worker, entry); in first_idle_worker()
833 static void wake_up_worker(struct worker_pool *pool) in wake_up_worker() argument
835 struct worker *worker = first_idle_worker(pool); in wake_up_worker()
857 WARN_ON_ONCE(worker->pool->cpu != cpu); in wq_worker_waking_up()
858 atomic_inc(&worker->pool->nr_running); in wq_worker_waking_up()
879 struct worker_pool *pool; in wq_worker_sleeping() local
889 pool = worker->pool; in wq_worker_sleeping()
892 if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id())) in wq_worker_sleeping()
906 if (atomic_dec_and_test(&pool->nr_running) && in wq_worker_sleeping()
907 !list_empty(&pool->worklist)) in wq_worker_sleeping()
908 to_wakeup = first_idle_worker(pool); in wq_worker_sleeping()
924 struct worker_pool *pool = worker->pool; in worker_set_flags() local
931 atomic_dec(&pool->nr_running); in worker_set_flags()
949 struct worker_pool *pool = worker->pool; in worker_clr_flags() local
963 atomic_inc(&pool->nr_running); in worker_clr_flags()
999 static struct worker *find_worker_executing_work(struct worker_pool *pool, in find_worker_executing_work() argument
1004 hash_for_each_possible(pool->busy_hash, worker, hentry, in find_worker_executing_work()
1063 lockdep_assert_held(&pwq->pool->lock); in get_pwq()
1077 lockdep_assert_held(&pwq->pool->lock); in put_pwq()
1106 spin_lock_irq(&pwq->pool->lock); in put_pwq_unlocked()
1108 spin_unlock_irq(&pwq->pool->lock); in put_pwq_unlocked()
1117 if (list_empty(&pwq->pool->worklist)) in pwq_activate_delayed_work()
1118 pwq->pool->watchdog_ts = jiffies; in pwq_activate_delayed_work()
1119 move_linked_works(work, &pwq->pool->worklist, NULL); in pwq_activate_delayed_work()
1209 struct worker_pool *pool; in try_to_grab_pending() local
1235 pool = get_work_pool(work); in try_to_grab_pending()
1236 if (!pool) in try_to_grab_pending()
1239 spin_lock(&pool->lock); in try_to_grab_pending()
1249 if (pwq && pwq->pool == pool) { in try_to_grab_pending()
1266 set_work_pool_and_keep_pending(work, pool->id); in try_to_grab_pending()
1268 spin_unlock(&pool->lock); in try_to_grab_pending()
1271 spin_unlock(&pool->lock); in try_to_grab_pending()
1296 struct worker_pool *pool = pwq->pool; in insert_work() local
1310 if (__need_more_worker(pool)) in insert_work()
1311 wake_up_worker(pool); in insert_work()
1402 if (last_pool && last_pool != pwq->pool) { in __queue_work()
1414 spin_lock(&pwq->pool->lock); in __queue_work()
1417 spin_lock(&pwq->pool->lock); in __queue_work()
1430 spin_unlock(&pwq->pool->lock); in __queue_work()
1443 spin_unlock(&pwq->pool->lock); in __queue_work()
1453 worklist = &pwq->pool->worklist; in __queue_work()
1455 pwq->pool->watchdog_ts = jiffies; in __queue_work()
1463 spin_unlock(&pwq->pool->lock); in __queue_work()
1651 struct worker_pool *pool = worker->pool; in worker_enter_idle() local
1660 pool->nr_idle++; in worker_enter_idle()
1664 list_add(&worker->entry, &pool->idle_list); in worker_enter_idle()
1666 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) in worker_enter_idle()
1667 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); in worker_enter_idle()
1675 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && in worker_enter_idle()
1676 pool->nr_workers == pool->nr_idle && in worker_enter_idle()
1677 atomic_read(&pool->nr_running)); in worker_enter_idle()
1691 struct worker_pool *pool = worker->pool; in worker_leave_idle() local
1696 pool->nr_idle--; in worker_leave_idle()
1725 struct worker_pool *pool) in worker_attach_to_pool() argument
1733 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); in worker_attach_to_pool()
1740 if (pool->flags & POOL_DISASSOCIATED) in worker_attach_to_pool()
1743 list_add_tail(&worker->node, &pool->workers); in worker_attach_to_pool()
1744 worker->pool = pool; in worker_attach_to_pool()
1759 struct worker_pool *pool = worker->pool; in worker_detach_from_pool() local
1765 worker->pool = NULL; in worker_detach_from_pool()
1767 if (list_empty(&pool->workers)) in worker_detach_from_pool()
1768 detach_completion = pool->detach_completion; in worker_detach_from_pool()
1790 static struct worker *create_worker(struct worker_pool *pool) in create_worker() argument
1797 id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL); in create_worker()
1801 worker = alloc_worker(pool->node); in create_worker()
1807 if (pool->cpu >= 0) in create_worker()
1808 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id, in create_worker()
1809 pool->attrs->nice < 0 ? "H" : ""); in create_worker()
1811 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id); in create_worker()
1813 worker->task = kthread_create_on_node(worker_thread, worker, pool->node, in create_worker()
1818 set_user_nice(worker->task, pool->attrs->nice); in create_worker()
1819 kthread_bind_mask(worker->task, pool->attrs->cpumask); in create_worker()
1822 worker_attach_to_pool(worker, pool); in create_worker()
1825 spin_lock_irq(&pool->lock); in create_worker()
1826 worker->pool->nr_workers++; in create_worker()
1829 spin_unlock_irq(&pool->lock); in create_worker()
1835 ida_simple_remove(&pool->worker_ida, id); in create_worker()
1852 struct worker_pool *pool = worker->pool; in destroy_worker() local
1854 lockdep_assert_held(&pool->lock); in destroy_worker()
1862 pool->nr_workers--; in destroy_worker()
1863 pool->nr_idle--; in destroy_worker()
1872 struct worker_pool *pool = from_timer(pool, t, idle_timer); in idle_worker_timeout() local
1874 spin_lock_irq(&pool->lock); in idle_worker_timeout()
1876 while (too_many_workers(pool)) { in idle_worker_timeout()
1881 worker = list_entry(pool->idle_list.prev, struct worker, entry); in idle_worker_timeout()
1885 mod_timer(&pool->idle_timer, expires); in idle_worker_timeout()
1892 spin_unlock_irq(&pool->lock); in idle_worker_timeout()
1920 struct worker_pool *pool = from_timer(pool, t, mayday_timer); in pool_mayday_timeout() local
1923 spin_lock_irq(&pool->lock); in pool_mayday_timeout()
1926 if (need_to_create_worker(pool)) { in pool_mayday_timeout()
1933 list_for_each_entry(work, &pool->worklist, entry) in pool_mayday_timeout()
1938 spin_unlock_irq(&pool->lock); in pool_mayday_timeout()
1940 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); in pool_mayday_timeout()
1961 static void maybe_create_worker(struct worker_pool *pool) in maybe_create_worker() argument
1962 __releases(&pool->lock) in maybe_create_worker()
1963 __acquires(&pool->lock) in maybe_create_worker()
1966 spin_unlock_irq(&pool->lock); in maybe_create_worker()
1969 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); in maybe_create_worker()
1972 if (create_worker(pool) || !need_to_create_worker(pool)) in maybe_create_worker()
1977 if (!need_to_create_worker(pool)) in maybe_create_worker()
1981 del_timer_sync(&pool->mayday_timer); in maybe_create_worker()
1982 spin_lock_irq(&pool->lock); in maybe_create_worker()
1988 if (need_to_create_worker(pool)) in maybe_create_worker()
2016 struct worker_pool *pool = worker->pool; in manage_workers() local
2018 if (pool->flags & POOL_MANAGER_ACTIVE) in manage_workers()
2021 pool->flags |= POOL_MANAGER_ACTIVE; in manage_workers()
2022 pool->manager = worker; in manage_workers()
2024 maybe_create_worker(pool); in manage_workers()
2026 pool->manager = NULL; in manage_workers()
2027 pool->flags &= ~POOL_MANAGER_ACTIVE; in manage_workers()
2047 __releases(&pool->lock) in process_one_work()
2048 __acquires(&pool->lock) in process_one_work()
2051 struct worker_pool *pool = worker->pool; in process_one_work() local
2068 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && in process_one_work()
2069 raw_smp_processor_id() != pool->cpu); in process_one_work()
2077 collision = find_worker_executing_work(pool, work); in process_one_work()
2085 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); in process_one_work()
2115 if (need_more_worker(pool)) in process_one_work()
2116 wake_up_worker(pool); in process_one_work()
2124 set_work_pool_and_clear_pending(work, pool->id); in process_one_work()
2126 spin_unlock_irq(&pool->lock); in process_one_work()
2181 spin_lock_irq(&pool->lock); in process_one_work()
2241 struct worker_pool *pool = worker->pool; in worker_thread() local
2246 spin_lock_irq(&pool->lock); in worker_thread()
2250 spin_unlock_irq(&pool->lock); in worker_thread()
2255 ida_simple_remove(&pool->worker_ida, worker->id); in worker_thread()
2264 if (!need_more_worker(pool)) in worker_thread()
2268 if (unlikely(!may_start_working(pool)) && manage_workers(worker)) in worker_thread()
2289 list_first_entry(&pool->worklist, in worker_thread()
2292 pool->watchdog_ts = jiffies; in worker_thread()
2303 } while (keep_working(pool)); in worker_thread()
2316 spin_unlock_irq(&pool->lock); in worker_thread()
2375 struct worker_pool *pool = pwq->pool; in rescuer_thread() local
2384 worker_attach_to_pool(rescuer, pool); in rescuer_thread()
2386 spin_lock_irq(&pool->lock); in rescuer_thread()
2393 list_for_each_entry_safe(work, n, &pool->worklist, entry) { in rescuer_thread()
2396 pool->watchdog_ts = jiffies; in rescuer_thread()
2414 if (need_to_create_worker(pool)) { in rescuer_thread()
2433 if (need_more_worker(pool)) in rescuer_thread()
2434 wake_up_worker(pool); in rescuer_thread()
2436 spin_unlock_irq(&pool->lock); in rescuer_thread()
2608 struct worker_pool *pool = pwq->pool; in flush_workqueue_prep_pwqs() local
2610 spin_lock_irq(&pool->lock); in flush_workqueue_prep_pwqs()
2627 spin_unlock_irq(&pool->lock); in flush_workqueue_prep_pwqs()
2827 spin_lock_irq(&pwq->pool->lock); in drain_workqueue()
2829 spin_unlock_irq(&pwq->pool->lock); in drain_workqueue()
2853 struct worker_pool *pool; in start_flush_work() local
2859 pool = get_work_pool(work); in start_flush_work()
2860 if (!pool) { in start_flush_work()
2865 spin_lock(&pool->lock); in start_flush_work()
2869 if (unlikely(pwq->pool != pool)) in start_flush_work()
2872 worker = find_worker_executing_work(pool, work); in start_flush_work()
2881 spin_unlock_irq(&pool->lock); in start_flush_work()
2900 spin_unlock_irq(&pool->lock); in start_flush_work()
3288 static int init_worker_pool(struct worker_pool *pool) in init_worker_pool() argument
3290 spin_lock_init(&pool->lock); in init_worker_pool()
3291 pool->id = -1; in init_worker_pool()
3292 pool->cpu = -1; in init_worker_pool()
3293 pool->node = NUMA_NO_NODE; in init_worker_pool()
3294 pool->flags |= POOL_DISASSOCIATED; in init_worker_pool()
3295 pool->watchdog_ts = jiffies; in init_worker_pool()
3296 INIT_LIST_HEAD(&pool->worklist); in init_worker_pool()
3297 INIT_LIST_HEAD(&pool->idle_list); in init_worker_pool()
3298 hash_init(pool->busy_hash); in init_worker_pool()
3300 timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE); in init_worker_pool()
3302 timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0); in init_worker_pool()
3304 INIT_LIST_HEAD(&pool->workers); in init_worker_pool()
3306 ida_init(&pool->worker_ida); in init_worker_pool()
3307 INIT_HLIST_NODE(&pool->hash_node); in init_worker_pool()
3308 pool->refcnt = 1; in init_worker_pool()
3311 pool->attrs = alloc_workqueue_attrs(GFP_KERNEL); in init_worker_pool()
3312 if (!pool->attrs) in init_worker_pool()
3333 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu); in rcu_free_pool() local
3335 ida_destroy(&pool->worker_ida); in rcu_free_pool()
3336 free_workqueue_attrs(pool->attrs); in rcu_free_pool()
3337 kfree(pool); in rcu_free_pool()
3351 static void put_unbound_pool(struct worker_pool *pool) in put_unbound_pool() argument
3358 if (--pool->refcnt) in put_unbound_pool()
3362 if (WARN_ON(!(pool->cpu < 0)) || in put_unbound_pool()
3363 WARN_ON(!list_empty(&pool->worklist))) in put_unbound_pool()
3367 if (pool->id >= 0) in put_unbound_pool()
3368 idr_remove(&worker_pool_idr, pool->id); in put_unbound_pool()
3369 hash_del(&pool->hash_node); in put_unbound_pool()
3376 spin_lock_irq(&pool->lock); in put_unbound_pool()
3378 !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock); in put_unbound_pool()
3379 pool->flags |= POOL_MANAGER_ACTIVE; in put_unbound_pool()
3381 while ((worker = first_idle_worker(pool))) in put_unbound_pool()
3383 WARN_ON(pool->nr_workers || pool->nr_idle); in put_unbound_pool()
3384 spin_unlock_irq(&pool->lock); in put_unbound_pool()
3387 if (!list_empty(&pool->workers)) in put_unbound_pool()
3388 pool->detach_completion = &detach_completion; in put_unbound_pool()
3391 if (pool->detach_completion) in put_unbound_pool()
3392 wait_for_completion(pool->detach_completion); in put_unbound_pool()
3395 del_timer_sync(&pool->idle_timer); in put_unbound_pool()
3396 del_timer_sync(&pool->mayday_timer); in put_unbound_pool()
3399 call_rcu_sched(&pool->rcu, rcu_free_pool); in put_unbound_pool()
3419 struct worker_pool *pool; in get_unbound_pool() local
3426 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) { in get_unbound_pool()
3427 if (wqattrs_equal(pool->attrs, attrs)) { in get_unbound_pool()
3428 pool->refcnt++; in get_unbound_pool()
3429 return pool; in get_unbound_pool()
3445 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node); in get_unbound_pool()
3446 if (!pool || init_worker_pool(pool) < 0) in get_unbound_pool()
3449 lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */ in get_unbound_pool()
3450 copy_workqueue_attrs(pool->attrs, attrs); in get_unbound_pool()
3451 pool->node = target_node; in get_unbound_pool()
3457 pool->attrs->no_numa = false; in get_unbound_pool()
3459 if (worker_pool_assign_id(pool) < 0) in get_unbound_pool()
3463 if (wq_online && !create_worker(pool)) in get_unbound_pool()
3467 hash_add(unbound_pool_hash, &pool->hash_node, hash); in get_unbound_pool()
3469 return pool; in get_unbound_pool()
3471 if (pool) in get_unbound_pool()
3472 put_unbound_pool(pool); in get_unbound_pool()
3491 struct worker_pool *pool = pwq->pool; in pwq_unbound_release_workfn() local
3503 put_unbound_pool(pool); in pwq_unbound_release_workfn()
3538 spin_lock_irqsave(&pwq->pool->lock, flags); in pwq_adjust_max_active()
3556 wake_up_worker(pwq->pool); in pwq_adjust_max_active()
3561 spin_unlock_irqrestore(&pwq->pool->lock, flags); in pwq_adjust_max_active()
3566 struct worker_pool *pool) in init_pwq() argument
3572 pwq->pool = pool; in init_pwq()
3607 struct worker_pool *pool; in alloc_unbound_pwq() local
3612 pool = get_unbound_pool(attrs); in alloc_unbound_pwq()
3613 if (!pool) in alloc_unbound_pwq()
3616 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); in alloc_unbound_pwq()
3618 put_unbound_pool(pool); in alloc_unbound_pwq()
3622 init_pwq(pwq, wq, pool); in alloc_unbound_pwq()
3938 if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) { in wq_update_unbound_numa()
3939 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask)) in wq_update_unbound_numa()
3960 spin_lock_irq(&wq->dfl_pwq->pool->lock); in wq_update_unbound_numa()
3962 spin_unlock_irq(&wq->dfl_pwq->pool->lock); in wq_update_unbound_numa()
4338 struct worker_pool *pool; in work_busy() local
4346 pool = get_work_pool(work); in work_busy()
4347 if (pool) { in work_busy()
4348 spin_lock(&pool->lock); in work_busy()
4349 if (find_worker_executing_work(pool, work)) in work_busy()
4351 spin_unlock(&pool->lock); in work_busy()
4431 static void pr_cont_pool_info(struct worker_pool *pool) in pr_cont_pool_info() argument
4433 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask); in pr_cont_pool_info()
4434 if (pool->node != NUMA_NO_NODE) in pr_cont_pool_info()
4435 pr_cont(" node=%d", pool->node); in pr_cont_pool_info()
4436 pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice); in pr_cont_pool_info()
4455 struct worker_pool *pool = pwq->pool; in show_pwq() local
4461 pr_info(" pwq %d:", pool->id); in show_pwq()
4462 pr_cont_pool_info(pool); in show_pwq()
4467 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_pwq()
4477 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_pwq()
4492 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
4502 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
4533 struct worker_pool *pool; in show_workqueue_state() local
4557 spin_lock_irqsave(&pwq->pool->lock, flags); in show_workqueue_state()
4560 spin_unlock_irqrestore(&pwq->pool->lock, flags); in show_workqueue_state()
4570 for_each_pool(pool, pi) { in show_workqueue_state()
4574 spin_lock_irqsave(&pool->lock, flags); in show_workqueue_state()
4575 if (pool->nr_workers == pool->nr_idle) in show_workqueue_state()
4578 pr_info("pool %d:", pool->id); in show_workqueue_state()
4579 pr_cont_pool_info(pool); in show_workqueue_state()
4581 jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000, in show_workqueue_state()
4582 pool->nr_workers); in show_workqueue_state()
4583 if (pool->manager) in show_workqueue_state()
4585 task_pid_nr(pool->manager->task)); in show_workqueue_state()
4586 list_for_each_entry(worker, &pool->idle_list, entry) { in show_workqueue_state()
4593 spin_unlock_irqrestore(&pool->lock, flags); in show_workqueue_state()
4620 struct worker_pool *pool = worker->pool; in wq_worker_comm() local
4622 if (pool) { in wq_worker_comm()
4623 spin_lock_irq(&pool->lock); in wq_worker_comm()
4637 spin_unlock_irq(&pool->lock); in wq_worker_comm()
4663 struct worker_pool *pool; in unbind_workers() local
4666 for_each_cpu_worker_pool(pool, cpu) { in unbind_workers()
4668 spin_lock_irq(&pool->lock); in unbind_workers()
4677 for_each_pool_worker(worker, pool) in unbind_workers()
4680 pool->flags |= POOL_DISASSOCIATED; in unbind_workers()
4682 spin_unlock_irq(&pool->lock); in unbind_workers()
4701 atomic_set(&pool->nr_running, 0); in unbind_workers()
4708 spin_lock_irq(&pool->lock); in unbind_workers()
4709 wake_up_worker(pool); in unbind_workers()
4710 spin_unlock_irq(&pool->lock); in unbind_workers()
4720 static void rebind_workers(struct worker_pool *pool) in rebind_workers() argument
4733 for_each_pool_worker(worker, pool) in rebind_workers()
4735 pool->attrs->cpumask) < 0); in rebind_workers()
4737 spin_lock_irq(&pool->lock); in rebind_workers()
4739 pool->flags &= ~POOL_DISASSOCIATED; in rebind_workers()
4741 for_each_pool_worker(worker, pool) { in rebind_workers()
4776 spin_unlock_irq(&pool->lock); in rebind_workers()
4789 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) in restore_unbound_workers_cpumask() argument
4797 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask)) in restore_unbound_workers_cpumask()
4800 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask); in restore_unbound_workers_cpumask()
4803 for_each_pool_worker(worker, pool) in restore_unbound_workers_cpumask()
4809 struct worker_pool *pool; in workqueue_prepare_cpu() local
4811 for_each_cpu_worker_pool(pool, cpu) { in workqueue_prepare_cpu()
4812 if (pool->nr_workers) in workqueue_prepare_cpu()
4814 if (!create_worker(pool)) in workqueue_prepare_cpu()
4822 struct worker_pool *pool; in workqueue_online_cpu() local
4828 for_each_pool(pool, pi) { in workqueue_online_cpu()
4831 if (pool->cpu == cpu) in workqueue_online_cpu()
4832 rebind_workers(pool); in workqueue_online_cpu()
4833 else if (pool->cpu < 0) in workqueue_online_cpu()
4834 restore_unbound_workers_cpumask(pool, cpu); in workqueue_online_cpu()
5197 unbound_pwq_by_node(wq, node)->pool->id); in wq_pool_ids_show()
5527 struct worker_pool *pool; in wq_watchdog_timer_fn() local
5535 for_each_pool(pool, pi) { in wq_watchdog_timer_fn()
5538 if (list_empty(&pool->worklist)) in wq_watchdog_timer_fn()
5542 pool_ts = READ_ONCE(pool->watchdog_ts); in wq_watchdog_timer_fn()
5550 if (pool->cpu >= 0) { in wq_watchdog_timer_fn()
5553 pool->cpu)); in wq_watchdog_timer_fn()
5562 pr_cont_pool_info(pool); in wq_watchdog_timer_fn()
5702 struct worker_pool *pool; in workqueue_init_early() local
5705 for_each_cpu_worker_pool(pool, cpu) { in workqueue_init_early()
5706 BUG_ON(init_worker_pool(pool)); in workqueue_init_early()
5707 pool->cpu = cpu; in workqueue_init_early()
5708 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); in workqueue_init_early()
5709 pool->attrs->nice = std_nice[i++]; in workqueue_init_early()
5710 pool->node = cpu_to_node(cpu); in workqueue_init_early()
5714 BUG_ON(worker_pool_assign_id(pool)); in workqueue_init_early()
5770 struct worker_pool *pool; in workqueue_init() local
5787 for_each_cpu_worker_pool(pool, cpu) { in workqueue_init()
5788 pool->node = cpu_to_node(cpu); in workqueue_init()
5803 for_each_cpu_worker_pool(pool, cpu) { in workqueue_init()
5804 pool->flags &= ~POOL_DISASSOCIATED; in workqueue_init()
5805 BUG_ON(!create_worker(pool)); in workqueue_init()
5809 hash_for_each(unbound_pool_hash, bkt, pool, hash_node) in workqueue_init()
5810 BUG_ON(!create_worker(pool)); in workqueue_init()
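The predicates collected above (source lines 764-798: __need_more_worker(), need_more_worker(), may_start_working(), keep_working(), need_to_create_worker()) are the core of the pool's concurrency management: wake or spawn a worker only when work is queued and nothing is running, keep a worker processing while the worklist is non-empty and at most one worker is running, and spawn a new worker when work is pending but no idle worker exists. Below is a minimal standalone userspace sketch of just those checks, not kernel code; the simplified struct fields and the demo in main() are illustrative assumptions (the real code operates on struct worker_pool under pool->lock and tracks nr_running with an atomic_t).

/*
 * Minimal standalone sketch (userspace, not kernel code) of the
 * concurrency-management predicates listed above from kernel/workqueue.c.
 * The simplified pool structure and the demo in main() are assumptions
 * made for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

struct pool {
	int nr_running;    /* workers currently executing work items */
	int nr_idle;       /* workers sitting on the idle list */
	int worklist_len;  /* pending work items (stand-in for !list_empty()) */
};

/* Nothing is running in the pool at the moment. */
static bool __need_more_worker(const struct pool *pool)
{
	return pool->nr_running == 0;
}

/* ... and there is actually work queued, so a worker should be woken. */
static bool need_more_worker(const struct pool *pool)
{
	return pool->worklist_len > 0 && __need_more_worker(pool);
}

/* An idle worker exists that could start running work. */
static bool may_start_working(const struct pool *pool)
{
	return pool->nr_idle > 0;
}

/* A running worker should keep processing the worklist. */
static bool keep_working(const struct pool *pool)
{
	return pool->worklist_len > 0 && pool->nr_running <= 1;
}

/* Work is pending but no idle worker is available: a new one is needed. */
static bool need_to_create_worker(const struct pool *pool)
{
	return need_more_worker(pool) && !may_start_working(pool);
}

int main(void)
{
	struct pool p = { .nr_running = 0, .nr_idle = 0, .worklist_len = 3 };

	printf("need_more_worker:      %d\n", need_more_worker(&p));
	printf("need_to_create_worker: %d\n", need_to_create_worker(&p));

	p.nr_idle = 1;      /* an idle worker becomes available */
	printf("need_to_create_worker: %d\n", need_to_create_worker(&p));

	p.nr_running = 1;   /* it picks up a work item */
	printf("keep_working:          %d\n", keep_working(&p));
	return 0;
}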