Lines matching refs:pwq (references to struct pool_workqueue in kernel/workqueue.c)

359 static void show_pwq(struct pool_workqueue *pwq);
424 #define for_each_pwq(pwq, wq) \ argument
425 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \
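The for_each_pwq() iterator above walks a workqueue's list of pool_workqueues; in the kernel it wraps list_for_each_entry_rcu(), so callers traverse under rcu_read_lock() or wq->mutex. A minimal user-space sketch of the traversal shape only (plain singly-linked list, no RCU; all types here are stand-ins, not the kernel's):

    #include <stdio.h>

    struct pwq { int id; struct pwq *next; };
    struct wq  { struct pwq *pwqs; };

    /* shape of the kernel macro, minus the RCU machinery */
    #define for_each_pwq(p, w) for ((p) = (w)->pwqs; (p); (p) = (p)->next)

    int main(void)
    {
        struct pwq b = { 1, NULL }, a = { 0, &b };
        struct wq wq = { &a };
        struct pwq *pwq;

        for_each_pwq(pwq, &wq)
            printf("pwq %d\n", pwq->id);
        return 0;
    }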
619 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, in set_work_pwq() argument
622 set_work_data(work, (unsigned long)pwq, in set_work_pwq()
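set_work_pwq() at 619-622 stores the pwq pointer directly in the work item's data word together with status flags, which is why pool_workqueues must be aligned so their low pointer bits stay free (see the BUG_ON at 3753 below). A user-space model of that pointer/flag packing; the mask and flag values are illustrative, not the kernel's actual constants:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define WORK_STRUCT_FLAG_MASK 0xfUL   /* low bits reserved for flags (model) */
    #define WORK_STRUCT_PWQ       0x4UL   /* "data points to a pwq" (model) */

    int main(void)
    {
        /* 16-byte alignment keeps the flag bits of the pointer zero,
         * like the kernel's pwq slab cache alignment */
        void *pwq = aligned_alloc(16, 64);
        assert(pwq && ((uintptr_t)pwq & WORK_STRUCT_FLAG_MASK) == 0);

        uintptr_t data = (uintptr_t)pwq | WORK_STRUCT_PWQ;     /* set_work_pwq() */
        void *back = (void *)(data & ~WORK_STRUCT_FLAG_MASK);  /* get_work_pwq() */

        printf("round-trip ok: %d\n", back == pwq);
        free(pwq);
        return 0;
    }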
1087 static void get_pwq(struct pool_workqueue *pwq) in get_pwq() argument
1089 lockdep_assert_held(&pwq->pool->lock); in get_pwq()
1090 WARN_ON_ONCE(pwq->refcnt <= 0); in get_pwq()
1091 pwq->refcnt++; in get_pwq()
1101 static void put_pwq(struct pool_workqueue *pwq) in put_pwq() argument
1103 lockdep_assert_held(&pwq->pool->lock); in put_pwq()
1104 if (likely(--pwq->refcnt)) in put_pwq()
1106 if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND))) in put_pwq()
1116 schedule_work(&pwq->unbound_release_work); in put_pwq()
1125 static void put_pwq_unlocked(struct pool_workqueue *pwq) in put_pwq_unlocked() argument
1127 if (pwq) { in put_pwq_unlocked()
1132 raw_spin_lock_irq(&pwq->pool->lock); in put_pwq_unlocked()
1133 put_pwq(pwq); in put_pwq_unlocked()
1134 raw_spin_unlock_irq(&pwq->pool->lock); in put_pwq_unlocked()
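Lines 1087-1134 show the pwq reference-counting rules: get_pwq() and put_pwq() both require the pool lock, the refcount is never bumped from zero, and the final put defers destruction to unbound_release_work. A user-space model of that discipline, assuming a pthread mutex in place of pool->lock (a simplified sketch, not the kernel implementation):

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    struct pool { pthread_mutex_t lock; };
    struct pwq  { struct pool *pool; int refcnt; };

    static void get_pwq(struct pwq *pwq)
    {
        /* caller must hold pwq->pool->lock */
        assert(pwq->refcnt > 0);   /* never resurrect a dead pwq */
        pwq->refcnt++;
    }

    static void put_pwq(struct pwq *pwq)
    {
        if (--pwq->refcnt)
            return;
        /* last reference: the kernel defers freeing to
         * unbound_release_work; here we just report it */
        printf("pwq released\n");
    }

    static void put_pwq_unlocked(struct pwq *pwq)
    {
        if (pwq) {
            pthread_mutex_lock(&pwq->pool->lock);
            put_pwq(pwq);
            pthread_mutex_unlock(&pwq->pool->lock);
        }
    }

    int main(void)
    {
        struct pool pool = { PTHREAD_MUTEX_INITIALIZER };
        struct pwq pwq = { &pool, 1 };

        pthread_mutex_lock(&pool.lock);
        get_pwq(&pwq);              /* refcnt 1 -> 2 */
        put_pwq(&pwq);              /* 2 -> 1 */
        pthread_mutex_unlock(&pool.lock);

        put_pwq_unlocked(&pwq);     /* 1 -> 0, "released" */
        return 0;
    }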
1140 struct pool_workqueue *pwq = get_work_pwq(work); in pwq_activate_delayed_work() local
1143 if (list_empty(&pwq->pool->worklist)) in pwq_activate_delayed_work()
1144 pwq->pool->watchdog_ts = jiffies; in pwq_activate_delayed_work()
1145 move_linked_works(work, &pwq->pool->worklist, NULL); in pwq_activate_delayed_work()
1147 pwq->nr_active++; in pwq_activate_delayed_work()
1150 static void pwq_activate_first_delayed(struct pool_workqueue *pwq) in pwq_activate_first_delayed() argument
1152 struct work_struct *work = list_first_entry(&pwq->delayed_works, in pwq_activate_first_delayed()
1169 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color) in pwq_dec_nr_in_flight() argument
1175 pwq->nr_in_flight[color]--; in pwq_dec_nr_in_flight()
1177 pwq->nr_active--; in pwq_dec_nr_in_flight()
1178 if (!list_empty(&pwq->delayed_works)) { in pwq_dec_nr_in_flight()
1180 if (pwq->nr_active < pwq->max_active) in pwq_dec_nr_in_flight()
1181 pwq_activate_first_delayed(pwq); in pwq_dec_nr_in_flight()
1185 if (likely(pwq->flush_color != color)) in pwq_dec_nr_in_flight()
1189 if (pwq->nr_in_flight[color]) in pwq_dec_nr_in_flight()
1193 pwq->flush_color = -1; in pwq_dec_nr_in_flight()
1199 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush)) in pwq_dec_nr_in_flight()
1200 complete(&pwq->wq->first_flusher->done); in pwq_dec_nr_in_flight()
1202 put_pwq(pwq); in pwq_dec_nr_in_flight()
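pwq_dec_nr_in_flight() runs when a work item finishes: it retires the item under its color, frees an active slot (possibly promoting a delayed work), and completes a pending flush once nr_in_flight[flush_color] drains to zero. A simplified user-space model of that accounting; the delayed list is reduced to a counter, and in the kernel a promoted work also carries its own color:

    #include <stdio.h>

    #define NR_COLORS 16

    struct pwq {
        int nr_in_flight[NR_COLORS];
        int nr_active, max_active;
        int nr_delayed;              /* stand-in for the delayed_works list */
        int flush_color;             /* -1: no flush in progress */
    };

    static void pwq_dec_nr_in_flight(struct pwq *pwq, int color)
    {
        pwq->nr_in_flight[color]--;
        pwq->nr_active--;

        /* room freed up: promote one delayed work, as
         * pwq_activate_first_delayed() does */
        if (pwq->nr_delayed && pwq->nr_active < pwq->max_active) {
            pwq->nr_delayed--;
            pwq->nr_active++;
        }

        /* is this pwq still busy on the color being flushed? */
        if (pwq->flush_color != color || pwq->nr_in_flight[color])
            return;

        pwq->flush_color = -1;
        printf("flush color %d drained on this pwq\n", color);
    }

    int main(void)
    {
        struct pwq pwq = { .nr_active = 2, .max_active = 2,
                           .nr_delayed = 1, .flush_color = 3 };

        pwq.nr_in_flight[3] = 2;
        pwq_dec_nr_in_flight(&pwq, 3);   /* one still in flight */
        pwq_dec_nr_in_flight(&pwq, 3);   /* drains color 3, completes flush */
        return 0;
    }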
1239 struct pool_workqueue *pwq; in try_to_grab_pending() local
1278 pwq = get_work_pwq(work); in try_to_grab_pending()
1279 if (pwq && pwq->pool == pool) { in try_to_grab_pending()
1293 pwq_dec_nr_in_flight(pwq, get_work_color(work)); in try_to_grab_pending()
1325 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, in insert_work() argument
1328 struct worker_pool *pool = pwq->pool; in insert_work()
1331 set_work_pwq(work, pwq, extra_flags); in insert_work()
1333 get_pwq(pwq); in insert_work()
1398 struct pool_workqueue *pwq; in __queue_work() local
1424 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); in __queue_work()
1428 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); in __queue_work()
1437 if (last_pool && last_pool != pwq->pool) { in __queue_work()
1445 pwq = worker->current_pwq; in __queue_work()
1449 raw_spin_lock(&pwq->pool->lock); in __queue_work()
1452 raw_spin_lock(&pwq->pool->lock); in __queue_work()
1463 if (unlikely(!pwq->refcnt)) { in __queue_work()
1465 raw_spin_unlock(&pwq->pool->lock); in __queue_work()
1475 trace_workqueue_queue_work(req_cpu, pwq, work); in __queue_work()
1480 pwq->nr_in_flight[pwq->work_color]++; in __queue_work()
1481 work_flags = work_color_to_flags(pwq->work_color); in __queue_work()
1483 if (likely(pwq->nr_active < pwq->max_active)) { in __queue_work()
1485 pwq->nr_active++; in __queue_work()
1486 worklist = &pwq->pool->worklist; in __queue_work()
1488 pwq->pool->watchdog_ts = jiffies; in __queue_work()
1491 worklist = &pwq->delayed_works; in __queue_work()
1494 insert_work(pwq, work, worklist, work_flags); in __queue_work()
1497 raw_spin_unlock(&pwq->pool->lock); in __queue_work()
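The __queue_work() excerpt shows the throttling decision at 1480-1491: every queued work is charged to nr_in_flight under the current work color, but it only reaches the pool's worklist while nr_active < max_active; otherwise it parks on pwq->delayed_works. A user-space model of just that decision (a sketch; the field names mirror the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_COLORS 16

    struct pwq {
        int nr_in_flight[NR_COLORS];
        int work_color;
        int nr_active, max_active;
    };

    /* returns true if the work became immediately runnable */
    static bool queue_on_pwq(struct pwq *pwq)
    {
        pwq->nr_in_flight[pwq->work_color]++;   /* always accounted */

        if (pwq->nr_active < pwq->max_active) {
            pwq->nr_active++;                   /* -> pool->worklist */
            return true;
        }
        return false;                           /* -> pwq->delayed_works */
    }

    int main(void)
    {
        struct pwq pwq = { .max_active = 1 };

        printf("%s\n", queue_on_pwq(&pwq) ? "worklist" : "delayed");
        printf("%s\n", queue_on_pwq(&pwq) ? "worklist" : "delayed");
        return 0;
    }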
2016 struct pool_workqueue *pwq = get_work_pwq(work); in send_mayday() local
2017 struct workqueue_struct *wq = pwq->wq; in send_mayday()
2025 if (list_empty(&pwq->mayday_node)) { in send_mayday()
2031 get_pwq(pwq); in send_mayday()
2032 list_add_tail(&pwq->mayday_node, &wq->maydays); in send_mayday()
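send_mayday() at 2016-2032 puts a starving pwq on its workqueue's mayday list at most once, taking a reference that the rescuer drops when done (see put_pwq() at 2554). A simplified user-space model of that handoff, with the list membership reduced to a flag; rescuer_done() is a hypothetical stand-in for the rescuer-side cleanup at 2502/2554:

    #include <stdbool.h>
    #include <stdio.h>

    struct pwq { int refcnt; bool on_mayday_list; };

    static void send_mayday(struct pwq *pwq)
    {
        if (!pwq->on_mayday_list) {
            pwq->refcnt++;                /* get_pwq(): the rescuer owns a ref */
            pwq->on_mayday_list = true;   /* list_add_tail(..., &wq->maydays) */
            printf("mayday sent, refcnt=%d\n", pwq->refcnt);
        }
    }

    static void rescuer_done(struct pwq *pwq)
    {
        pwq->on_mayday_list = false;      /* list_del_init(&pwq->mayday_node) */
        pwq->refcnt--;                    /* put_pwq() once the rescuer finishes */
    }

    int main(void)
    {
        struct pwq pwq = { .refcnt = 1 };

        send_mayday(&pwq);
        send_mayday(&pwq);                /* second call is a no-op */
        rescuer_done(&pwq);
        return 0;
    }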
2169 struct pool_workqueue *pwq = get_work_pwq(work); in process_one_work() local
2171 bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE; in process_one_work()
2207 worker->current_pwq = pwq; in process_one_work()
2214 strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN); in process_one_work()
2247 lock_map_acquire(&pwq->wq->lockdep_map); in process_one_work()
2279 lock_map_release(&pwq->wq->lockdep_map); in process_one_work()
2314 pwq_dec_nr_in_flight(pwq, work_color); in process_one_work()
2495 struct pool_workqueue *pwq = list_first_entry(&wq->maydays, in rescuer_thread() local
2497 struct worker_pool *pool = pwq->pool; in rescuer_thread()
2502 list_del_init(&pwq->mayday_node); in rescuer_thread()
2516 if (get_work_pwq(work) == pwq) { in rescuer_thread()
2536 if (pwq->nr_active && need_to_create_worker(pool)) { in rescuer_thread()
2542 if (wq->rescuer && list_empty(&pwq->mayday_node)) { in rescuer_thread()
2543 get_pwq(pwq); in rescuer_thread()
2544 list_add_tail(&pwq->mayday_node, &wq->maydays); in rescuer_thread()
2554 put_pwq(pwq); in rescuer_thread()
2653 static void insert_wq_barrier(struct pool_workqueue *pwq, in insert_wq_barrier() argument
2689 insert_work(pwq, &barr->work, head, in insert_wq_barrier()
2728 struct pool_workqueue *pwq; in flush_workqueue_prep_pwqs() local
2735 for_each_pwq(pwq, wq) { in flush_workqueue_prep_pwqs()
2736 struct worker_pool *pool = pwq->pool; in flush_workqueue_prep_pwqs()
2741 WARN_ON_ONCE(pwq->flush_color != -1); in flush_workqueue_prep_pwqs()
2743 if (pwq->nr_in_flight[flush_color]) { in flush_workqueue_prep_pwqs()
2744 pwq->flush_color = flush_color; in flush_workqueue_prep_pwqs()
2751 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color)); in flush_workqueue_prep_pwqs()
2752 pwq->work_color = work_color; in flush_workqueue_prep_pwqs()
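flush_workqueue_prep_pwqs() tags every pwq that still has works of the flush color in flight and counts them into wq->nr_pwqs_to_flush; the flusher's completion fires once that count drains (see 1199-1200 above). A user-space model of the preparation pass over a flat array of pwqs (simplified sketch, not the kernel code):

    #include <stdio.h>

    #define NR_COLORS 16
    #define NR_PWQS   4

    struct pwq {
        int nr_in_flight[NR_COLORS];
        int flush_color;                 /* -1: not part of a flush */
        int work_color;
    };

    static int prep_flush(struct pwq *pwqs, int n, int flush_color,
                          int work_color)
    {
        int nr_to_flush = 0;             /* models wq->nr_pwqs_to_flush */

        for (int i = 0; i < n; i++) {
            struct pwq *pwq = &pwqs[i];

            if (pwq->nr_in_flight[flush_color]) {
                pwq->flush_color = flush_color;  /* waits on this color */
                nr_to_flush++;
            }
            pwq->work_color = work_color;  /* new works get the next color */
        }
        return nr_to_flush;
    }

    int main(void)
    {
        struct pwq pwqs[NR_PWQS] = { 0 };

        for (int i = 0; i < NR_PWQS; i++)
            pwqs[i].flush_color = -1;
        pwqs[1].nr_in_flight[0] = 2;     /* only pwq 1 is busy on color 0 */

        printf("pwqs to flush: %d\n", prep_flush(pwqs, NR_PWQS, 0, 1));
        return 0;
    }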
2936 struct pool_workqueue *pwq; in drain_workqueue() local
2952 for_each_pwq(pwq, wq) { in drain_workqueue()
2955 raw_spin_lock_irq(&pwq->pool->lock); in drain_workqueue()
2956 drained = !pwq->nr_active && list_empty(&pwq->delayed_works); in drain_workqueue()
2957 raw_spin_unlock_irq(&pwq->pool->lock); in drain_workqueue()
2982 struct pool_workqueue *pwq; in start_flush_work() local
2995 pwq = get_work_pwq(work); in start_flush_work()
2996 if (pwq) { in start_flush_work()
2997 if (unlikely(pwq->pool != pool)) in start_flush_work()
3003 pwq = worker->current_pwq; in start_flush_work()
3006 check_flush_dependency(pwq->wq, work); in start_flush_work()
3008 insert_wq_barrier(pwq, barr, work, worker); in start_flush_work()
3021 (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) { in start_flush_work()
3022 lock_map_acquire(&pwq->wq->lockdep_map); in start_flush_work()
3023 lock_map_release(&pwq->wq->lockdep_map); in start_flush_work()
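start_flush_work() locates the pwq that is executing or still holds the work and calls insert_wq_barrier() to queue a barrier right behind it, so waiting on the barrier waits for the work. A heavily simplified user-space model of the barrier idea using a pthread condition variable; the kernel uses a completion and queues the barrier on the same worklist:

    #include <pthread.h>
    #include <stdio.h>

    struct barr { pthread_mutex_t m; pthread_cond_t c; int done; };

    static void barr_complete(struct barr *b)
    {
        pthread_mutex_lock(&b->m);
        b->done = 1;
        pthread_cond_signal(&b->c);
        pthread_mutex_unlock(&b->m);
    }

    static void barr_wait(struct barr *b)
    {
        pthread_mutex_lock(&b->m);
        while (!b->done)
            pthread_cond_wait(&b->c, &b->m);
        pthread_mutex_unlock(&b->m);
    }

    static void target_work(void) { printf("target work ran\n"); }

    static void *worker(void *arg)
    {
        struct barr *b = arg;

        target_work();        /* the work being flushed */
        barr_complete(b);     /* the barrier queued right behind it */
        return NULL;
    }

    int main(void)
    {
        struct barr b = { PTHREAD_MUTEX_INITIALIZER,
                          PTHREAD_COND_INITIALIZER, 0 };
        pthread_t t;

        pthread_create(&t, NULL, worker, &b);
        barr_wait(&b);        /* flush_work(): returns after the target ran */
        printf("flush complete\n");
        pthread_join(&t, NULL);
        return 0;
    }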
3671 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, in pwq_unbound_release_workfn() local
3673 struct workqueue_struct *wq = pwq->wq; in pwq_unbound_release_workfn()
3674 struct worker_pool *pool = pwq->pool; in pwq_unbound_release_workfn()
3681 list_del_rcu(&pwq->pwqs_node); in pwq_unbound_release_workfn()
3689 call_rcu(&pwq->rcu, rcu_free_pwq); in pwq_unbound_release_workfn()
3709 static void pwq_adjust_max_active(struct pool_workqueue *pwq) in pwq_adjust_max_active() argument
3711 struct workqueue_struct *wq = pwq->wq; in pwq_adjust_max_active()
3719 if (!freezable && pwq->max_active == wq->saved_max_active) in pwq_adjust_max_active()
3723 raw_spin_lock_irqsave(&pwq->pool->lock, flags); in pwq_adjust_max_active()
3731 pwq->max_active = wq->saved_max_active; in pwq_adjust_max_active()
3733 while (!list_empty(&pwq->delayed_works) && in pwq_adjust_max_active()
3734 pwq->nr_active < pwq->max_active) in pwq_adjust_max_active()
3735 pwq_activate_first_delayed(pwq); in pwq_adjust_max_active()
3741 wake_up_worker(pwq->pool); in pwq_adjust_max_active()
3743 pwq->max_active = 0; in pwq_adjust_max_active()
3746 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); in pwq_adjust_max_active()
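pwq_adjust_max_active() is the freeze/thaw and set_max_active workhorse: on a frozen freezable workqueue it forces max_active to 0 so nothing new becomes active, otherwise it restores saved_max_active and promotes parked delayed works. A user-space model of that state change (a sketch; the delayed list is reduced to a counter):

    #include <stdbool.h>
    #include <stdio.h>

    struct pwq {
        int nr_active, max_active;
        int nr_delayed;                  /* stand-in for delayed_works */
    };

    static void pwq_adjust_max_active(struct pwq *pwq, bool frozen, int saved)
    {
        if (!frozen) {
            pwq->max_active = saved;
            /* promote delayed works now that there is room again */
            while (pwq->nr_delayed && pwq->nr_active < pwq->max_active) {
                pwq->nr_delayed--;
                pwq->nr_active++;
            }
        } else {
            pwq->max_active = 0;         /* nothing new may become active */
        }
    }

    int main(void)
    {
        struct pwq pwq = { .nr_active = 0, .nr_delayed = 3 };

        pwq_adjust_max_active(&pwq, true, 2);   /* freeze */
        printf("frozen: active=%d delayed=%d\n",
               pwq.nr_active, pwq.nr_delayed);

        pwq_adjust_max_active(&pwq, false, 2);  /* thaw */
        printf("thawed: active=%d delayed=%d\n",
               pwq.nr_active, pwq.nr_delayed);
        return 0;
    }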
3750 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, in init_pwq() argument
3753 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); in init_pwq()
3755 memset(pwq, 0, sizeof(*pwq)); in init_pwq()
3757 pwq->pool = pool; in init_pwq()
3758 pwq->wq = wq; in init_pwq()
3759 pwq->flush_color = -1; in init_pwq()
3760 pwq->refcnt = 1; in init_pwq()
3761 INIT_LIST_HEAD(&pwq->delayed_works); in init_pwq()
3762 INIT_LIST_HEAD(&pwq->pwqs_node); in init_pwq()
3763 INIT_LIST_HEAD(&pwq->mayday_node); in init_pwq()
3764 INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn); in init_pwq()
3768 static void link_pwq(struct pool_workqueue *pwq) in link_pwq() argument
3770 struct workqueue_struct *wq = pwq->wq; in link_pwq()
3775 if (!list_empty(&pwq->pwqs_node)) in link_pwq()
3779 pwq->work_color = wq->work_color; in link_pwq()
3782 pwq_adjust_max_active(pwq); in link_pwq()
3785 list_add_rcu(&pwq->pwqs_node, &wq->pwqs); in link_pwq()
3793 struct pool_workqueue *pwq; in alloc_unbound_pwq() local
3801 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); in alloc_unbound_pwq()
3802 if (!pwq) { in alloc_unbound_pwq()
3807 init_pwq(pwq, wq, pool); in alloc_unbound_pwq()
3808 return pwq; in alloc_unbound_pwq()
3866 struct pool_workqueue *pwq) in numa_pwq_tbl_install() argument
3874 link_pwq(pwq); in numa_pwq_tbl_install()
3877 rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq); in numa_pwq_tbl_install()
4099 struct pool_workqueue *old_pwq = NULL, *pwq; in wq_update_unbound_numa() local
4118 pwq = unbound_pwq_by_node(wq, node); in wq_update_unbound_numa()
4127 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask)) in wq_update_unbound_numa()
4134 pwq = alloc_unbound_pwq(wq, target_attrs); in wq_update_unbound_numa()
4135 if (!pwq) { in wq_update_unbound_numa()
4143 old_pwq = numa_pwq_tbl_install(wq, node, pwq); in wq_update_unbound_numa()
4168 struct pool_workqueue *pwq = in alloc_and_link_pwqs() local
4173 init_pwq(pwq, wq, &cpu_pools[highpri]); in alloc_and_link_pwqs()
4176 link_pwq(pwq); in alloc_and_link_pwqs()
4248 struct pool_workqueue *pwq; in alloc_workqueue() local
4315 for_each_pwq(pwq, wq) in alloc_workqueue()
4316 pwq_adjust_max_active(pwq); in alloc_workqueue()
4338 static bool pwq_busy(struct pool_workqueue *pwq) in pwq_busy() argument
4343 if (pwq->nr_in_flight[i]) in pwq_busy()
4346 if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1)) in pwq_busy()
4348 if (pwq->nr_active || !list_empty(&pwq->delayed_works)) in pwq_busy()
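pwq_busy() is destroy_workqueue()'s sanity check: any color still in flight, any outside reference (only the default pwq may legitimately keep refcnt > 1), or any active or delayed work makes destruction unsafe. A user-space model of the same predicate; is_dfl_pwq replaces the kernel's pwq != pwq->wq->dfl_pwq comparison:

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_COLORS 16

    struct pwq {
        int nr_in_flight[NR_COLORS];
        int refcnt;
        bool is_dfl_pwq;
        int nr_active, nr_delayed;
    };

    static bool pwq_busy(const struct pwq *pwq)
    {
        for (int i = 0; i < NR_COLORS; i++)
            if (pwq->nr_in_flight[i])
                return true;                 /* works still retiring */
        if (!pwq->is_dfl_pwq && pwq->refcnt > 1)
            return true;                     /* someone else holds a ref */
        return pwq->nr_active || pwq->nr_delayed;
    }

    int main(void)
    {
        struct pwq idle = { .refcnt = 1 };

        printf("busy: %d\n", pwq_busy(&idle));  /* 0: safe to destroy */
        return 0;
    }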
4362 struct pool_workqueue *pwq; in destroy_workqueue() local
4394 for_each_pwq(pwq, wq) { in destroy_workqueue()
4395 raw_spin_lock_irq(&pwq->pool->lock); in destroy_workqueue()
4396 if (WARN_ON(pwq_busy(pwq))) { in destroy_workqueue()
4399 show_pwq(pwq); in destroy_workqueue()
4400 raw_spin_unlock_irq(&pwq->pool->lock); in destroy_workqueue()
4406 raw_spin_unlock_irq(&pwq->pool->lock); in destroy_workqueue()
4431 pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]); in destroy_workqueue()
4433 put_pwq_unlocked(pwq); in destroy_workqueue()
4440 pwq = wq->dfl_pwq; in destroy_workqueue()
4442 put_pwq_unlocked(pwq); in destroy_workqueue()
4459 struct pool_workqueue *pwq; in workqueue_set_max_active() local
4472 for_each_pwq(pwq, wq) in workqueue_set_max_active()
4473 pwq_adjust_max_active(pwq); in workqueue_set_max_active()
4530 struct pool_workqueue *pwq; in workqueue_congested() local
4540 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); in workqueue_congested()
4542 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); in workqueue_congested()
4544 ret = !list_empty(&pwq->delayed_works); in workqueue_congested()
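workqueue_congested() reports congestion purely from the delayed list: a non-empty pwq->delayed_works means max_active throttling is in effect on that CPU. A minimal user-space sketch of the predicate:

    #include <stdbool.h>

    struct pwq { int nr_delayed; };

    static bool workqueue_congested(const struct pwq *pwq)
    {
        return pwq->nr_delayed != 0;   /* !list_empty(&pwq->delayed_works) */
    }

    int main(void)
    {
        struct pwq pwq = { 0 };

        return workqueue_congested(&pwq);  /* 0: not congested */
    }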
4627 struct pool_workqueue *pwq = NULL; in print_worker_info() local
4645 copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq)); in print_worker_info()
4646 copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq)); in print_worker_info()
4680 static void show_pwq(struct pool_workqueue *pwq) in show_pwq() argument
4682 struct worker_pool *pool = pwq->pool; in show_pwq()
4692 pwq->nr_active, pwq->max_active, pwq->refcnt, in show_pwq()
4693 !list_empty(&pwq->mayday_node) ? " MAYDAY" : ""); in show_pwq()
4696 if (worker->current_pwq == pwq) { in show_pwq()
4706 if (worker->current_pwq != pwq) in show_pwq()
4721 if (get_work_pwq(work) == pwq) { in show_pwq()
4731 if (get_work_pwq(work) != pwq) in show_pwq()
4740 if (!list_empty(&pwq->delayed_works)) { in show_pwq()
4744 list_for_each_entry(work, &pwq->delayed_works, entry) { in show_pwq()
4770 struct pool_workqueue *pwq; in show_workqueue_state() local
4773 for_each_pwq(pwq, wq) { in show_workqueue_state()
4774 if (pwq->nr_active || !list_empty(&pwq->delayed_works)) { in show_workqueue_state()
4784 for_each_pwq(pwq, wq) { in show_workqueue_state()
4785 raw_spin_lock_irqsave(&pwq->pool->lock, flags); in show_workqueue_state()
4786 if (pwq->nr_active || !list_empty(&pwq->delayed_works)) in show_workqueue_state()
4787 show_pwq(pwq); in show_workqueue_state()
4788 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); in show_workqueue_state()
5170 struct pool_workqueue *pwq; in freeze_workqueues_begin() local
5179 for_each_pwq(pwq, wq) in freeze_workqueues_begin()
5180 pwq_adjust_max_active(pwq); in freeze_workqueues_begin()
5204 struct pool_workqueue *pwq; in freeze_workqueues_busy() local
5218 for_each_pwq(pwq, wq) { in freeze_workqueues_busy()
5219 WARN_ON_ONCE(pwq->nr_active < 0); in freeze_workqueues_busy()
5220 if (pwq->nr_active) { in freeze_workqueues_busy()
5245 struct pool_workqueue *pwq; in thaw_workqueues() local
5257 for_each_pwq(pwq, wq) in thaw_workqueues()
5258 pwq_adjust_max_active(pwq); in thaw_workqueues()