Lines Matching refs:pwq (all references below are in kernel/workqueue.c)

378 static void show_pwq(struct pool_workqueue *pwq);
444 #define for_each_pwq(pwq, wq) \ argument
445 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \
639 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, in set_work_pwq() argument
642 set_work_data(work, (unsigned long)pwq, in set_work_pwq()
1110 static void get_pwq(struct pool_workqueue *pwq) in get_pwq() argument
1112 lockdep_assert_held(&pwq->pool->lock); in get_pwq()
1113 WARN_ON_ONCE(pwq->refcnt <= 0); in get_pwq()
1114 pwq->refcnt++; in get_pwq()
1124 static void put_pwq(struct pool_workqueue *pwq) in put_pwq() argument
1126 lockdep_assert_held(&pwq->pool->lock); in put_pwq()
1127 if (likely(--pwq->refcnt)) in put_pwq()
1129 if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND))) in put_pwq()
1139 schedule_work(&pwq->unbound_release_work); in put_pwq()
1148 static void put_pwq_unlocked(struct pool_workqueue *pwq) in put_pwq_unlocked() argument
1150 if (pwq) { in put_pwq_unlocked()
1155 raw_spin_lock_irq(&pwq->pool->lock); in put_pwq_unlocked()
1156 put_pwq(pwq); in put_pwq_unlocked()
1157 raw_spin_unlock_irq(&pwq->pool->lock); in put_pwq_unlocked()
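
The three helpers above (get_pwq(), put_pwq(), put_pwq_unlocked()) are plain integer refcounting under the owning pool's lock; the final put does not free the pwq directly but schedules its unbound release work. Below is a minimal userspace sketch of the same pattern. The fake_* names are assumptions and a pthread mutex stands in for pool->lock, so this is an illustration, not the kernel code.

/*
 * Userspace sketch of the get_pwq()/put_pwq()/put_pwq_unlocked() pattern.
 * All fake_* names are illustrative; the real put_pwq() does not free
 * directly but schedules pwq->unbound_release_work.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct fake_pool {
	pthread_mutex_t lock;
};

struct fake_pwq {
	struct fake_pool *pool;
	int refcnt;			/* protected by pool->lock */
};

/* Caller must hold pwq->pool->lock, as lockdep_assert_held() checks above. */
static void fake_get_pwq(struct fake_pwq *pwq)
{
	assert(pwq->refcnt > 0);	/* taking a ref on a released pwq is a bug */
	pwq->refcnt++;
}

static void fake_put_pwq(struct fake_pwq *pwq)
{
	if (--pwq->refcnt)
		return;
	/* last reference: the kernel schedules the unbound release work here */
	printf("pwq released\n");
}

/* Grabs the pool lock around the put, like put_pwq_unlocked(). */
static void fake_put_pwq_unlocked(struct fake_pwq *pwq)
{
	if (pwq) {
		pthread_mutex_lock(&pwq->pool->lock);
		fake_put_pwq(pwq);
		pthread_mutex_unlock(&pwq->pool->lock);
	}
}

int main(void)
{
	struct fake_pool pool = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct fake_pwq pwq = { .pool = &pool, .refcnt = 1 };

	pthread_mutex_lock(&pool.lock);
	fake_get_pwq(&pwq);		/* 1 -> 2 */
	pthread_mutex_unlock(&pool.lock);

	fake_put_pwq_unlocked(&pwq);	/* 2 -> 1 */
	fake_put_pwq_unlocked(&pwq);	/* 1 -> 0, prints "pwq released" */
	return 0;
}
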
1163 struct pool_workqueue *pwq = get_work_pwq(work); in pwq_activate_inactive_work() local
1166 if (list_empty(&pwq->pool->worklist)) in pwq_activate_inactive_work()
1167 pwq->pool->watchdog_ts = jiffies; in pwq_activate_inactive_work()
1168 move_linked_works(work, &pwq->pool->worklist, NULL); in pwq_activate_inactive_work()
1170 pwq->nr_active++; in pwq_activate_inactive_work()
1173 static void pwq_activate_first_inactive(struct pool_workqueue *pwq) in pwq_activate_first_inactive() argument
1175 struct work_struct *work = list_first_entry(&pwq->inactive_works, in pwq_activate_first_inactive()
1192 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data) in pwq_dec_nr_in_flight() argument
1197 pwq->nr_active--; in pwq_dec_nr_in_flight()
1198 if (!list_empty(&pwq->inactive_works)) { in pwq_dec_nr_in_flight()
1200 if (pwq->nr_active < pwq->max_active) in pwq_dec_nr_in_flight()
1201 pwq_activate_first_inactive(pwq); in pwq_dec_nr_in_flight()
1205 pwq->nr_in_flight[color]--; in pwq_dec_nr_in_flight()
1208 if (likely(pwq->flush_color != color)) in pwq_dec_nr_in_flight()
1212 if (pwq->nr_in_flight[color]) in pwq_dec_nr_in_flight()
1216 pwq->flush_color = -1; in pwq_dec_nr_in_flight()
1222 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush)) in pwq_dec_nr_in_flight()
1223 complete(&pwq->wq->first_flusher->done); in pwq_dec_nr_in_flight()
1225 put_pwq(pwq); in pwq_dec_nr_in_flight()
1262 struct pool_workqueue *pwq; in try_to_grab_pending() local
1301 pwq = get_work_pwq(work); in try_to_grab_pending()
1302 if (pwq && pwq->pool == pool) { in try_to_grab_pending()
1320 pwq_dec_nr_in_flight(pwq, *work_data_bits(work)); in try_to_grab_pending()
1352 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, in insert_work() argument
1355 struct worker_pool *pool = pwq->pool; in insert_work()
1361 set_work_pwq(work, pwq, extra_flags); in insert_work()
1363 get_pwq(pwq); in insert_work()
1421 struct pool_workqueue *pwq; in __queue_work() local
1446 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); in __queue_work()
1450 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); in __queue_work()
1459 if (last_pool && last_pool != pwq->pool) { in __queue_work()
1467 pwq = worker->current_pwq; in __queue_work()
1471 raw_spin_lock(&pwq->pool->lock); in __queue_work()
1474 raw_spin_lock(&pwq->pool->lock); in __queue_work()
1485 if (unlikely(!pwq->refcnt)) { in __queue_work()
1487 raw_spin_unlock(&pwq->pool->lock); in __queue_work()
1497 trace_workqueue_queue_work(req_cpu, pwq, work); in __queue_work()
1502 pwq->nr_in_flight[pwq->work_color]++; in __queue_work()
1503 work_flags = work_color_to_flags(pwq->work_color); in __queue_work()
1505 if (likely(pwq->nr_active < pwq->max_active)) { in __queue_work()
1507 pwq->nr_active++; in __queue_work()
1508 worklist = &pwq->pool->worklist; in __queue_work()
1510 pwq->pool->watchdog_ts = jiffies; in __queue_work()
1513 worklist = &pwq->inactive_works; in __queue_work()
1517 insert_work(pwq, work, worklist, work_flags); in __queue_work()
1520 raw_spin_unlock(&pwq->pool->lock); in __queue_work()
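
__queue_work() above, together with pwq_dec_nr_in_flight() earlier, shows the per-pwq max_active throttle: a newly queued work lands on the pool's worklist only while nr_active is below max_active, otherwise it is parked on inactive_works, and each completion pulls the next parked work in. The following is a self-contained sketch of that bookkeeping; the fake_* names, the LIFO lists, and the absence of locking are all simplifying assumptions.

/*
 * Sketch of the max_active throttle visible in __queue_work() and
 * pwq_dec_nr_in_flight() above.  Simplified and illustrative: fake_*
 * names, no locking, no pool, and a plain LIFO list stands in for the
 * kernel's list_head based queues.
 */
#include <stdio.h>

struct fake_work {
	struct fake_work *next;
	int id;
};

struct fake_pwq {
	int nr_active;			/* works currently runnable */
	int max_active;			/* throttle limit */
	struct fake_work *worklist;	/* runnable works */
	struct fake_work *inactive;	/* works held back by the throttle */
};

static void push(struct fake_work **list, struct fake_work *w)
{
	w->next = *list;
	*list = w;
}

static struct fake_work *pop(struct fake_work **list)
{
	struct fake_work *w = *list;

	if (w)
		*list = w->next;
	return w;
}

/* Enqueue side: the nr_active < max_active check in __queue_work(). */
static void fake_queue_work(struct fake_pwq *pwq, struct fake_work *w)
{
	if (pwq->nr_active < pwq->max_active) {
		pwq->nr_active++;
		push(&pwq->worklist, w);
	} else {
		push(&pwq->inactive, w);	/* parked until a slot frees */
	}
}

/* Completion side: pwq_dec_nr_in_flight() activating the next parked work. */
static void fake_work_done(struct fake_pwq *pwq)
{
	pwq->nr_active--;
	if (pwq->inactive && pwq->nr_active < pwq->max_active) {
		pwq->nr_active++;
		push(&pwq->worklist, pop(&pwq->inactive));
	}
}

int main(void)
{
	struct fake_pwq pwq = { .max_active = 2 };
	struct fake_work w[4] = { { .id = 0 }, { .id = 1 }, { .id = 2 }, { .id = 3 } };

	for (int i = 0; i < 4; i++)
		fake_queue_work(&pwq, &w[i]);
	printf("nr_active after queueing 4 works: %d\n", pwq.nr_active);	/* 2 */

	fake_work_done(&pwq);	/* one work finishes, one parked work is activated */
	printf("nr_active after one completion:  %d\n", pwq.nr_active);	/* 2 */
	return 0;
}

The same activation loop reappears in pwq_adjust_max_active() further down, which re-admits parked works when a frozen workqueue is thawed or its limit is raised.
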
2032 struct pool_workqueue *pwq = get_work_pwq(work); in send_mayday() local
2033 struct workqueue_struct *wq = pwq->wq; in send_mayday()
2041 if (list_empty(&pwq->mayday_node)) { in send_mayday()
2047 get_pwq(pwq); in send_mayday()
2048 list_add_tail(&pwq->mayday_node, &wq->maydays); in send_mayday()
2185 struct pool_workqueue *pwq = get_work_pwq(work); in process_one_work() local
2187 bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE; in process_one_work()
2223 worker->current_pwq = pwq; in process_one_work()
2231 strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN); in process_one_work()
2264 lock_map_acquire(&pwq->wq->lockdep_map); in process_one_work()
2296 lock_map_release(&pwq->wq->lockdep_map); in process_one_work()
2332 pwq_dec_nr_in_flight(pwq, work_data); in process_one_work()
2513 struct pool_workqueue *pwq = list_first_entry(&wq->maydays, in rescuer_thread() local
2515 struct worker_pool *pool = pwq->pool; in rescuer_thread()
2520 list_del_init(&pwq->mayday_node); in rescuer_thread()
2534 if (get_work_pwq(work) == pwq) { in rescuer_thread()
2554 if (pwq->nr_active && need_to_create_worker(pool)) { in rescuer_thread()
2560 if (wq->rescuer && list_empty(&pwq->mayday_node)) { in rescuer_thread()
2561 get_pwq(pwq); in rescuer_thread()
2562 list_add_tail(&pwq->mayday_node, &wq->maydays); in rescuer_thread()
2572 put_pwq(pwq); in rescuer_thread()
2671 static void insert_wq_barrier(struct pool_workqueue *pwq, in insert_wq_barrier() argument
2712 pwq->nr_in_flight[work_color]++; in insert_wq_barrier()
2716 insert_work(pwq, &barr->work, head, work_flags); in insert_wq_barrier()
2754 struct pool_workqueue *pwq; in flush_workqueue_prep_pwqs() local
2761 for_each_pwq(pwq, wq) { in flush_workqueue_prep_pwqs()
2762 struct worker_pool *pool = pwq->pool; in flush_workqueue_prep_pwqs()
2767 WARN_ON_ONCE(pwq->flush_color != -1); in flush_workqueue_prep_pwqs()
2769 if (pwq->nr_in_flight[flush_color]) { in flush_workqueue_prep_pwqs()
2770 pwq->flush_color = flush_color; in flush_workqueue_prep_pwqs()
2777 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color)); in flush_workqueue_prep_pwqs()
2778 pwq->work_color = work_color; in flush_workqueue_prep_pwqs()
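
flush_workqueue_prep_pwqs() above and the flush branch of pwq_dec_nr_in_flight() earlier cooperate through work colors: the prep pass marks every pwq that still has works of the flushed color in flight and counts it in wq->nr_pwqs_to_flush, and the completion path clears the mark and wakes the first flusher once the last such pwq drains. A condensed sketch under assumed fake_* names (no locks, no completion object):

/*
 * Sketch of the color-based flush accounting spread across
 * flush_workqueue_prep_pwqs() and pwq_dec_nr_in_flight() above.
 * fake_* names and the flat arrays are assumptions; the real code
 * walks wq->pwqs under the pool locks and uses a completion.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_COLORS	16
#define NR_PWQS		4

struct fake_pwq {
	int nr_in_flight[NR_COLORS];	/* works queued with each color */
	int flush_color;		/* color being flushed, -1 if none */
};

struct fake_wq {
	struct fake_pwq pwqs[NR_PWQS];
	int nr_pwqs_to_flush;		/* pwqs that still owe the flusher */
};

/* Prep pass: mark every pwq that has works of @color in flight. */
static bool fake_prep_flush(struct fake_wq *wq, int color)
{
	bool wait = false;

	for (int i = 0; i < NR_PWQS; i++) {
		struct fake_pwq *pwq = &wq->pwqs[i];

		if (pwq->nr_in_flight[color]) {
			pwq->flush_color = color;
			wq->nr_pwqs_to_flush++;
			wait = true;
		}
	}
	return wait;		/* caller must wait for the flush to finish */
}

/* Completion path: the flush handling in pwq_dec_nr_in_flight(). */
static void fake_work_finished(struct fake_wq *wq, struct fake_pwq *pwq, int color)
{
	pwq->nr_in_flight[color]--;

	if (pwq->flush_color != color)	/* this pwq is not being flushed on @color */
		return;
	if (pwq->nr_in_flight[color])	/* works of that color still in flight */
		return;

	pwq->flush_color = -1;
	if (--wq->nr_pwqs_to_flush == 0)
		printf("flush complete, wake the first flusher\n");
}

int main(void)
{
	struct fake_wq wq = { 0 };

	for (int i = 0; i < NR_PWQS; i++)
		wq.pwqs[i].flush_color = -1;

	wq.pwqs[0].nr_in_flight[3] = 1;		/* one color-3 work on pwq 0 */
	if (fake_prep_flush(&wq, 3))
		fake_work_finished(&wq, &wq.pwqs[0], 3);	/* prints the message */
	return 0;
}
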
2962 struct pool_workqueue *pwq; in drain_workqueue() local
2978 for_each_pwq(pwq, wq) { in drain_workqueue()
2981 raw_spin_lock_irq(&pwq->pool->lock); in drain_workqueue()
2982 drained = !pwq->nr_active && list_empty(&pwq->inactive_works); in drain_workqueue()
2983 raw_spin_unlock_irq(&pwq->pool->lock); in drain_workqueue()
3008 struct pool_workqueue *pwq; in start_flush_work() local
3021 pwq = get_work_pwq(work); in start_flush_work()
3022 if (pwq) { in start_flush_work()
3023 if (unlikely(pwq->pool != pool)) in start_flush_work()
3029 pwq = worker->current_pwq; in start_flush_work()
3032 check_flush_dependency(pwq->wq, work); in start_flush_work()
3034 insert_wq_barrier(pwq, barr, work, worker); in start_flush_work()
3047 (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) { in start_flush_work()
3048 lock_map_acquire(&pwq->wq->lockdep_map); in start_flush_work()
3049 lock_map_release(&pwq->wq->lockdep_map); in start_flush_work()
3704 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, in pwq_unbound_release_workfn() local
3706 struct workqueue_struct *wq = pwq->wq; in pwq_unbound_release_workfn()
3707 struct worker_pool *pool = pwq->pool; in pwq_unbound_release_workfn()
3714 if (!list_empty(&pwq->pwqs_node)) { in pwq_unbound_release_workfn()
3719 list_del_rcu(&pwq->pwqs_node); in pwq_unbound_release_workfn()
3728 call_rcu(&pwq->rcu, rcu_free_pwq); in pwq_unbound_release_workfn()
3748 static void pwq_adjust_max_active(struct pool_workqueue *pwq) in pwq_adjust_max_active() argument
3750 struct workqueue_struct *wq = pwq->wq; in pwq_adjust_max_active()
3758 if (!freezable && pwq->max_active == wq->saved_max_active) in pwq_adjust_max_active()
3762 raw_spin_lock_irqsave(&pwq->pool->lock, flags); in pwq_adjust_max_active()
3772 pwq->max_active = wq->saved_max_active; in pwq_adjust_max_active()
3774 while (!list_empty(&pwq->inactive_works) && in pwq_adjust_max_active()
3775 pwq->nr_active < pwq->max_active) { in pwq_adjust_max_active()
3776 pwq_activate_first_inactive(pwq); in pwq_adjust_max_active()
3787 wake_up_worker(pwq->pool); in pwq_adjust_max_active()
3789 pwq->max_active = 0; in pwq_adjust_max_active()
3792 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); in pwq_adjust_max_active()
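
pwq_adjust_max_active() above is where freezing meets the throttle: while a freezable workqueue is frozen its pwqs get max_active = 0 so nothing new starts, and on thaw the saved limit is restored and parked works are re-activated, using the same loop as in the throttle sketch earlier. A bare sketch of just that decision, with assumed names and the activation loop elided:

/*
 * Sketch of the freeze/thaw decision in pwq_adjust_max_active().
 * fake_* names are assumptions; the real function holds the pool lock,
 * re-activates works from inactive_works and wakes the pool's workers.
 */
struct fake_pwq {
	int max_active;
};

static void fake_adjust_max_active(struct fake_pwq *pwq,
				   int saved_max_active, int frozen)
{
	if (frozen)
		pwq->max_active = 0;			/* nothing new may start while frozen */
	else
		pwq->max_active = saved_max_active;	/* restore the configured limit */
}
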
3796 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, in init_pwq() argument
3799 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); in init_pwq()
3801 memset(pwq, 0, sizeof(*pwq)); in init_pwq()
3803 pwq->pool = pool; in init_pwq()
3804 pwq->wq = wq; in init_pwq()
3805 pwq->flush_color = -1; in init_pwq()
3806 pwq->refcnt = 1; in init_pwq()
3807 INIT_LIST_HEAD(&pwq->inactive_works); in init_pwq()
3808 INIT_LIST_HEAD(&pwq->pwqs_node); in init_pwq()
3809 INIT_LIST_HEAD(&pwq->mayday_node); in init_pwq()
3810 INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn); in init_pwq()
3814 static void link_pwq(struct pool_workqueue *pwq) in link_pwq() argument
3816 struct workqueue_struct *wq = pwq->wq; in link_pwq()
3821 if (!list_empty(&pwq->pwqs_node)) in link_pwq()
3825 pwq->work_color = wq->work_color; in link_pwq()
3828 pwq_adjust_max_active(pwq); in link_pwq()
3831 list_add_rcu(&pwq->pwqs_node, &wq->pwqs); in link_pwq()
3839 struct pool_workqueue *pwq; in alloc_unbound_pwq() local
3847 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); in alloc_unbound_pwq()
3848 if (!pwq) { in alloc_unbound_pwq()
3853 init_pwq(pwq, wq, pool); in alloc_unbound_pwq()
3854 return pwq; in alloc_unbound_pwq()
3912 struct pool_workqueue *pwq) in numa_pwq_tbl_install() argument
3920 link_pwq(pwq); in numa_pwq_tbl_install()
3923 rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq); in numa_pwq_tbl_install()
4145 struct pool_workqueue *old_pwq = NULL, *pwq; in wq_update_unbound_numa() local
4164 pwq = unbound_pwq_by_node(wq, node); in wq_update_unbound_numa()
4173 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask)) in wq_update_unbound_numa()
4180 pwq = alloc_unbound_pwq(wq, target_attrs); in wq_update_unbound_numa()
4181 if (!pwq) { in wq_update_unbound_numa()
4189 old_pwq = numa_pwq_tbl_install(wq, node, pwq); in wq_update_unbound_numa()
4214 struct pool_workqueue *pwq = in alloc_and_link_pwqs() local
4219 init_pwq(pwq, wq, &cpu_pools[highpri]); in alloc_and_link_pwqs()
4222 link_pwq(pwq); in alloc_and_link_pwqs()
4294 struct pool_workqueue *pwq; in alloc_workqueue() local
4361 for_each_pwq(pwq, wq) in alloc_workqueue()
4362 pwq_adjust_max_active(pwq); in alloc_workqueue()
4384 static bool pwq_busy(struct pool_workqueue *pwq) in pwq_busy() argument
4389 if (pwq->nr_in_flight[i]) in pwq_busy()
4392 if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1)) in pwq_busy()
4394 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) in pwq_busy()
4408 struct pool_workqueue *pwq; in destroy_workqueue() local
4440 for_each_pwq(pwq, wq) { in destroy_workqueue()
4441 raw_spin_lock_irq(&pwq->pool->lock); in destroy_workqueue()
4442 if (WARN_ON(pwq_busy(pwq))) { in destroy_workqueue()
4445 show_pwq(pwq); in destroy_workqueue()
4446 raw_spin_unlock_irq(&pwq->pool->lock); in destroy_workqueue()
4452 raw_spin_unlock_irq(&pwq->pool->lock); in destroy_workqueue()
4477 pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]); in destroy_workqueue()
4479 put_pwq_unlocked(pwq); in destroy_workqueue()
4486 pwq = wq->dfl_pwq; in destroy_workqueue()
4488 put_pwq_unlocked(pwq); in destroy_workqueue()
4505 struct pool_workqueue *pwq; in workqueue_set_max_active() local
4518 for_each_pwq(pwq, wq) in workqueue_set_max_active()
4519 pwq_adjust_max_active(pwq); in workqueue_set_max_active()
4576 struct pool_workqueue *pwq; in workqueue_congested() local
4586 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); in workqueue_congested()
4588 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); in workqueue_congested()
4590 ret = !list_empty(&pwq->inactive_works); in workqueue_congested()
4673 struct pool_workqueue *pwq = NULL; in print_worker_info() local
4691 copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq)); in print_worker_info()
4692 copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq)); in print_worker_info()
4726 static void show_pwq(struct pool_workqueue *pwq) in show_pwq() argument
4728 struct worker_pool *pool = pwq->pool; in show_pwq()
4738 pwq->nr_active, pwq->max_active, pwq->refcnt, in show_pwq()
4739 !list_empty(&pwq->mayday_node) ? " MAYDAY" : ""); in show_pwq()
4742 if (worker->current_pwq == pwq) { in show_pwq()
4752 if (worker->current_pwq != pwq) in show_pwq()
4767 if (get_work_pwq(work) == pwq) { in show_pwq()
4777 if (get_work_pwq(work) != pwq) in show_pwq()
4786 if (!list_empty(&pwq->inactive_works)) { in show_pwq()
4790 list_for_each_entry(work, &pwq->inactive_works, entry) { in show_pwq()
4804 struct pool_workqueue *pwq; in show_one_workqueue() local
4808 for_each_pwq(pwq, wq) { in show_one_workqueue()
4809 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { in show_one_workqueue()
4819 for_each_pwq(pwq, wq) { in show_one_workqueue()
4820 raw_spin_lock_irqsave(&pwq->pool->lock, flags); in show_one_workqueue()
4821 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { in show_one_workqueue()
4828 show_pwq(pwq); in show_one_workqueue()
4831 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); in show_one_workqueue()
5240 struct pool_workqueue *pwq; in freeze_workqueues_begin() local
5249 for_each_pwq(pwq, wq) in freeze_workqueues_begin()
5250 pwq_adjust_max_active(pwq); in freeze_workqueues_begin()
5274 struct pool_workqueue *pwq; in freeze_workqueues_busy() local
5288 for_each_pwq(pwq, wq) { in freeze_workqueues_busy()
5289 WARN_ON_ONCE(pwq->nr_active < 0); in freeze_workqueues_busy()
5290 if (pwq->nr_active) { in freeze_workqueues_busy()
5315 struct pool_workqueue *pwq; in thaw_workqueues() local
5327 for_each_pwq(pwq, wq) in thaw_workqueues()
5328 pwq_adjust_max_active(pwq); in thaw_workqueues()