Lines matching +refs:dev +refs:id +refs:attrs — identifier cross-reference hits in kernel/workqueue.c. Each entry gives the source line number, the matching code, and the enclosing function; a trailing "member", "argument", or "local" marks how the identifier is used on that line.
157 int id; /* I: pool ID */ member
193 struct workqueue_attrs *attrs; /* I: worker attributes */ member
623 pool->id = ret; in worker_pool_assign_id()
793 return work_struct_pwq(data)->pool->id; in get_work_pool_id()
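
The four hits above are pool-ID bookkeeping: worker_pool_assign_id() stores an IDR-allocated ID in pool->id, and get_work_pool_id() later recovers it from a work item's data word. A minimal userspace sketch of the encode/decode idea follows; the shift value and helper names are illustrative assumptions, not the kernel's actual WORK_OFFQ bit layout.

    #include <stdio.h>

    /* Assumed shift, for illustration only; the kernel's real encoding
     * lives in the WORK_OFFQ_* constants. */
    #define POOL_ID_SHIFT 5

    static unsigned long set_pool_id(int pool_id)
    {
        return (unsigned long)pool_id << POOL_ID_SHIFT;
    }

    static int get_pool_id(unsigned long data)
    {
        return (int)(data >> POOL_ID_SHIFT);
    }

    int main(void)
    {
        unsigned long data = set_pool_id(42);

        printf("pool id = %d\n", get_pool_id(data)); /* prints 42 */
        return 0;
    }
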
1134 if (!pool->attrs->affn_strict && in kick_pool()
1135 !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) { in kick_pool()
1138 p->wake_cpu = cpumask_any_distribute(pool->attrs->__pod_cpumask); in kick_pool()
1612 set_work_pool_and_keep_pending(work, pool->id); in try_to_grab_pending()
2084 if (pool->cpu < 0 && pool->attrs->affn_strict) in pool_allowed_cpus()
2085 return pool->attrs->__pod_cpumask; in pool_allowed_cpus()
2087 return pool->attrs->cpumask; in pool_allowed_cpus()
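
The kick_pool() and pool_allowed_cpus() hits show the two ways pool->attrs steers CPU placement: non-strict wakeups get repatriated into __pod_cpumask, while strict unbound pools are confined to it outright. A rough sketch with a 64-bit word standing in for a cpumask (cpumask_any_distribute() round-robins; the helper below just takes the first set bit):

    #include <stdio.h>
    #include <stdint.h>

    /* Simplification of cpumask_any_distribute(): first set bit only. */
    static int mask_any(uint64_t mask)
    {
        for (int cpu = 0; cpu < 64; cpu++)
            if (mask & (1ULL << cpu))
                return cpu;
        return -1;
    }

    int main(void)
    {
        uint64_t cpumask = 0xFF;     /* pool's full cpumask */
        uint64_t pod_cpumask = 0x0F; /* CPUs 0-3 form the pod */
        int affn_strict = 0;
        int wake_cpu = 6;            /* last-run CPU, outside the pod */

        if (!affn_strict && !(pod_cpumask & (1ULL << wake_cpu)))
            wake_cpu = mask_any(pod_cpumask);

        /* strict unbound pools are confined to the pod */
        uint64_t allowed = affn_strict ? pod_cpumask : cpumask;

        printf("wake on CPU %d, allowed mask %#llx\n",
               wake_cpu, (unsigned long long)allowed);
        return 0;
    }
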
2168 int id; in create_worker() local
2172 id = ida_alloc(&pool->worker_ida, GFP_KERNEL); in create_worker()
2173 if (id < 0) { in create_worker()
2175 ERR_PTR(id)); in create_worker()
2185 worker->id = id; in create_worker()
2188 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id, in create_worker()
2189 pool->attrs->nice < 0 ? "H" : ""); in create_worker()
2191 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id); in create_worker()
2206 set_user_nice(worker->task, pool->attrs->nice); in create_worker()
2231 ida_free(&pool->worker_ida, id); in create_worker()
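
create_worker() takes a per-pool ID from ida_alloc() and builds the task name from it, handing the ID back with ida_free() on the failure path and again at worker exit (2745, below). The two formats above are what produce the familiar kworker names; a runnable reconstruction:

    #include <stdio.h>

    int main(void)
    {
        char id_buf[32];
        int cpu = 3, pool_id = 12, id = 7, nice = -20;

        if (cpu >= 0) /* per-CPU pool: "<cpu>:<id>", "H" when highpri */
            snprintf(id_buf, sizeof(id_buf), "%d:%d%s", cpu, id,
                     nice < 0 ? "H" : "");
        else          /* unbound pool: "u<pool_id>:<id>" */
            snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool_id, id);

        printf("kworker/%s\n", id_buf); /* e.g. kworker/3:7H */
        return 0;
    }
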
2600 set_work_pool_and_clear_pending(work, pool->id); in process_one_work()
2745 ida_free(&pool->worker_ida, worker->id); in worker_thread()
3708 void free_workqueue_attrs(struct workqueue_attrs *attrs) in free_workqueue_attrs() argument
3710 if (attrs) { in free_workqueue_attrs()
3711 free_cpumask_var(attrs->cpumask); in free_workqueue_attrs()
3712 free_cpumask_var(attrs->__pod_cpumask); in free_workqueue_attrs()
3713 kfree(attrs); in free_workqueue_attrs()
3727 struct workqueue_attrs *attrs; in alloc_workqueue_attrs() local
3729 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); in alloc_workqueue_attrs()
3730 if (!attrs) in alloc_workqueue_attrs()
3732 if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL)) in alloc_workqueue_attrs()
3734 if (!alloc_cpumask_var(&attrs->__pod_cpumask, GFP_KERNEL)) in alloc_workqueue_attrs()
3737 cpumask_copy(attrs->cpumask, cpu_possible_mask); in alloc_workqueue_attrs()
3738 attrs->affn_scope = WQ_AFFN_DFL; in alloc_workqueue_attrs()
3739 return attrs; in alloc_workqueue_attrs()
3741 free_workqueue_attrs(attrs); in alloc_workqueue_attrs()
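
alloc_workqueue_attrs() and free_workqueue_attrs() are a matched pair: any partially built attrs object funnels through the one free path, which tolerates NULL members. The same shape in self-contained userspace C, with calloc/free standing in for kzalloc and the cpumask_var allocators:

    #include <stdlib.h>
    #include <stdio.h>

    struct attrs {
        unsigned long *cpumask;
        unsigned long *pod_cpumask;
    };

    static void attrs_free(struct attrs *a)
    {
        if (a) {
            free(a->cpumask); /* free(NULL) is a no-op */
            free(a->pod_cpumask);
            free(a);
        }
    }

    static struct attrs *attrs_alloc(void)
    {
        struct attrs *a = calloc(1, sizeof(*a));

        if (!a)
            return NULL;
        a->cpumask = calloc(1, sizeof(unsigned long));
        a->pod_cpumask = calloc(1, sizeof(unsigned long));
        if (!a->cpumask || !a->pod_cpumask)
            goto fail;
        return a;
    fail:
        attrs_free(a);
        return NULL;
    }

    int main(void)
    {
        struct attrs *a = attrs_alloc();

        printf("%s\n", a ? "allocated" : "failed");
        attrs_free(a);
        return 0;
    }
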
3766 static void wqattrs_clear_for_pool(struct workqueue_attrs *attrs) in wqattrs_clear_for_pool() argument
3768 attrs->affn_scope = WQ_AFFN_NR_TYPES; in wqattrs_clear_for_pool()
3769 attrs->ordered = false; in wqattrs_clear_for_pool()
3773 static u32 wqattrs_hash(const struct workqueue_attrs *attrs) in wqattrs_hash() argument
3777 hash = jhash_1word(attrs->nice, hash); in wqattrs_hash()
3778 hash = jhash(cpumask_bits(attrs->cpumask), in wqattrs_hash()
3780 hash = jhash(cpumask_bits(attrs->__pod_cpumask), in wqattrs_hash()
3782 hash = jhash_1word(attrs->affn_strict, hash); in wqattrs_hash()
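
wqattrs_hash() folds nice, both cpumasks, and affn_strict into a single value so equivalent pools can share an unbound_pool_hash bucket. jhash is kernel-internal, so this sketch chains FNV-1a instead; only the set of hashed fields is taken from the hits above:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    /* FNV-1a standing in for jhash; chained through h the way
     * wqattrs_hash() chains its jhash calls. */
    static uint32_t fnv1a(const void *p, size_t len, uint32_t h)
    {
        const unsigned char *b = p;

        while (len--) {
            h ^= *b++;
            h *= 16777619u;
        }
        return h;
    }

    int main(void)
    {
        int nice = -20;
        uint64_t cpumask = 0xFF, pod_cpumask = 0x0F;
        uint32_t affn_strict = 1;
        uint32_t h = 2166136261u; /* FNV offset basis */

        h = fnv1a(&nice, sizeof(nice), h);
        h = fnv1a(&cpumask, sizeof(cpumask), h);
        h = fnv1a(&pod_cpumask, sizeof(pod_cpumask), h);
        h = fnv1a(&affn_strict, sizeof(affn_strict), h);
        printf("attrs hash = %08x\n", h);
        return 0;
    }
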
3802 static void wqattrs_actualize_cpumask(struct workqueue_attrs *attrs, in wqattrs_actualize_cpumask() argument
3810 cpumask_and(attrs->cpumask, attrs->cpumask, unbound_cpumask); in wqattrs_actualize_cpumask()
3811 if (unlikely(cpumask_empty(attrs->cpumask))) in wqattrs_actualize_cpumask()
3812 cpumask_copy(attrs->cpumask, unbound_cpumask); in wqattrs_actualize_cpumask()
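
wqattrs_actualize_cpumask() masks the requested CPUs down to wq_unbound_cpumask and, if nothing survives, falls back to the unbound mask wholesale rather than leaving an empty mask. The same two steps in userspace:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t requested = 0xF0; /* user asked for CPUs 4-7 */
        uint64_t unbound   = 0x0F; /* only CPUs 0-3 allowed unbound */
        uint64_t effective = requested & unbound;

        /* the fallback seen above: an empty intersection degenerates
         * to the whole unbound mask instead of an unusable pool */
        if (!effective)
            effective = unbound;

        printf("effective cpumask = %#llx\n",
               (unsigned long long)effective);
        return 0;
    }
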
3817 wqattrs_pod_type(const struct workqueue_attrs *attrs) in wqattrs_pod_type() argument
3825 if (attrs->affn_scope == WQ_AFFN_DFL) in wqattrs_pod_type()
3828 scope = attrs->affn_scope; in wqattrs_pod_type()
3832 if (!WARN_ON_ONCE(attrs->affn_scope == WQ_AFFN_NR_TYPES) && in wqattrs_pod_type()
3858 pool->id = -1; in init_worker_pool()
3880 pool->attrs = alloc_workqueue_attrs(); in init_worker_pool()
3881 if (!pool->attrs) in init_worker_pool()
3884 wqattrs_clear_for_pool(pool->attrs); in init_worker_pool()
3943 free_workqueue_attrs(pool->attrs); in rcu_free_pool()
3975 if (pool->id >= 0) in put_unbound_pool()
3976 idr_remove(&worker_pool_idr, pool->id); in put_unbound_pool()
4044 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs) in get_unbound_pool() argument
4047 u32 hash = wqattrs_hash(attrs); in get_unbound_pool()
4055 if (wqattrs_equal(pool->attrs, attrs)) { in get_unbound_pool()
4063 if (cpumask_subset(attrs->__pod_cpumask, pt->pod_cpus[pod])) { in get_unbound_pool()
4075 copy_workqueue_attrs(pool->attrs, attrs); in get_unbound_pool()
4076 wqattrs_clear_for_pool(pool->attrs); in get_unbound_pool()
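
get_unbound_pool() is a lookup-or-create keyed by the attrs hash from above: reuse a pool whose attributes compare equal, otherwise initialize a fresh one from a copy of the attrs. A compact userspace rendition of that shape (a flat array stands in for the hash table, and field comparison for wqattrs_equal()):

    #include <stdio.h>

    struct key { int nice; unsigned long cpumask; };
    struct pool { struct key key; int refcnt; };

    static struct pool cache[8];
    static int cache_len;

    /* field-by-field comparison, like wqattrs_equal() */
    static int keys_equal(const struct key *a, const struct key *b)
    {
        return a->nice == b->nice && a->cpumask == b->cpumask;
    }

    static struct pool *get_pool(const struct key *k)
    {
        for (int i = 0; i < cache_len; i++) {
            if (keys_equal(&cache[i].key, k)) {
                cache[i].refcnt++;
                return &cache[i];
            }
        }
        if (cache_len == 8)
            return NULL;
        cache[cache_len] = (struct pool){ .key = *k, .refcnt = 1 };
        return &cache[cache_len++];
    }

    int main(void)
    {
        struct key k = { .nice = 0, .cpumask = 0xF };
        struct pool *a = get_pool(&k);
        struct pool *b = get_pool(&k);

        printf("shared=%s refcnt=%d\n", a == b ? "yes" : "no",
               b->refcnt);
        return 0;
    }
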
4227 const struct workqueue_attrs *attrs) in alloc_unbound_pwq() argument
4234 pool = get_unbound_pool(attrs); in alloc_unbound_pwq()
4264 static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu, in wq_calc_pod_cpumask() argument
4267 const struct wq_pod_type *pt = wqattrs_pod_type(attrs); in wq_calc_pod_cpumask()
4271 cpumask_and(attrs->__pod_cpumask, pt->pod_cpus[pod], attrs->cpumask); in wq_calc_pod_cpumask()
4272 cpumask_and(attrs->__pod_cpumask, attrs->__pod_cpumask, cpu_online_mask); in wq_calc_pod_cpumask()
4274 cpumask_clear_cpu(cpu_going_down, attrs->__pod_cpumask); in wq_calc_pod_cpumask()
4276 if (cpumask_empty(attrs->__pod_cpumask)) { in wq_calc_pod_cpumask()
4277 cpumask_copy(attrs->__pod_cpumask, attrs->cpumask); in wq_calc_pod_cpumask()
4282 cpumask_and(attrs->__pod_cpumask, attrs->cpumask, pt->pod_cpus[pod]); in wq_calc_pod_cpumask()
4284 if (cpumask_empty(attrs->__pod_cpumask)) in wq_calc_pod_cpumask()
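
wq_calc_pod_cpumask() narrows a pod's CPUs by the workqueue's cpumask and the online mask, drops a CPU that is going down, and widens back to the full cpumask if the result is empty. With plain 64-bit masks:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t pod_cpus   = 0x0F; /* CPUs belonging to this pod */
        uint64_t wq_cpumask = 0xFF; /* the workqueue's allowed CPUs */
        uint64_t online     = 0xFD; /* CPU 1 is offline */
        int cpu_going_down  = 0;    /* being hot-unplugged, -1 if none */

        uint64_t pod = pod_cpus & wq_cpumask & online;

        if (cpu_going_down >= 0)
            pod &= ~(1ULL << cpu_going_down);

        /* fallback seen above: an empty pod mask widens back out to
         * the workqueue's full cpumask */
        if (!pod)
            pod = wq_cpumask;

        printf("pod cpumask = %#llx\n", (unsigned long long)pod);
        return 0;
    }
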
4309 struct workqueue_attrs *attrs; /* attrs to apply */ member
4325 free_workqueue_attrs(ctx->attrs); in apply_wqattrs_cleanup()
4334 const struct workqueue_attrs *attrs, in apply_wqattrs_prepare() argument
4343 if (WARN_ON(attrs->affn_scope < 0 || in apply_wqattrs_prepare()
4344 attrs->affn_scope >= WQ_AFFN_NR_TYPES)) in apply_wqattrs_prepare()
4358 copy_workqueue_attrs(new_attrs, attrs); in apply_wqattrs_prepare()
4378 copy_workqueue_attrs(new_attrs, attrs); in apply_wqattrs_prepare()
4381 ctx->attrs = new_attrs; in apply_wqattrs_prepare()
4400 copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs); in apply_wqattrs_commit()
4428 const struct workqueue_attrs *attrs) in apply_workqueue_attrs_locked() argument
4444 ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask); in apply_workqueue_attrs_locked()
4473 const struct workqueue_attrs *attrs) in apply_workqueue_attrs() argument
4480 ret = apply_workqueue_attrs_locked(wq, attrs); in apply_workqueue_attrs()
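
apply_workqueue_attrs() runs as a prepare/commit/cleanup transaction: apply_wqattrs_prepare() does all the fallible allocation up front, apply_wqattrs_commit() only swaps pointers, and apply_wqattrs_cleanup() frees whatever was displaced. A stripped-down sketch taking only the transaction structure from the hits above:

    #include <stdio.h>
    #include <stdlib.h>

    struct attrs { int nice; };
    struct wq { struct attrs *attrs; };
    struct ctx { struct wq *wq; struct attrs *new_attrs; };

    /* prepare: every fallible step happens here, before anything is
     * visible to the rest of the system */
    static struct ctx *prepare(struct wq *wq, int nice)
    {
        struct ctx *ctx = calloc(1, sizeof(*ctx));

        if (!ctx)
            return NULL;
        ctx->new_attrs = calloc(1, sizeof(*ctx->new_attrs));
        if (!ctx->new_attrs) {
            free(ctx);
            return NULL;
        }
        ctx->wq = wq;
        ctx->new_attrs->nice = nice;
        return ctx;
    }

    /* commit: a pointer swap that cannot fail; the displaced attrs
     * ride back in the ctx so cleanup() frees them */
    static void commit(struct ctx *ctx)
    {
        struct attrs *old = ctx->wq->attrs;

        ctx->wq->attrs = ctx->new_attrs;
        ctx->new_attrs = old;
    }

    static void cleanup(struct ctx *ctx)
    {
        if (ctx) {
            free(ctx->new_attrs);
            free(ctx);
        }
    }

    int main(void)
    {
        struct wq wq = { .attrs = NULL };
        struct ctx *ctx = prepare(&wq, -10);

        if (!ctx)
            return 1;
        commit(ctx);
        cleanup(ctx);
        printf("nice = %d\n", wq.attrs->nice);
        free(wq.attrs);
        return 0;
    }
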
4534 if (wqattrs_equal(target_attrs, pwq->pool->attrs)) in wq_update_pod()
5079 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask); in pr_cont_pool_info()
5082 pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice); in pr_cont_pool_info()
5138 pr_info(" pwq %d:", pool->id); in show_pwq()
5274 pr_info("pool %d:", pool->id); in show_one_worker_pool()
5523 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask)) in restore_unbound_workers_cpumask()
5526 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask); in restore_unbound_workers_cpumask()
5567 struct workqueue_attrs *attrs = wq->unbound_attrs; in workqueue_online_cpu() local
5569 if (attrs) { in workqueue_online_cpu()
5570 const struct wq_pod_type *pt = wqattrs_pod_type(attrs); in workqueue_online_cpu()
5595 struct workqueue_attrs *attrs = wq->unbound_attrs; in workqueue_offline_cpu() local
5597 if (attrs) { in workqueue_offline_cpu()
5598 const struct wq_pod_type *pt = wqattrs_pod_type(attrs); in workqueue_offline_cpu()
5932 struct device dev; member
5935 static struct workqueue_struct *dev_to_wq(struct device *dev) in dev_to_wq() argument
5937 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); in dev_to_wq()
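
dev_to_wq() recovers the wq_device from the embedded struct device with container_of(). The same pointer arithmetic in a standalone program:

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct device { const char *name; };
    struct wq_device { int wq_id; struct device dev; };

    int main(void)
    {
        struct wq_device wq_dev = { .wq_id = 9, .dev = { "writeback" } };
        struct device *dev = &wq_dev.dev;

        /* same step as dev_to_wq(): from the embedded member back to
         * the enclosing structure */
        struct wq_device *back = container_of(dev, struct wq_device, dev);

        printf("wq_id=%d name=%s\n", back->wq_id, back->dev.name);
        return 0;
    }
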
5942 static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr, in per_cpu_show() argument
5945 struct workqueue_struct *wq = dev_to_wq(dev); in per_cpu_show()
5951 static ssize_t max_active_show(struct device *dev, in max_active_show() argument
5954 struct workqueue_struct *wq = dev_to_wq(dev); in max_active_show()
5959 static ssize_t max_active_store(struct device *dev, in max_active_store() argument
5963 struct workqueue_struct *wq = dev_to_wq(dev); in max_active_store()
5981 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr, in wq_nice_show() argument
5984 struct workqueue_struct *wq = dev_to_wq(dev); in wq_nice_show()
5997 struct workqueue_attrs *attrs; in wq_sysfs_prep_attrs() local
6001 attrs = alloc_workqueue_attrs(); in wq_sysfs_prep_attrs()
6002 if (!attrs) in wq_sysfs_prep_attrs()
6005 copy_workqueue_attrs(attrs, wq->unbound_attrs); in wq_sysfs_prep_attrs()
6006 return attrs; in wq_sysfs_prep_attrs()
6009 static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr, in wq_nice_store() argument
6012 struct workqueue_struct *wq = dev_to_wq(dev); in wq_nice_store()
6013 struct workqueue_attrs *attrs; in wq_nice_store() local
6018 attrs = wq_sysfs_prep_attrs(wq); in wq_nice_store()
6019 if (!attrs) in wq_nice_store()
6022 if (sscanf(buf, "%d", &attrs->nice) == 1 && in wq_nice_store()
6023 attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE) in wq_nice_store()
6024 ret = apply_workqueue_attrs_locked(wq, attrs); in wq_nice_store()
6030 free_workqueue_attrs(attrs); in wq_nice_store()
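
The sysfs store hooks all share one shape: wq_sysfs_prep_attrs() copies the current unbound_attrs, the handler patches one field, and apply_workqueue_attrs_locked() installs the result. From userspace that whole machinery is reached by a plain file write; a sketch, assuming a mainline kernel where "writeback" is registered with WQ_SYSFS (the write needs root):

    #include <stdio.h>

    int main(void)
    {
        /* example target; any WQ_SYSFS workqueue works */
        const char *path =
            "/sys/devices/virtual/workqueue/writeback/nice";
        FILE *f = fopen(path, "w");

        if (!f) {
            perror(path);
            return 1;
        }
        fprintf(f, "%d\n", -10);
        return fclose(f) ? 1 : 0; /* the store may reject on close */
    }
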
6034 static ssize_t wq_cpumask_show(struct device *dev, in wq_cpumask_show() argument
6037 struct workqueue_struct *wq = dev_to_wq(dev); in wq_cpumask_show()
6047 static ssize_t wq_cpumask_store(struct device *dev, in wq_cpumask_store() argument
6051 struct workqueue_struct *wq = dev_to_wq(dev); in wq_cpumask_store()
6052 struct workqueue_attrs *attrs; in wq_cpumask_store() local
6057 attrs = wq_sysfs_prep_attrs(wq); in wq_cpumask_store()
6058 if (!attrs) in wq_cpumask_store()
6061 ret = cpumask_parse(buf, attrs->cpumask); in wq_cpumask_store()
6063 ret = apply_workqueue_attrs_locked(wq, attrs); in wq_cpumask_store()
6067 free_workqueue_attrs(attrs); in wq_cpumask_store()
6071 static ssize_t wq_affn_scope_show(struct device *dev, in wq_affn_scope_show() argument
6074 struct workqueue_struct *wq = dev_to_wq(dev); in wq_affn_scope_show()
6090 static ssize_t wq_affn_scope_store(struct device *dev, in wq_affn_scope_store() argument
6094 struct workqueue_struct *wq = dev_to_wq(dev); in wq_affn_scope_store()
6095 struct workqueue_attrs *attrs; in wq_affn_scope_store() local
6103 attrs = wq_sysfs_prep_attrs(wq); in wq_affn_scope_store()
6104 if (attrs) { in wq_affn_scope_store()
6105 attrs->affn_scope = affn; in wq_affn_scope_store()
6106 ret = apply_workqueue_attrs_locked(wq, attrs); in wq_affn_scope_store()
6109 free_workqueue_attrs(attrs); in wq_affn_scope_store()
6113 static ssize_t wq_affinity_strict_show(struct device *dev, in wq_affinity_strict_show() argument
6116 struct workqueue_struct *wq = dev_to_wq(dev); in wq_affinity_strict_show()
6122 static ssize_t wq_affinity_strict_store(struct device *dev, in wq_affinity_strict_store() argument
6126 struct workqueue_struct *wq = dev_to_wq(dev); in wq_affinity_strict_store()
6127 struct workqueue_attrs *attrs; in wq_affinity_strict_store() local
6134 attrs = wq_sysfs_prep_attrs(wq); in wq_affinity_strict_store()
6135 if (attrs) { in wq_affinity_strict_store()
6136 attrs->affn_strict = (bool)v; in wq_affinity_strict_store()
6137 ret = apply_workqueue_attrs_locked(wq, attrs); in wq_affinity_strict_store()
6140 free_workqueue_attrs(attrs); in wq_affinity_strict_store()
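
affinity_scope and affinity_strict follow the same prep/patch/apply pattern. Reading the scope back is just a file read; the sketch assumes a kernel new enough (v6.5+) to expose the affinity_scope attribute:

    #include <stdio.h>

    int main(void)
    {
        const char *path =
            "/sys/devices/virtual/workqueue/writeback/affinity_scope";
        char buf[128];
        FILE *f = fopen(path, "r");

        if (!f) {
            perror(path);
            return 1;
        }
        if (fgets(buf, sizeof(buf), f))
            printf("affinity_scope: %s", buf);
        fclose(f);
        return 0;
    }
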
6157 static ssize_t wq_unbound_cpumask_show(struct device *dev, in wq_unbound_cpumask_show() argument
6170 static ssize_t wq_unbound_cpumask_store(struct device *dev, in wq_unbound_cpumask_store() argument
6209 static void wq_device_release(struct device *dev) in wq_device_release() argument
6211 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); in wq_device_release()
6249 wq_dev->dev.bus = &wq_subsys; in workqueue_sysfs_register()
6250 wq_dev->dev.release = wq_device_release; in workqueue_sysfs_register()
6251 dev_set_name(&wq_dev->dev, "%s", wq->name); in workqueue_sysfs_register()
6257 dev_set_uevent_suppress(&wq_dev->dev, true); in workqueue_sysfs_register()
6259 ret = device_register(&wq_dev->dev); in workqueue_sysfs_register()
6261 put_device(&wq_dev->dev); in workqueue_sysfs_register()
6270 ret = device_create_file(&wq_dev->dev, attr); in workqueue_sysfs_register()
6272 device_unregister(&wq_dev->dev); in workqueue_sysfs_register()
6279 dev_set_uevent_suppress(&wq_dev->dev, false); in workqueue_sysfs_register()
6280 kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD); in workqueue_sysfs_register()
6298 device_unregister(&wq_dev->dev); in workqueue_sysfs_unregister()
6352 pr_info("pool %d:\n", pool->id); in show_cpu_pool_hog()
6565 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); in workqueue_init_early()
6566 cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu)); in workqueue_init_early()
6567 pool->attrs->nice = std_nice[i++]; in workqueue_init_early()
6568 pool->attrs->affn_strict = true; in workqueue_init_early()
6580 struct workqueue_attrs *attrs; in workqueue_init_early() local
6582 BUG_ON(!(attrs = alloc_workqueue_attrs())); in workqueue_init_early()
6583 attrs->nice = std_nice[i]; in workqueue_init_early()
6584 unbound_std_wq_attrs[i] = attrs; in workqueue_init_early()
6590 BUG_ON(!(attrs = alloc_workqueue_attrs())); in workqueue_init_early()
6591 attrs->nice = std_nice[i]; in workqueue_init_early()
6592 attrs->ordered = true; in workqueue_init_early()
6593 ordered_wq_attrs[i] = attrs; in workqueue_init_early()
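
workqueue_init_early() pins each per-CPU pool's cpumasks to its own CPU with strict affinity, then builds one unbound attrs object per standard nice level plus an ordered twin. A userspace mock of that boot-time loop (the { 0, -20 } nice levels mirror the kernel's std_nice[]; treat the exact values as an assumption):

    #include <stdio.h>
    #include <stdlib.h>

    #define NR_STD_WORKER_POOLS 2

    struct attrs { int nice; int ordered; };

    int main(void)
    {
        /* normal and highpri; HIGHPRI_NICE_LEVEL is -20 in the kernel */
        static const int std_nice[NR_STD_WORKER_POOLS] = { 0, -20 };
        struct attrs *std[NR_STD_WORKER_POOLS];
        struct attrs *ordered[NR_STD_WORKER_POOLS];

        for (int i = 0; i < NR_STD_WORKER_POOLS; i++) {
            std[i] = calloc(1, sizeof(*std[i]));
            ordered[i] = calloc(1, sizeof(*ordered[i]));
            if (!std[i] || !ordered[i])
                return 1; /* the kernel BUG()s here instead */
            std[i]->nice = std_nice[i];
            ordered[i]->nice = std_nice[i];
            ordered[i]->ordered = 1;
        }
        printf("highpri nice = %d\n", std[1]->nice);
        return 0;
    }
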