/Linux-v5.15/include/linux/ |
D | cpumask.h |
    17   typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; struct
    90   extern struct cpumask __cpu_possible_mask;
    91   extern struct cpumask __cpu_online_mask;
    92   extern struct cpumask __cpu_present_mask;
    93   extern struct cpumask __cpu_active_mask;
    94   extern struct cpumask __cpu_dying_mask;
    95   #define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
    96   #define cpu_online_mask ((const struct cpumask *)&__cpu_online_mask)
    97   #define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask)
    98   #define cpu_active_mask ((const struct cpumask *)&__cpu_active_mask)
    [all …]
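A minimal sketch of how these global masks and the iteration helpers are typically consumed; the helper name example_count_online() is made up for illustration:

    #include <linux/cpumask.h>
    #include <linux/printk.h>

    /* Count how many CPUs in @mask are currently online and log the mask. */
    static unsigned int example_count_online(const struct cpumask *mask)
    {
            unsigned int cpu, n = 0;

            /* walk only the bits set in both @mask and cpu_online_mask */
            for_each_cpu_and(cpu, mask, cpu_online_mask)
                    n++;

            pr_info("mask %*pbl: %u CPUs online\n", cpumask_pr_args(mask), n);
            return n;
    }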
|
D | pm_opp.h |
    166  int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask);
    167  int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
    169  void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask);
    414  static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask) in dev_pm_opp_set_sharing_cpus() argument
    419  static inline int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) in dev_pm_opp_get_sharing_cpus() argument
    428  static inline void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask) in dev_pm_opp_cpumask_remove_table() argument
    445  int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask);
    446  void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask);
    447  int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
    452  int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus);
    [all …]
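A sketch of how a cpufreq-style driver might combine the OF helpers with dev_pm_opp_set_sharing_cpus(); the wrapper name and the exact call ordering are illustrative, not taken from a specific driver:

    #include <linux/cpumask.h>
    #include <linux/pm_opp.h>

    /* Populate the OPP tables for @cpus from DT and mark them as sharing @cpu_dev's table. */
    static int example_setup_shared_opp(struct device *cpu_dev, const struct cpumask *cpus)
    {
            int ret;

            ret = dev_pm_opp_of_cpumask_add_table(cpus);
            if (ret)
                    return ret;

            ret = dev_pm_opp_set_sharing_cpus(cpu_dev, cpus);
            if (ret)
                    dev_pm_opp_of_cpumask_remove_table(cpus);

            return ret;
    }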
|
D | stop_machine.h |
    38   void stop_machine_yield(const struct cpumask *cpumask);
    114  int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
    125  int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
    128  const struct cpumask *cpus);
    132  const struct cpumask *cpus) in stop_machine_cpuslocked()
    143  stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus) in stop_machine()
    150  const struct cpumask *cpus) in stop_machine_from_inactive_cpu()
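A minimal sketch of the stop_machine() interface declared above: the callback has the cpu_stop_fn_t signature and runs on the CPUs in @cpus while the remaining online CPUs spin with interrupts disabled (passing NULL lets the core pick one CPU). The function names are made up:

    #include <linux/cpumask.h>
    #include <linux/stop_machine.h>

    /* cpu_stop_fn_t callback: runs while the other online CPUs are held in the stopper loop. */
    static int example_patch_fn(void *data)
    {
            int *value = data;

            *value += 1;
            return 0;
    }

    /* Run example_patch_fn() under stop_machine(), targeting the CPUs in @cpus. */
    static int example_run_stopped(const struct cpumask *cpus)
    {
            int value = 0;

            return stop_machine(example_patch_fn, &value, cpus);
    }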
|
D | arch_topology.h |
    33   void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
    49   void topology_set_scale_freq_source(struct scale_freq_data *data, const struct cpumask *cpus);
    50   void topology_clear_scale_freq_source(enum scale_freq_source source, const struct cpumask *cpus);
    59   void topology_set_thermal_pressure(const struct cpumask *cpus,
    82   const struct cpumask *cpu_coregroup_mask(int cpu);
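A small sketch using cpu_coregroup_mask() from the list above; example_share_coregroup() is a hypothetical helper:

    #include <linux/arch_topology.h>
    #include <linux/cpumask.h>

    /* True if CPUs @a and @b belong to the same core group as reported by the topology code. */
    static bool example_share_coregroup(int a, int b)
    {
            return cpumask_test_cpu(b, cpu_coregroup_mask(a));
    }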
|
/Linux-v5.15/Documentation/translations/zh_CN/core-api/ |
D | padata.rst |
    57   cpumask_var_t cpumask);
    60   The parallel cpumask describes which processors will be used to execute jobs submitted to this instance in parallel, and the serial cpumask
    61   defines which processors are allowed to be used as serialization callback processors. cpumask specifies the new cpumask to use.
    65   and serial_cpumask; either cpumask may be changed by echoing a bitmask into the file
    70   Reading one of these files shows the user-supplied cpumask, which may differ from the "usable" cpumask.
    72   padata internally maintains two pairs of cpumasks, the user-supplied cpumasks and the "usable" cpumasks (each pair consisting of one
    73   parallel and one serial cpumask). The user-supplied cpumasks default to all possible CPUs when the instance is allocated,
    76   it is legal to supply a cpumask that contains an offline CPU. Once an offline CPU in the user-supplied cpumask comes online,
    104  elsewhere is fiddling with the instance's CPU masks, while cb_cpu is not in the serial cpumask, or the parallel or serial cpumasks
    [all …]
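A sketch of the call the documentation is describing, assuming the padata_set_cpumask() prototype (whose tail is visible at line 57) and the PADATA_CPU_PARALLEL selector from include/linux/padata.h; the wrapper name is made up:

    #include <linux/cpumask.h>
    #include <linux/gfp.h>
    #include <linux/padata.h>

    /* Restrict the parallel workers of @pinst to the CPUs in @cpus. */
    static int example_restrict_parallel(struct padata_instance *pinst,
                                         const struct cpumask *cpus)
    {
            cpumask_var_t mask;
            int ret;

            if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                    return -ENOMEM;

            cpumask_copy(mask, cpus);
            ret = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
            free_cpumask_var(mask);
            return ret;
    }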
|
/Linux-v5.15/drivers/opp/ |
D | cpu.c |
    108  void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, in _dev_pm_opp_cpumask_remove_table() argument
    114  WARN_ON(cpumask_empty(cpumask)); in _dev_pm_opp_cpumask_remove_table()
    116  for_each_cpu(cpu, cpumask) { in _dev_pm_opp_cpumask_remove_table()
    139  void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask) in dev_pm_opp_cpumask_remove_table() argument
    141  _dev_pm_opp_cpumask_remove_table(cpumask, -1); in dev_pm_opp_cpumask_remove_table()
    156  const struct cpumask *cpumask) in dev_pm_opp_set_sharing_cpus() argument
    167  for_each_cpu(cpu, cpumask) { in dev_pm_opp_set_sharing_cpus()
    205  int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) in dev_pm_opp_get_sharing_cpus() argument
    220  cpumask_clear(cpumask); in dev_pm_opp_get_sharing_cpus()
    225  cpumask_set_cpu(opp_dev->dev->id, cpumask); in dev_pm_opp_get_sharing_cpus()
    [all …]
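The getter side, sketched against the dev_pm_opp_get_sharing_cpus() implementation above (which clears the mask and fills in the IDs of the sharing CPU devices); the printing helper is illustrative:

    #include <linux/cpumask.h>
    #include <linux/device.h>
    #include <linux/gfp.h>
    #include <linux/pm_opp.h>

    /* Log which CPUs share @cpu_dev's OPP table. */
    static void example_show_opp_sharers(struct device *cpu_dev)
    {
            cpumask_var_t shared;

            if (!zalloc_cpumask_var(&shared, GFP_KERNEL))
                    return;

            if (!dev_pm_opp_get_sharing_cpus(cpu_dev, shared))
                    dev_info(cpu_dev, "OPP table shared with CPUs %*pbl\n",
                             cpumask_pr_args(shared));

            free_cpumask_var(shared);
    }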
|
/Linux-v5.15/kernel/ |
D | padata.c |
    54   target_cpu = cpumask_first(pd->cpumask.pcpu); in padata_index_to_cpu()
    56   target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu); in padata_index_to_cpu()
    67   int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu); in padata_cpu_hash()
    183  if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) { in padata_do_parallel()
    184  if (!cpumask_weight(pd->cpumask.cbcpu)) in padata_do_parallel()
    188  cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu); in padata_do_parallel()
    190  cpu = cpumask_first(pd->cpumask.cbcpu); in padata_do_parallel()
    192  cpu = cpumask_next(cpu, pd->cpumask.cbcpu); in padata_do_parallel()
    267  pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false); in padata_find_next()
    421  cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu); in padata_setup_cpumasks()
    [all …]
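The selection pattern visible in padata_cpu_hash() and padata_do_parallel() above, reduced to a standalone sketch: map an arbitrary index onto the corresponding set bit of a mask.

    #include <linux/cpumask.h>
    #include <linux/errno.h>

    /* Return the (@index % weight)-th CPU set in @mask, or -EINVAL if the mask is empty. */
    static int example_nth_cpu(unsigned int index, const struct cpumask *mask)
    {
            unsigned int weight = cpumask_weight(mask);
            unsigned int i, cpu;

            if (!weight)
                    return -EINVAL;

            cpu = cpumask_first(mask);
            for (i = 0; i < index % weight; i++)
                    cpu = cpumask_next(cpu, mask);

            return cpu;
    }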
|
D | stop_machine.c |
    175  const struct cpumask *active_cpus;
    197  notrace void __weak stop_machine_yield(const struct cpumask *cpumask) in stop_machine_yield() argument
    208  const struct cpumask *cpumask; in multi_cpu_stop() local
    219  cpumask = cpu_online_mask; in multi_cpu_stop()
    220  is_active = cpu == cpumask_first(cpumask); in multi_cpu_stop()
    222  cpumask = msdata->active_cpus; in multi_cpu_stop()
    223  is_active = cpumask_test_cpu(cpu, cpumask); in multi_cpu_stop()
    229  stop_machine_yield(cpumask); in multi_cpu_stop()
    391  static bool queue_stop_cpus_work(const struct cpumask *cpumask, in queue_stop_cpus_work() argument
    407  for_each_cpu(cpu, cpumask) { in queue_stop_cpus_work()
    [all …]
|
/Linux-v5.15/drivers/powercap/ |
D | idle_inject.c |
    73   unsigned long cpumask[]; member
    91   for_each_cpu_and(cpu, to_cpumask(ii_dev->cpumask), cpu_online_mask) { in idle_inject_wakeup()
    205  cpumask_pr_args(to_cpumask(ii_dev->cpumask))); in idle_inject_start()
    234  cpumask_pr_args(to_cpumask(ii_dev->cpumask))); in idle_inject_stop()
    253  for_each_cpu(cpu, to_cpumask(ii_dev->cpumask)) { in idle_inject_stop()
    300  struct idle_inject_device *idle_inject_register(struct cpumask *cpumask) in idle_inject_register() argument
    309  cpumask_copy(to_cpumask(ii_dev->cpumask), cpumask); in idle_inject_register()
    314  for_each_cpu(cpu, to_cpumask(ii_dev->cpumask)) { in idle_inject_register()
    327  for_each_cpu(cpu_rb, to_cpumask(ii_dev->cpumask)) { in idle_inject_register()
    352  for_each_cpu(cpu, to_cpumask(ii_dev->cpumask)) in idle_inject_unregister()
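A usage sketch for the registration path above, assuming the idle_inject_set_duration()/idle_inject_start()/idle_inject_unregister() helpers declared next to idle_inject_register() in include/linux/idle_inject.h; the duration values are arbitrary:

    #include <linux/cpumask.h>
    #include <linux/idle_inject.h>

    /* Inject 10 ms of forced idle out of every 50 ms on the CPUs in @cpus. */
    static struct idle_inject_device *example_start_injection(struct cpumask *cpus)
    {
            struct idle_inject_device *ii_dev;

            ii_dev = idle_inject_register(cpus);
            if (!ii_dev)
                    return NULL;

            idle_inject_set_duration(ii_dev, 40000, 10000);    /* run us, idle us */
            if (idle_inject_start(ii_dev)) {
                    idle_inject_unregister(ii_dev);
                    return NULL;
            }

            return ii_dev;
    }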
|
/Linux-v5.15/drivers/cpuidle/ |
D | cpuidle-big_little.c |
    140  struct cpumask *cpumask; in bl_idle_driver_init() local
    143  cpumask = kzalloc(cpumask_size(), GFP_KERNEL); in bl_idle_driver_init()
    144  if (!cpumask) in bl_idle_driver_init()
    149  cpumask_set_cpu(cpu, cpumask); in bl_idle_driver_init()
    151  drv->cpumask = cpumask; in bl_idle_driver_init()
    223  kfree(bl_idle_big_driver.cpumask); in bl_idle_init()
    225  kfree(bl_idle_little_driver.cpumask); in bl_idle_init()
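The allocation pattern above (kzalloc(cpumask_size()) plus cpumask_set_cpu()) in isolation; the helper name and the even-CPU policy are only for illustration:

    #include <linux/cpumask.h>
    #include <linux/slab.h>

    /* Build a heap-allocated mask of the even-numbered possible CPUs; caller kfree()s it. */
    static struct cpumask *example_alloc_even_cpus(void)
    {
            struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
            int cpu;

            if (!mask)
                    return NULL;

            for_each_possible_cpu(cpu)
                    if (!(cpu % 2))
                            cpumask_set_cpu(cpu, mask);

            return mask;
    }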
|
D | dt_idle_states.c |
    98   const cpumask_t *cpumask) in idle_state_valid() argument
    111  for (cpu = cpumask_next(cpumask_first(cpumask), cpumask); in idle_state_valid()
    112  cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) { in idle_state_valid()
    157  const cpumask_t *cpumask; in dt_init_idle_driver() local
    168  cpumask = drv->cpumask ? : cpu_possible_mask; in dt_init_idle_driver()
    169  cpu_node = of_cpu_device_node_get(cpumask_first(cpumask)); in dt_init_idle_driver()
    187  if (!idle_state_valid(state_node, i, cpumask)) { in dt_init_idle_driver()
|
D | driver.c |
    52   for_each_cpu(cpu, drv->cpumask) { in __cpuidle_unset_driver()
    72   for_each_cpu(cpu, drv->cpumask) { in __cpuidle_set_driver()
    80   for_each_cpu(cpu, drv->cpumask) in __cpuidle_set_driver()
    163  if (!drv->cpumask) in __cpuidle_driver_init()
    164  drv->cpumask = (struct cpumask *)cpu_possible_mask; in __cpuidle_driver_init()
    228  on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer, in __cpuidle_register_driver()
    247  on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer, in __cpuidle_unregister_driver()
    366  if (!drv->cpumask) { in cpuidle_driver_state_disabled()
    371  for_each_cpu(cpu, drv->cpumask) { in cpuidle_driver_state_disabled()
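A sketch of the on_each_cpu_mask() call used by __cpuidle_register_driver() above: run a function on every CPU in a mask and wait for completion (the counter payload is illustrative):

    #include <linux/atomic.h>
    #include <linux/cpumask.h>
    #include <linux/smp.h>

    /* Runs on each targeted CPU, in interrupt-like context. */
    static void example_ipi_fn(void *info)
    {
            atomic_inc(info);
    }

    /* Fire example_ipi_fn() on every online CPU in @cpus and return how many ran. */
    static int example_count_responders(const struct cpumask *cpus)
    {
            atomic_t hits = ATOMIC_INIT(0);

            on_each_cpu_mask(cpus, example_ipi_fn, &hits, true);
            return atomic_read(&hits);
    }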
|
/Linux-v5.15/arch/arc/kernel/ |
D | smp.c |
    46   static int __init arc_get_cpu_map(const char *name, struct cpumask *cpumask) in arc_get_cpu_map() argument
    55   if (cpulist_parse(buf, cpumask)) in arc_get_cpu_map()
    67   struct cpumask cpumask; in arc_init_cpu_possible() local
    69   if (arc_get_cpu_map("possible-cpus", &cpumask)) { in arc_init_cpu_possible()
    73   cpumask_setall(&cpumask); in arc_init_cpu_possible()
    76   if (!cpumask_test_cpu(0, &cpumask)) in arc_init_cpu_possible()
    79   init_cpu_possible(&cpumask); in arc_init_cpu_possible()
    295  static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg) in ipi_send_msg()
    310  struct cpumask targets; in smp_send_stop()
    321  void arch_send_call_function_ipi_mask(const struct cpumask *mask) in arch_send_call_function_ipi_mask()
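The cpulist_parse() step from arc_get_cpu_map()/arc_init_cpu_possible() above, as a standalone sketch (the boot-CPU check mirrors line 76); the helper name is hypothetical:

    #include <linux/cpumask.h>
    #include <linux/errno.h>

    /* Parse a "0-3,8"-style CPU list into @mask and require that CPU0 is part of it. */
    static int example_parse_cpulist(const char *list, struct cpumask *mask)
    {
            if (cpulist_parse(list, mask))
                    return -EINVAL;

            if (!cpumask_test_cpu(0, mask))
                    return -EINVAL;

            return 0;
    }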
|
/Linux-v5.15/arch/riscv/include/asm/ |
D | smp.h |
    19   void (*ipi_inject)(const struct cpumask *target);
    40   void arch_send_call_function_ipi_mask(struct cpumask *mask);
    46   void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out);
    88   static inline void riscv_cpuid_to_hartid_mask(const struct cpumask *in, in riscv_cpuid_to_hartid_mask()
    89   struct cpumask *out) in riscv_cpuid_to_hartid_mask()
|
/Linux-v5.15/drivers/md/ |
D | dm-ps-io-affinity.c |
    16   cpumask_var_t cpumask; member
    36   free_cpumask_var(pi->cpumask); in ioa_free_path()
    66   if (!zalloc_cpumask_var(&pi->cpumask, GFP_KERNEL)) { in ioa_add_path()
    72   ret = cpumask_parse(argv[0], pi->cpumask); in ioa_add_path()
    79   for_each_cpu(cpu, pi->cpumask) { in ioa_add_path()
    105  free_cpumask_var(pi->cpumask); in ioa_add_path()
    171  DMEMIT("%*pb ", cpumask_pr_args(pi->cpumask)); in ioa_status()
    201  const struct cpumask *cpumask; in ioa_select_path() local
    219  cpumask = cpumask_of_node(node); in ioa_select_path()
    220  for_each_cpu(i, cpumask) { in ioa_select_path()
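The cpumask_var_t lifecycle used by ioa_add_path() above (zalloc_cpumask_var(), cpumask_parse(), free_cpumask_var()), reduced to a sketch; cpumask_parse() takes the hex bitmask format that the path selector argument also uses:

    #include <linux/cpumask.h>
    #include <linux/gfp.h>
    #include <linux/printk.h>

    /* Parse a hex bitmask string such as "f0" and log the resulting CPU list. */
    static int example_parse_bitmask(const char *buf)
    {
            cpumask_var_t mask;
            int ret;

            if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                    return -ENOMEM;

            ret = cpumask_parse(buf, mask);
            if (!ret)
                    pr_info("parsed CPUs %*pbl\n", cpumask_pr_args(mask));

            free_cpumask_var(mask);
            return ret;
    }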
|
/Linux-v5.15/arch/powerpc/include/asm/ |
D | smp.h |
    124  static inline struct cpumask *cpu_sibling_mask(int cpu) in cpu_sibling_mask()
    129  static inline struct cpumask *cpu_core_mask(int cpu) in cpu_core_mask()
    134  static inline struct cpumask *cpu_l2_cache_mask(int cpu) in cpu_l2_cache_mask()
    139  static inline struct cpumask *cpu_smallcore_mask(int cpu) in cpu_smallcore_mask()
    152  static inline const struct cpumask *cpu_smt_mask(int cpu) in cpu_smt_mask()
    206  static inline const struct cpumask *cpu_sibling_mask(int cpu) in cpu_sibling_mask()
    211  static inline const struct cpumask *cpu_smallcore_mask(int cpu) in cpu_smallcore_mask()
    216  static inline const struct cpumask *cpu_l2_cache_mask(int cpu) in cpu_l2_cache_mask()
    268  extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
|
/Linux-v5.15/lib/ |
D | cpumask.c |
    17   unsigned int cpumask_next(int n, const struct cpumask *srcp) in cpumask_next()
    34   int cpumask_next_and(int n, const struct cpumask *src1p, in cpumask_next_and()
    35   const struct cpumask *src2p) in cpumask_next_and()
    53   int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) in cpumask_any_but()
    77   int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap) in cpumask_next_wrap()
    246  int cpumask_any_and_distribute(const struct cpumask *src1p, in cpumask_any_and_distribute()
    247  const struct cpumask *src2p) in cpumask_any_and_distribute()
    265  int cpumask_any_distribute(const struct cpumask *srcp) in cpumask_any_distribute()
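A sketch built on cpumask_any_but() from the list above: pick some online CPU other than a given one, falling back to the given CPU when it is the only one online (return values >= nr_cpu_ids mean "no such CPU"). The helper name is made up:

    #include <linux/cpumask.h>

    /* Prefer any online CPU that is not @busy_cpu. */
    static unsigned int example_pick_other_cpu(unsigned int busy_cpu)
    {
            unsigned int cpu = cpumask_any_but(cpu_online_mask, busy_cpu);

            return cpu < nr_cpu_ids ? cpu : busy_cpu;
    }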
|
/Linux-v5.15/arch/x86/kernel/apic/ |
D | local.h |
    59   void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector);
    60   void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, int vector);
    66   void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, int vector);
    67   void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, int vector);
    68   void default_send_IPI_mask_logical(const struct cpumask *mask, int vector);
|
D | ipi.c |
    79   void native_send_call_func_ipi(const struct cpumask *mask) in native_send_call_func_ipi()
    185  void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector) in default_send_IPI_mask_sequence_phys()
    203  void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, in default_send_IPI_mask_allbutself_phys()
    247  void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, in default_send_IPI_mask_sequence_logical()
    267  void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, in default_send_IPI_mask_allbutself_logical()
    290  void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector) in default_send_IPI_mask_logical() argument
    292  unsigned long mask = cpumask_bits(cpumask)[0]; in default_send_IPI_mask_logical()
|
D | apic_flat_64.c |
    60   static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector) in flat_send_IPI_mask() argument
    62   unsigned long mask = cpumask_bits(cpumask)[0]; in flat_send_IPI_mask()
    68   flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) in flat_send_IPI_mask_allbutself() argument
    70   unsigned long mask = cpumask_bits(cpumask)[0]; in flat_send_IPI_mask_allbutself()
|
/Linux-v5.15/drivers/infiniband/hw/hfi1/ |
D | affinity.h |
    28   struct cpumask mask;
    29   struct cpumask used;
    64   struct cpumask general_intr_mask;
    65   struct cpumask comp_vect_mask;
    71   struct cpumask real_cpu_mask;
|
/Linux-v5.15/kernel/time/ |
D | tick-common.c |
    207  const struct cpumask *cpumask) in tick_setup_device() argument
    258  if (!cpumask_equal(newdev->cpumask, cpumask)) in tick_setup_device()
    259  irq_set_affinity(newdev->irq, cpumask); in tick_setup_device()
    291  if (!cpumask_test_cpu(cpu, newdev->cpumask)) in tick_check_percpu()
    293  if (cpumask_equal(newdev->cpumask, cpumask_of(cpu))) in tick_check_percpu()
    299  if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu))) in tick_check_percpu()
    321  !cpumask_equal(curdev->cpumask, newdev->cpumask); in tick_check_preferred()
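The per-CPU check used in tick_check_percpu() above, isolated as a sketch: cpumask_of() yields a single-CPU mask that can be compared with cpumask_equal(). The helper name is made up:

    #include <linux/cpumask.h>

    /* True if @mask targets exactly CPU @cpu and nothing else. */
    static bool example_is_percpu_mask(const struct cpumask *mask, int cpu)
    {
            return cpumask_equal(mask, cpumask_of(cpu));
    }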
|
/Linux-v5.15/include/trace/events/ |
D | thermal.h |
    96   TP_PROTO(const struct cpumask *cpus, unsigned long freq, u32 *load,
    102  __bitmask(cpumask, num_possible_cpus())
    110  __assign_bitmask(cpumask, cpumask_bits(cpus),
    120  __get_bitmask(cpumask), __entry->freq,
    126  TP_PROTO(const struct cpumask *cpus, unsigned int freq,
    132  __bitmask(cpumask, num_possible_cpus())
    139  __assign_bitmask(cpumask, cpumask_bits(cpus),
    147  __get_bitmask(cpumask), __entry->freq, __entry->cdev_state,
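A sketch of a trace event that records a cpumask with the __bitmask()/__assign_bitmask()/__get_bitmask() helpers seen above. The event name and fields are invented, and the usual trace-header boilerplate (TRACE_SYSTEM, TRACE_INCLUDE_FILE, the multi-read guards) is omitted:

    #include <linux/tracepoint.h>

    TRACE_EVENT(example_cpus_touched,

            TP_PROTO(const struct cpumask *cpus, unsigned int freq),

            TP_ARGS(cpus, freq),

            TP_STRUCT__entry(
                    __bitmask(cpumask, num_possible_cpus())
                    __field(unsigned int, freq)
            ),

            TP_fast_assign(
                    __assign_bitmask(cpumask, cpumask_bits(cpus),
                                     num_possible_cpus());
                    __entry->freq = freq;
            ),

            TP_printk("cpus=%s freq=%u", __get_bitmask(cpumask), __entry->freq)
    );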
|
/Linux-v5.15/drivers/cpufreq/ |
D | vexpress-spc-cpufreq.c |
    281  const struct cpumask *cpumask) in _put_cluster_clk_and_freq_table() argument
    293  const struct cpumask *cpumask) in put_cluster_clk_and_freq_table() argument
    302  return _put_cluster_clk_and_freq_table(cpu_dev, cpumask); in put_cluster_clk_and_freq_table()
    310  _put_cluster_clk_and_freq_table(cdev, cpumask); in put_cluster_clk_and_freq_table()
    318  const struct cpumask *cpumask) in _get_cluster_clk_and_freq_table() argument
    354  const struct cpumask *cpumask) in get_cluster_clk_and_freq_table() argument
    363  ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask); in get_cluster_clk_and_freq_table()
    379  ret = _get_cluster_clk_and_freq_table(cdev, cpumask); in get_cluster_clk_and_freq_table()
    402  _put_cluster_clk_and_freq_table(cdev, cpumask); in get_cluster_clk_and_freq_table()
|
/Linux-v5.15/kernel/irq/ |
D | ipi.c |
    24   const struct cpumask *dest) in irq_reserve_ipi()
    115  int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest) in irq_destroy_ipi()
    118  struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL; in irq_destroy_ipi()
    165  struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL; in ipi_get_hwirq()
    187  const struct cpumask *dest, unsigned int cpu) in ipi_send_verify()
    189  struct cpumask *ipimask = irq_data_get_affinity_mask(data); in ipi_send_verify()
    263  int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest) in __ipi_send_mask()
    328  int ipi_send_mask(unsigned int virq, const struct cpumask *dest) in ipi_send_mask()
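A sketch of the genirq IPI API above, which is normally driven by irqchip/architecture code: reserve an IPI targeting @dest on @domain, send it to every CPU in the mask, and tear it down again. The wrapper name is made up and error handling is kept minimal:

    #include <linux/cpumask.h>
    #include <linux/irq.h>
    #include <linux/irqdomain.h>

    /* Reserve an IPI on @domain, kick every CPU in @dest once, then release it. */
    static int example_kick_cpus(struct irq_domain *domain, const struct cpumask *dest)
    {
            int ret, virq;

            virq = irq_reserve_ipi(domain, dest);
            if (virq < 0)
                    return virq;

            ret = ipi_send_mask(virq, dest);
            irq_destroy_ipi(virq, dest);
            return ret;
    }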
|