/Linux-v5.15/tools/lib/perf/ |
D | cpumap.c |
    15   struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));  in perf_cpu_map__dummy_new() local
    17   if (cpus != NULL) {  in perf_cpu_map__dummy_new()
    18   cpus->nr = 1;  in perf_cpu_map__dummy_new()
    19   cpus->map[0] = -1;  in perf_cpu_map__dummy_new()
    20   refcount_set(&cpus->refcnt, 1);  in perf_cpu_map__dummy_new()
    23   return cpus;  in perf_cpu_map__dummy_new()
    50   struct perf_cpu_map *cpus;  in cpu_map__default_new() local
    57   cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));  in cpu_map__default_new()
    58   if (cpus != NULL) {  in cpu_map__default_new()
    62   cpus->map[i] = i;  in cpu_map__default_new()
    [all …]
|
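Both constructors above allocate a perf_cpu_map with a flexible array of CPU numbers and an initial reference count of one; callers then manage the object only through the get/put API. A minimal sketch of driving these constructors through libperf's public <perf/cpumap.h> (error handling trimmed):

    #include <stdio.h>
    #include <perf/cpumap.h>

    int main(void)
    {
        /* "dummy" map: a single entry of -1, meaning "any CPU" */
        struct perf_cpu_map *any = perf_cpu_map__dummy_new();
        /* NULL cpu list: enumerate the CPUs currently online */
        struct perf_cpu_map *online = perf_cpu_map__new(NULL);

        if (!any || !online)
            return 1;

        printf("dummy map entries: %d, online CPUs: %d\n",
               perf_cpu_map__nr(any), perf_cpu_map__nr(online));

        /* drop the reference taken by each constructor */
        perf_cpu_map__put(online);
        perf_cpu_map__put(any);
        return 0;
    }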
D | evlist.c |
    43   perf_cpu_map__put(evsel->cpus);  in __perf_evlist__propagate_maps()
    44   evsel->cpus = perf_cpu_map__get(evlist->cpus);  in __perf_evlist__propagate_maps()
    45   } else if (!evsel->system_wide && perf_cpu_map__empty(evlist->cpus)) {  in __perf_evlist__propagate_maps()
    46   perf_cpu_map__put(evsel->cpus);  in __perf_evlist__propagate_maps()
    47   evsel->cpus = perf_cpu_map__get(evlist->cpus);  in __perf_evlist__propagate_maps()
    48   } else if (evsel->cpus != evsel->own_cpus) {  in __perf_evlist__propagate_maps()
    49   perf_cpu_map__put(evsel->cpus);  in __perf_evlist__propagate_maps()
    50   evsel->cpus = perf_cpu_map__get(evsel->own_cpus);  in __perf_evlist__propagate_maps()
    55   evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus);  in __perf_evlist__propagate_maps()
    126  perf_cpu_map__put(evlist->cpus);  in perf_evlist__exit()
    [all …]
|
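__perf_evlist__propagate_maps() always drops the evsel's old reference with perf_cpu_map__put() before storing a new one taken with perf_cpu_map__get(). A sketch of that ownership pattern in isolation; the holder struct is hypothetical, only the get/put calls mirror the libperf API:

    #include <perf/cpumap.h>

    /* hypothetical holder that keeps a shared reference to a CPU map */
    struct holder {
        struct perf_cpu_map *cpus;
    };

    static void holder_set_cpus(struct holder *h, struct perf_cpu_map *cpus)
    {
        /* release whatever was held before (put accepts NULL) ... */
        perf_cpu_map__put(h->cpus);
        /* ... then take our own reference on the new map */
        h->cpus = perf_cpu_map__get(cpus);
    }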
/Linux-v5.15/tools/perf/tests/ |
D | openat-syscall-all-cpus.c |
    25   struct perf_cpu_map *cpus;  in test__openat_syscall_event_on_all_cpus() local
    38   cpus = perf_cpu_map__new(NULL);  in test__openat_syscall_event_on_all_cpus()
    39   if (cpus == NULL) {  in test__openat_syscall_event_on_all_cpus()
    53   if (evsel__open(evsel, cpus, threads) < 0) {  in test__openat_syscall_event_on_all_cpus()
    60   for (cpu = 0; cpu < cpus->nr; ++cpu) {  in test__openat_syscall_event_on_all_cpus()
    68   if (cpus->map[cpu] >= CPU_SETSIZE) {  in test__openat_syscall_event_on_all_cpus()
    69   pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);  in test__openat_syscall_event_on_all_cpus()
    73   CPU_SET(cpus->map[cpu], &cpu_set);  in test__openat_syscall_event_on_all_cpus()
    76   cpus->map[cpu],  in test__openat_syscall_event_on_all_cpus()
    84   CPU_CLR(cpus->map[cpu], &cpu_set);  in test__openat_syscall_event_on_all_cpus()
    [all …]
|
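The test walks every entry of the map, skips CPUs that do not fit into a cpu_set_t, and pins itself to each remaining CPU before triggering the counted syscalls. A condensed userspace sketch of that affinity loop (no perf event attached, failures simply skip the CPU):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>
    #include <perf/cpumap.h>

    static void visit_each_cpu(struct perf_cpu_map *cpus)
    {
        cpu_set_t cpu_set;
        int idx;

        CPU_ZERO(&cpu_set);
        for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++) {
            int cpu = perf_cpu_map__cpu(cpus, idx);

            if (cpu >= CPU_SETSIZE) {
                printf("Ignoring CPU %d\n", cpu);
                continue;
            }
            CPU_SET(cpu, &cpu_set);
            if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
                printf("Cannot set affinity to CPU %d\n", cpu);
                CPU_CLR(cpu, &cpu_set);
                continue;
            }
            /* ... run the workload to be counted on this CPU ... */
            CPU_CLR(cpu, &cpu_set);
        }
    }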
D | cpumap.c |
    54   struct cpu_map_entries *cpus;  in process_event_cpus() local
    62   cpus = (struct cpu_map_entries *)data->data;  in process_event_cpus()
    64   TEST_ASSERT_VAL("wrong nr", cpus->nr == 2);  in process_event_cpus()
    65   TEST_ASSERT_VAL("wrong cpu", cpus->cpu[0] == 1);  in process_event_cpus()
    66   TEST_ASSERT_VAL("wrong cpu", cpus->cpu[1] == 256);  in process_event_cpus()
    80   struct perf_cpu_map *cpus;  in test__cpu_map_synthesize() local
    83   cpus = perf_cpu_map__new("0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19");  in test__cpu_map_synthesize()
    86   !perf_event__synthesize_cpu_map(NULL, cpus, process_event_mask, NULL));  in test__cpu_map_synthesize()
    88   perf_cpu_map__put(cpus);  in test__cpu_map_synthesize()
    91   cpus = perf_cpu_map__new("1,256");  in test__cpu_map_synthesize()
    [all …]
|
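perf_cpu_map__new() parses a comma-separated CPU list (ranges such as "0-19" are also accepted), and the synthesize test feeds it both a dense and a sparse list. A small sketch of parsing the sparse list and checking the result with the public accessors only:

    #include <assert.h>
    #include <perf/cpumap.h>

    static void check_sparse_list(void)
    {
        /* sparse list: forces the "mask vs. entries" encoding decision */
        struct perf_cpu_map *cpus = perf_cpu_map__new("1,256");

        assert(cpus);
        assert(perf_cpu_map__nr(cpus) == 2);
        assert(perf_cpu_map__cpu(cpus, 0) == 1);
        assert(perf_cpu_map__cpu(cpus, 1) == 256);

        perf_cpu_map__put(cpus);
    }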
D | event-times.c |
    115  struct perf_cpu_map *cpus;  in attach__cpu_disabled() local
    120  cpus = perf_cpu_map__new("0");  in attach__cpu_disabled()
    121  if (cpus == NULL) {  in attach__cpu_disabled()
    128  err = evsel__open_per_cpu(evsel, cpus, -1);  in attach__cpu_disabled()
    137  perf_cpu_map__put(cpus);  in attach__cpu_disabled()
    144  struct perf_cpu_map *cpus;  in attach__cpu_enabled() local
    149  cpus = perf_cpu_map__new("0");  in attach__cpu_enabled()
    150  if (cpus == NULL) {  in attach__cpu_enabled()
    155  err = evsel__open_per_cpu(evsel, cpus, -1);  in attach__cpu_enabled()
    159  perf_cpu_map__put(cpus);  in attach__cpu_enabled()
|
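Both helpers above build a one-entry map for CPU 0 and open the event there alone. They use perf's internal evsel__open_per_cpu(); the sketch below assumes the equivalent step through libperf's public perf_evsel__open() instead, with a software counter so it stays self-contained:

    #include <linux/perf_event.h>
    #include <perf/cpumap.h>
    #include <perf/evsel.h>

    static int open_on_cpu0(void)
    {
        struct perf_event_attr attr = {
            .type   = PERF_TYPE_SOFTWARE,
            .config = PERF_COUNT_SW_CPU_CLOCK,
        };
        struct perf_cpu_map *cpus = perf_cpu_map__new("0");
        struct perf_evsel *evsel = perf_evsel__new(&attr);
        int err = -1;

        if (!cpus || !evsel)
            goto out;

        /* NULL threads: count system-wide on the CPUs in the map */
        err = perf_evsel__open(evsel, cpus, NULL);
        if (!err)
            perf_evsel__close(evsel);
    out:
        perf_evsel__delete(evsel);
        perf_cpu_map__put(cpus);
        return err;
    }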
D | mmap-basic.c |
    37   struct perf_cpu_map *cpus;  in test__basic_mmap() local
    55   cpus = perf_cpu_map__new(NULL);  in test__basic_mmap()
    56   if (cpus == NULL) {  in test__basic_mmap()
    62   CPU_SET(cpus->map[0], &cpu_set);  in test__basic_mmap()
    66   cpus->map[0], str_error_r(errno, sbuf, sizeof(sbuf)));  in test__basic_mmap()
    76   perf_evlist__set_maps(&evlist->core, cpus, threads);  in test__basic_mmap()
    93   if (evsel__open(evsels[i], cpus, threads) < 0) {  in test__basic_mmap()
    162  perf_cpu_map__put(cpus);  in test__basic_mmap()
|
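test__basic_mmap() pairs a CPU map with a thread map and hands both to the evlist before opening and mmaping the events. A stripped-down sketch of that wiring with the public libperf evlist API (the perf test itself goes through perf's wrapper and passes &evlist->core); the evlist takes its own references, so the local ones are dropped right after set_maps, just as the test does at the end:

    #include <perf/cpumap.h>
    #include <perf/threadmap.h>
    #include <perf/evlist.h>

    static struct perf_evlist *make_evlist_for_online_cpus(void)
    {
        struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
        struct perf_thread_map *threads = perf_thread_map__new_dummy();
        struct perf_evlist *evlist = perf_evlist__new();

        if (!cpus || !threads || !evlist)
            goto err;

        /* the evlist takes its own references on both maps */
        perf_evlist__set_maps(evlist, cpus, threads);

        /* drop the local references; the evlist keeps the maps alive */
        perf_cpu_map__put(cpus);
        perf_thread_map__put(threads);
        return evlist;

    err:
        perf_evlist__delete(evlist);
        perf_thread_map__put(threads);
        perf_cpu_map__put(cpus);
        return NULL;
    }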
/Linux-v5.15/tools/perf/util/ |
D | evlist-hybrid.c |
    24   struct perf_cpu_map *cpus;  in evlist__add_default_hybrid() local
    34   cpus = perf_cpu_map__get(pmu->cpus);  in evlist__add_default_hybrid()
    35   evsel->core.cpus = cpus;  in evlist__add_default_hybrid()
    36   evsel->core.own_cpus = perf_cpu_map__get(cpus);  in evlist__add_default_hybrid()
    92   struct perf_cpu_map *cpus;  in evlist__fix_hybrid_cpus() local
    100  cpus = perf_cpu_map__new(cpu_list);  in evlist__fix_hybrid_cpus()
    101  if (!cpus)  in evlist__fix_hybrid_cpus()
    120  ret = perf_pmu__cpus_match(pmu, cpus, &matched_cpus,  in evlist__fix_hybrid_cpus()
    128  matched_cpus->nr < cpus->nr ||  in evlist__fix_hybrid_cpus()
    129  matched_cpus->nr < pmu->cpus->nr)) {  in evlist__fix_hybrid_cpus()
    [all …]
|
D | cpumap.c |
    21   static struct perf_cpu_map *cpu_map__from_entries(struct cpu_map_entries *cpus)  in cpu_map__from_entries() argument
    25   map = perf_cpu_map__empty_new(cpus->nr);  in cpu_map__from_entries()
    29   for (i = 0; i < cpus->nr; i++) {  in cpu_map__from_entries()
    35   if (cpus->cpu[i] == (u16) -1)  in cpu_map__from_entries()
    38   map->map[i] = (int) cpus->cpu[i];  in cpu_map__from_entries()
    83   struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int) * nr);  in perf_cpu_map__empty_new() local
    85   if (cpus != NULL) {  in perf_cpu_map__empty_new()
    88   cpus->nr = nr;  in perf_cpu_map__empty_new()
    90   cpus->map[i] = -1;  in perf_cpu_map__empty_new()
    92   refcount_set(&cpus->refcnt, 1);  in perf_cpu_map__empty_new()
    [all …]
|
D | perf_api_probe.c |
    63   struct perf_cpu_map *cpus;  in perf_probe_api() local
    66   cpus = perf_cpu_map__new(NULL);  in perf_probe_api()
    67   if (!cpus)  in perf_probe_api()
    69   cpu = cpus->map[0];  in perf_probe_api()
    70   perf_cpu_map__put(cpus);  in perf_probe_api()
    138  struct perf_cpu_map *cpus;  in perf_can_record_cpu_wide() local
    141  cpus = perf_cpu_map__new(NULL);  in perf_can_record_cpu_wide()
    142  if (!cpus)  in perf_can_record_cpu_wide()
    144  cpu = cpus->map[0];  in perf_can_record_cpu_wide()
    145  perf_cpu_map__put(cpus);  in perf_can_record_cpu_wide()
|
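Both probe helpers only need one valid CPU number to open a throw-away event against, so they take the first entry of the default map and drop the map again. The same idiom through the public accessor (the cpus->map[0] access above works only inside perf, where the struct layout is visible):

    #include <perf/cpumap.h>

    /* return the first online CPU number, or -1 on failure */
    static int first_online_cpu(void)
    {
        struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
        int cpu;

        if (!cpus)
            return -1;

        cpu = perf_cpu_map__cpu(cpus, 0);
        perf_cpu_map__put(cpus);
        return cpu;
    }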
D | cpumap.h |
    41   int cpu_map__build_socket_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **sockp);
    42   int cpu_map__build_die_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **diep);
    43   int cpu_map__build_core_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **corep);
    44   int cpu_map__build_node_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **nodep);
    61   int cpu_map__build_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **res,
    65   int cpu_map__cpu(struct perf_cpu_map *cpus, int idx);
    66   bool cpu_map__has(struct perf_cpu_map *cpus, int cpu);
|
/Linux-v5.15/tools/perf/arch/arm64/util/ |
D | header.c |
    17   static int _get_cpuid(char *buf, size_t sz, struct perf_cpu_map *cpus)  in _get_cpuid() argument
    26   cpus = perf_cpu_map__get(cpus);  in _get_cpuid()
    28   for (cpu = 0; cpu < perf_cpu_map__nr(cpus); cpu++) {  in _get_cpuid()
    33   sysfs, cpus->map[cpu]);  in _get_cpuid()
    57   perf_cpu_map__put(cpus);  in _get_cpuid()
    67   struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);  in get_cpuid() local
    70   if (!cpus)  in get_cpuid()
    73   ret = _get_cpuid(buf, sz, cpus);  in get_cpuid()
    75   perf_cpu_map__put(cpus);  in get_cpuid()
    85   if (!pmu || !pmu->cpus)  in get_cpuid_str()
    [all …]
|
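The arm64 helper iterates over the CPUs in the map and reads each CPU's MIDR register from sysfs to build the cpuid string. The sketch below shows one such read; the sysfs path follows this helper but is written out here as an assumption (the real code prefixes it with the detected sysfs mount point):

    #include <stdio.h>

    /* read MIDR_EL1 for one CPU from sysfs; path per the arm64 helper above */
    static int read_midr(int cpu, unsigned long long *midr)
    {
        char path[256];
        FILE *file;
        int ret;

        snprintf(path, sizeof(path),
                 "/sys/devices/system/cpu/cpu%d/regs/identification/midr_el1",
                 cpu);

        file = fopen(path, "r");
        if (!file)
            return -1;

        ret = fscanf(file, "%llx", midr) == 1 ? 0 : -1;
        fclose(file);
        return ret;
    }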
/Linux-v5.15/drivers/clk/sunxi/ |
D | clk-sun9i-cpus.c |
    55   struct sun9i_a80_cpus_clk *cpus = to_sun9i_a80_cpus_clk(hw);  in sun9i_a80_cpus_clk_recalc_rate() local
    60   reg = readl(cpus->reg);  in sun9i_a80_cpus_clk_recalc_rate()
    155  struct sun9i_a80_cpus_clk *cpus = to_sun9i_a80_cpus_clk(hw);  in sun9i_a80_cpus_clk_set_rate() local
    162  reg = readl(cpus->reg);  in sun9i_a80_cpus_clk_set_rate()
    170  writel(reg, cpus->reg);  in sun9i_a80_cpus_clk_set_rate()
    188  struct sun9i_a80_cpus_clk *cpus;  in sun9i_a80_cpus_setup() local
    193  cpus = kzalloc(sizeof(*cpus), GFP_KERNEL);  in sun9i_a80_cpus_setup()
    194  if (!cpus)  in sun9i_a80_cpus_setup()
    197  cpus->reg = of_io_request_and_map(node, 0, of_node_full_name(node));  in sun9i_a80_cpus_setup()
    198  if (IS_ERR(cpus->reg))  in sun9i_a80_cpus_setup()
    [all …]
|
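The CPUS clock driver follows the usual read-modify-write pattern: read the single configuration register, adjust the mux/divider fields, and write it back under a spinlock. A generic sketch of that pattern; the divider field (bits and shift) is invented for illustration and is not the A80 register layout:

    #include <linux/bits.h>
    #include <linux/io.h>
    #include <linux/spinlock.h>

    /* hypothetical divider field: bits [5:4] of the clock register */
    #define EX_DIV_SHIFT    4
    #define EX_DIV_MASK     GENMASK(5, 4)

    static DEFINE_SPINLOCK(ex_clk_lock);

    static void ex_clk_set_div(void __iomem *reg, u32 div)
    {
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&ex_clk_lock, flags);

        val = readl(reg);
        val &= ~EX_DIV_MASK;
        val |= (div << EX_DIV_SHIFT) & EX_DIV_MASK;
        writel(val, reg);

        spin_unlock_irqrestore(&ex_clk_lock, flags);
    }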
/Linux-v5.15/tools/lib/perf/tests/ |
D | test-evlist.c |
    32   struct perf_cpu_map *cpus;  in test_stat_cpu() local
    45   cpus = perf_cpu_map__new(NULL);  in test_stat_cpu()
    46   __T("failed to create cpus", cpus);  in test_stat_cpu()
    65   perf_evlist__set_maps(evlist, cpus, NULL);  in test_stat_cpu()
    71   cpus = perf_evsel__cpus(evsel);  in test_stat_cpu()
    73   for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++) {  in test_stat_cpu()
    84   perf_cpu_map__put(cpus);  in test_stat_cpu()
    214  struct perf_cpu_map *cpus;  in test_mmap_thread() local
    260  cpus = perf_cpu_map__dummy_new();  in test_mmap_thread()
    261  __T("failed to create cpus", cpus);  in test_mmap_thread()
    [all …]
|
D | test-cpumap.c |
    16   struct perf_cpu_map *cpus;  in test_cpumap() local
    22   cpus = perf_cpu_map__dummy_new();  in test_cpumap()
    23   if (!cpus)  in test_cpumap()
    26   perf_cpu_map__get(cpus);  in test_cpumap()
    27   perf_cpu_map__put(cpus);  in test_cpumap()
    28   perf_cpu_map__put(cpus);  in test_cpumap()
|
/Linux-v5.15/sound/soc/intel/boards/ |
D | sof_cs42l42.c |
    283  struct snd_soc_dai_link_component *cpus,  in create_spk_amp_dai_links() argument
    315  links[*id].cpus = &cpus[*id];  in create_spk_amp_dai_links()
    318  links[*id].cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL,  in create_spk_amp_dai_links()
    320  if (!links[*id].cpus->dai_name) {  in create_spk_amp_dai_links()
    333  struct snd_soc_dai_link_component *cpus,  in create_hp_codec_dai_links() argument
    353  links[*id].cpus = &cpus[*id];  in create_hp_codec_dai_links()
    356  links[*id].cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL,  in create_hp_codec_dai_links()
    359  if (!links[*id].cpus->dai_name)  in create_hp_codec_dai_links()
    372  struct snd_soc_dai_link_component *cpus,  in create_dmic_dai_links() argument
    383  links[*id].cpus = &cpus[*id];  in create_dmic_dai_links()
    [all …]
|
D | sof_pcm512x.c |
    225  struct snd_soc_dai_link_component *cpus;  in sof_card_dai_links_create() local
    231  cpus = devm_kcalloc(dev, sof_audio_card_pcm512x.num_links,  in sof_card_dai_links_create()
    233  if (!links || !cpus)  in sof_card_dai_links_create()
    256  links[id].cpus = &cpus[id];  in sof_card_dai_links_create()
    259  links[id].cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL,  in sof_card_dai_links_create()
    262  if (!links[id].cpus->dai_name)  in sof_card_dai_links_create()
    265  links[id].cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL,  in sof_card_dai_links_create()
    268  if (!links[id].cpus->dai_name)  in sof_card_dai_links_create()
    277  links[id].cpus = &cpus[id];  in sof_card_dai_links_create()
    278  links[id].cpus->dai_name = "DMIC01 Pin";  in sof_card_dai_links_create()
    [all …]
|
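Both SOF machine drivers above allocate an array of snd_soc_dai_link_component slots, point each dai_link's cpus at one slot, and generate the CPU DAI name with devm_kasprintf(). A reduced sketch of that step; the "SSP%d Pin" naming is taken from the excerpt, and the surrounding card setup is omitted:

    #include <linux/device.h>
    #include <sound/soc.h>

    /* attach a CPU DAI component named "SSP<port> Pin" to one dai_link */
    static int ex_set_ssp_cpu(struct device *dev, struct snd_soc_dai_link *link,
                              struct snd_soc_dai_link_component *cpu_slot,
                              int ssp_port)
    {
        link->cpus = cpu_slot;
        link->num_cpus = 1;

        link->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL,
                                              "SSP%d Pin", ssp_port);
        if (!link->cpus->dai_name)
            return -ENOMEM;

        return 0;
    }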
/Linux-v5.15/tools/perf/arch/nds32/util/ |
D | header.c |
    15   struct cpu_map *cpus;  in get_cpuid_str() local
    18   if (!sysfs || !pmu || !pmu->cpus)  in get_cpuid_str()
    25   cpus = cpu_map__get(pmu->cpus);  in get_cpuid_str()
    26   sprintf(buf, "0x%x", cpus->nr - 1);  in get_cpuid_str()
    27   cpu_map__put(cpus);  in get_cpuid_str()
|
/Linux-v5.15/drivers/cpufreq/ |
D | cpufreq-dt.c |
    30   cpumask_var_t cpus;  member
    50   if (cpumask_test_cpu(cpu, priv->cpus))  in cpufreq_dt_find_data()
    129  cpumask_copy(policy->cpus, priv->cpus);  in cpufreq_init()
    211  if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))  in dt_cpufreq_early_init()
    214  cpumask_set_cpu(cpu, priv->cpus);  in dt_cpufreq_early_init()
    235  ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->cpus);  in dt_cpufreq_early_init()
    245  if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus))  in dt_cpufreq_early_init()
    260  ret = dev_pm_opp_of_cpumask_add_table(priv->cpus);  in dt_cpufreq_early_init()
    279  cpumask_setall(priv->cpus);  in dt_cpufreq_early_init()
    280  ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->cpus);  in dt_cpufreq_early_init()
    [all …]
|
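dt_cpufreq_early_init() allocates a cpumask in the driver's private data, marks the policy CPU, and then asks the OPP layer which CPUs share that CPU's OPP table. A sketch of that allocate-and-query sequence, with the driver's fallback paths left out:

    #include <linux/cpumask.h>
    #include <linux/device.h>
    #include <linux/pm_opp.h>
    #include <linux/slab.h>

    static int ex_collect_sharing_cpus(struct device *cpu_dev, int cpu,
                                       cpumask_var_t *cpus)
    {
        int ret;

        if (!alloc_cpumask_var(cpus, GFP_KERNEL))
            return -ENOMEM;

        /* the policy CPU is always part of its own sharing mask */
        cpumask_set_cpu(cpu, *cpus);

        /* fill in the CPUs that share this CPU's OPP table, per DT */
        ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, *cpus);
        if (ret) {
            free_cpumask_var(*cpus);
            return ret;
        }

        return 0;
    }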
/Linux-v5.15/include/linux/ |
D | stop_machine.h |
    114  int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
    125  int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
    128  const struct cpumask *cpus);
    132  const struct cpumask *cpus)  in stop_machine_cpuslocked() argument
    143  stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)  in stop_machine() argument
    145  return stop_machine_cpuslocked(fn, data, cpus);  in stop_machine()
    150  const struct cpumask *cpus)  in stop_machine_from_inactive_cpu() argument
    152  return stop_machine(fn, data, cpus);  in stop_machine_from_inactive_cpu()
|
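stop_machine() runs fn on one CPU of the given mask while every other online CPU spins with interrupts disabled, which is what makes it suitable for updates that must never be observed half-done. A minimal sketch of a caller; the update function is made up, and passing cpu_online_mask (or NULL) lets any online CPU run it:

    #include <linux/cpumask.h>
    #include <linux/stop_machine.h>

    /* runs with every other online CPU quiesced and interrupts off */
    static int ex_apply_update(void *data)
    {
        int *new_value = data;

        /* ... patch global state that must never be seen half-updated ... */
        (void)*new_value;
        return 0;
    }

    static int ex_do_update(int value)
    {
        /*
         * The third argument selects which CPUs may execute
         * ex_apply_update(); NULL would also mean "any online CPU".
         */
        return stop_machine(ex_apply_update, &value, cpu_online_mask);
    }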
/Linux-v5.15/tools/lib/perf/include/perf/ |
D | cpumap.h |
    19   LIBPERF_API int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
    20   LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
    24   #define perf_cpu_map__for_each_cpu(cpu, idx, cpus) \  argument
    25   for ((idx) = 0, (cpu) = perf_cpu_map__cpu(cpus, idx); \
    26   (idx) < perf_cpu_map__nr(cpus); \
    27   (idx)++, (cpu) = perf_cpu_map__cpu(cpus, idx))
|
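perf_cpu_map__for_each_cpu() expands to a plain for loop that walks map indices and fetches the CPU number stored at each one via perf_cpu_map__cpu(). A short usage sketch:

    #include <stdio.h>
    #include <perf/cpumap.h>

    static void print_cpus(struct perf_cpu_map *cpus)
    {
        int idx, cpu;

        /* idx is the position in the map, cpu the CPU number stored there */
        perf_cpu_map__for_each_cpu(cpu, idx, cpus)
            printf("map[%d] = CPU %d\n", idx, cpu);
    }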
/Linux-v5.15/arch/x86/hyperv/ |
D | mmu.c |
    20   static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
    55   static void hyperv_flush_tlb_multi(const struct cpumask *cpus,  in hyperv_flush_tlb_multi() argument
    64   trace_hyperv_mmu_flush_tlb_multi(cpus, info);  in hyperv_flush_tlb_multi()
    75   if (cpumask_empty(cpus)) {  in hyperv_flush_tlb_multi()
    104  if (cpumask_equal(cpus, cpu_present_mask)) {  in hyperv_flush_tlb_multi()
    118  if (hv_cpu_number_to_vp_number(cpumask_last(cpus)) >= 64)  in hyperv_flush_tlb_multi()
    121  for_each_cpu(cpu, cpus) {  in hyperv_flush_tlb_multi()
    159  status = hyperv_flush_tlb_others_ex(cpus, info);  in hyperv_flush_tlb_multi()
    167  native_flush_tlb_multi(cpus, info);  in hyperv_flush_tlb_multi()
    170  static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,  in hyperv_flush_tlb_others_ex() argument
    [all …]
|
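hyperv_flush_tlb_multi() checks the cheap cases first (empty mask, mask covering all present CPUs) and only then walks the mask CPU by CPU, switching to the extended hypercall or the native flush when the VP numbers do not fit. The sketch below keeps only that cpumask-inspection skeleton, with the hypercall work replaced by placeholder comments:

    #include <linux/cpumask.h>

    /* skeleton of the mask checks done before building a flush request */
    static void ex_flush_cpus(const struct cpumask *cpus)
    {
        unsigned int cpu, nr = 0;

        if (cpumask_empty(cpus))
            return;                 /* nothing to flush */

        if (cpumask_equal(cpus, cpu_present_mask)) {
            /* ... "flush all" fast path, no per-CPU list needed ... */
            return;
        }

        for_each_cpu(cpu, cpus)
            nr++;   /* ... real code records this CPU's VP number here ... */

        if (!nr)
            return;
        /* ... issue the request, or fall back to a per-CPU IPI flush ... */
    }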
/Linux-v5.15/Documentation/devicetree/bindings/csky/ |
D | cpus.txt |
    6    the "cpus" node, which in turn contains a number of subnodes (ie "cpu")
    9    Only SMP system need to care about the cpus node and single processor
    10   needn't define cpus node at all.
    13   cpus and cpu node bindings definition
    16   - cpus node
    20   The node name must be "cpus".
    22   A cpus node must define the following properties:
    59   cpus {
|
/Linux-v5.15/sound/soc/samsung/ |
D | snow.c |
    146  link->cpus = links_cpus;  in snow_probe()
    163  link->cpus->of_node = of_parse_phandle(cpu, "sound-dai", 0);  in snow_probe()
    166  if (!link->cpus->of_node) {  in snow_probe()
    176  of_node_put(link->cpus->of_node);  in snow_probe()
    181  priv->clk_i2s_bus = of_clk_get_by_name(link->cpus->of_node,  in snow_probe()
    185  of_node_put(link->cpus->of_node);  in snow_probe()
    191  link->cpus->of_node = of_parse_phandle(dev->of_node,  in snow_probe()
    193  if (!link->cpus->of_node) {  in snow_probe()
    201  of_node_put(link->cpus->of_node);  in snow_probe()
    207  link->platforms->of_node = link->cpus->of_node;  in snow_probe()
    [all …]
|
/Linux-v5.15/tools/testing/selftests/rcutorture/bin/ |
D | jitter.sh |
    58   if cpus=`grep 1 /sys/devices/system/cpu/*/online 2>&1 |
    63   cpus=
    66   cpus="$cpus $nohotplugcpus"
    68   cpumask=`awk -v cpus="$cpus" -v me=$me -v n=$n 'BEGIN {
|
/Linux-v5.15/sound/soc/kirkwood/ |
D | armada-370-db.c |
    111  a370db_dai[0].cpus->of_node =  in a370db_probe()
    114  a370db_dai[0].platforms->of_node = a370db_dai[0].cpus->of_node;  in a370db_probe()
    120  a370db_dai[1].cpus->of_node = a370db_dai[0].cpus->of_node;  in a370db_probe()
    121  a370db_dai[1].platforms->of_node = a370db_dai[0].cpus->of_node;  in a370db_probe()
    127  a370db_dai[2].cpus->of_node = a370db_dai[0].cpus->of_node;  in a370db_probe()
    128  a370db_dai[2].platforms->of_node = a370db_dai[0].cpus->of_node;  in a370db_probe()
|