/Linux-v5.10/arch/xtensa/kernel/ |
D | jump_label.c |
      27  atomic_t cpu_count;   member
      43  if (atomic_inc_return(&patch->cpu_count) == 1) {   in patch_text_stop_machine()
      45  atomic_inc(&patch->cpu_count);   in patch_text_stop_machine()
      47  while (atomic_read(&patch->cpu_count) <= num_online_cpus())   in patch_text_stop_machine()
      58  .cpu_count = ATOMIC_INIT(0),   in patch_text()
|
/Linux-v5.10/arch/riscv/kernel/ |
D | patch.c |
      19  atomic_t cpu_count;   member
     103  if (atomic_inc_return(&patch->cpu_count) == 1) {   in patch_text_cb()
     107  atomic_inc(&patch->cpu_count);   in patch_text_cb()
     109  while (atomic_read(&patch->cpu_count) <= num_online_cpus())   in patch_text_cb()
     123  .cpu_count = ATOMIC_INIT(0),   in patch_text()
|
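The xtensa and riscv hits above (and the arm64 insn.c, csky ftrace.c, and csky kprobes.c entries further down) all use the same rendezvous idiom: a callback run on every CPU via stop_machine(), where an atomic cpu_count elects the first arrival to rewrite the instructions while the other CPUs spin until the counter passes num_online_cpus(). The following is a minimal userspace sketch of just that counter handshake, assuming C11 atomics and pthreads; NCPUS, do_patch(), and patched_value are hypothetical stand-ins, and the real code's cache maintenance and stop_machine() plumbing are omitted.

/*
 * Minimal sketch of the counter handshake only (assumptions: NCPUS
 * threads stand in for online CPUs; do_patch() and patched_value are
 * hypothetical; cache maintenance and error handling are omitted).
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

static atomic_int cpu_count;        /* rendezvous counter, starts at 0 */
static int patched_value;           /* the "text" being patched        */

static void do_patch(void)
{
    patched_value = 42;
}

static void *patch_text_cb(void *arg)
{
    (void)arg;

    /* atomic_fetch_add() returns the old value, so "+ 1 == 1" mirrors
     * the kernel's atomic_inc_return(&...->cpu_count) == 1 test. */
    if (atomic_fetch_add(&cpu_count, 1) + 1 == 1) {
        do_patch();                       /* first CPU in does the patch  */
        atomic_fetch_add(&cpu_count, 1);  /* extra bump releases spinners */
    } else {
        /* Everyone else spins until all CPUs have arrived and the
         * patcher has added its extra increment (count > NCPUS). */
        while (atomic_load(&cpu_count) <= NCPUS)
            ;                             /* cpu_relax() in the kernel    */
    }
    return NULL;
}

int main(void)
{
    pthread_t tid[NCPUS];

    for (int i = 0; i < NCPUS; i++)
        pthread_create(&tid[i], NULL, patch_text_cb, NULL);
    for (int i = 0; i < NCPUS; i++)
        pthread_join(tid[i], NULL);

    printf("patched_value = %d, final cpu_count = %d\n",
           patched_value, atomic_load(&cpu_count));
    return 0;
}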
/Linux-v5.10/tools/power/cpupower/utils/idle_monitor/ |
D | cpuidle_sysfs.c |
      48  for (cpu = 0; cpu < cpu_count; cpu++) {   in cpuidle_start()
      67  for (cpu = 0; cpu < cpu_count; cpu++) {   in cpuidle_stop()
     178  previous_count = malloc(sizeof(long long *) * cpu_count);   in cpuidle_register()
     179  current_count = malloc(sizeof(long long *) * cpu_count);   in cpuidle_register()
     180  for (num = 0; num < cpu_count; num++) {   in cpuidle_register()
     195  for (num = 0; num < cpu_count; num++) {   in cpuidle_unregister()
|
D | mperf_monitor.c |
     233  for (cpu = 0; cpu < cpu_count; cpu++)   in mperf_start()
     246  for (cpu = 0; cpu < cpu_count; cpu++)   in mperf_stop()
     351  is_valid = calloc(cpu_count, sizeof(int));   in mperf_register()
     352  mperf_previous_count = calloc(cpu_count, sizeof(unsigned long long));   in mperf_register()
     353  aperf_previous_count = calloc(cpu_count, sizeof(unsigned long long));   in mperf_register()
     354  mperf_current_count = calloc(cpu_count, sizeof(unsigned long long));   in mperf_register()
     355  aperf_current_count = calloc(cpu_count, sizeof(unsigned long long));   in mperf_register()
|
D | snb_idle.c |
     117  for (cpu = 0; cpu < cpu_count; cpu++) {   in snb_start()
     134  for (cpu = 0; cpu < cpu_count; cpu++) {   in snb_stop()
     166  is_valid = calloc(cpu_count, sizeof(int));   in snb_register()
     168  previous_count[num] = calloc(cpu_count,   in snb_register()
     170  current_count[num] = calloc(cpu_count,   in snb_register()
|
D | hsw_ext_idle.c |
     119  for (cpu = 0; cpu < cpu_count; cpu++) {   in hsw_ext_start()
     136  for (cpu = 0; cpu < cpu_count; cpu++) {   in hsw_ext_stop()
     161  is_valid = calloc(cpu_count, sizeof(int));   in hsw_ext_register()
     163  previous_count[num] = calloc(cpu_count,   in hsw_ext_register()
     165  current_count[num] = calloc(cpu_count,   in hsw_ext_register()
|
D | nhm_idle.c |
     134  for (cpu = 0; cpu < cpu_count; cpu++) {   in nhm_start()
     153  for (cpu = 0; cpu < cpu_count; cpu++) {   in nhm_stop()
     180  is_valid = calloc(cpu_count, sizeof(int));   in intel_nhm_register()
     182  previous_count[num] = calloc(cpu_count,   in intel_nhm_register()
     184  current_count[num] = calloc(cpu_count,   in intel_nhm_register()
|
D | cpupower-monitor.c |
      30  int cpu_count;   variable
     330  for (cpu = 0; cpu < cpu_count; cpu++)   in do_interval_measure()
     342  for (cpu = 0; cpu < cpu_count; cpu++)   in do_interval_measure()
     394  cpu_count = get_cpu_topology(&cpu_top);   in cmd_monitor()
     395  if (cpu_count < 0) {   in cmd_monitor()
     407  dprint("System has up to %d CPU cores\n", cpu_count);   in cmd_monitor()
     438  cpu_top.pkgs, cpu_top.cores, cpu_count);   in cmd_monitor()
     455  for (cpu = 0; cpu < cpu_count; cpu++) {   in cmd_monitor()
|
D | amd_fam14h_idle.c |
     233  for (cpu = 0; cpu < cpu_count; cpu++)   in amd_fam14h_start()
     253  for (cpu = 0; cpu < cpu_count; cpu++)   in amd_fam14h_stop()
     294  previous_count[num] = calloc(cpu_count,   in amd_fam14h_register()
     296  current_count[num] = calloc(cpu_count,   in amd_fam14h_register()
|
D | cpupower-monitor.h | 28 extern int cpu_count;
|
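Taken together, the idle_monitor hits above follow one shape: cpupower-monitor.c establishes the global cpu_count from get_cpu_topology(), each monitor's register hook allocates cpu_count-sized counter arrays, and the start/stop hooks walk for (cpu = 0; cpu < cpu_count; cpu++) on either side of a measurement interval. Below is a hedged sketch of that interval-measure loop; read_counter() is a made-up stand-in for the tool's MSR and sysfs reads, and cpu_count comes from sysconf() rather than the topology parser.

/*
 * Sketch of the do_interval_measure() shape: snapshot a per-CPU counter,
 * sleep, snapshot again, report the delta.  Assumptions: read_counter()
 * is hypothetical; cpu_count comes from sysconf(), not get_cpu_topology().
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int cpu_count;
static unsigned long long *previous_count;
static unsigned long long *current_count;

/* Hypothetical per-CPU counter source. */
static unsigned long long read_counter(int cpu)
{
    return (unsigned long long)cpu * 1000;   /* placeholder value */
}

static void do_interval_measure(unsigned int seconds)
{
    for (int cpu = 0; cpu < cpu_count; cpu++)      /* "start" phase */
        previous_count[cpu] = read_counter(cpu);

    sleep(seconds);

    for (int cpu = 0; cpu < cpu_count; cpu++)      /* "stop" phase  */
        current_count[cpu] = read_counter(cpu);
}

int main(void)
{
    cpu_count = (int)sysconf(_SC_NPROCESSORS_CONF);
    if (cpu_count < 1)
        return 1;

    previous_count = calloc(cpu_count, sizeof(*previous_count));
    current_count  = calloc(cpu_count, sizeof(*current_count));
    if (!previous_count || !current_count)
        return 1;

    do_interval_measure(1);

    for (int cpu = 0; cpu < cpu_count; cpu++)
        printf("cpu%d delta %llu\n", cpu,
               current_count[cpu] - previous_count[cpu]);

    free(previous_count);
    free(current_count);
    return 0;
}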
/Linux-v5.10/arch/arm64/kernel/ |
D | smp.c |
     531  static unsigned int cpu_count = 1;   variable
     562  if (is_mpidr_duplicate(cpu_count, hwid)) {   in acpi_map_gic_cpu_interface()
     579  if (cpu_count >= NR_CPUS)   in acpi_map_gic_cpu_interface()
     583  set_cpu_logical_map(cpu_count, hwid);   in acpi_map_gic_cpu_interface()
     585  cpu_madt_gicc[cpu_count] = *processor;   in acpi_map_gic_cpu_interface()
     596  acpi_set_mailbox_entry(cpu_count, processor);   in acpi_map_gic_cpu_interface()
     598  cpu_count++;   in acpi_map_gic_cpu_interface()
     662  if (is_mpidr_duplicate(cpu_count, hwid)) {   in of_parse_and_init_cpus()
     693  if (cpu_count >= NR_CPUS)   in of_parse_and_init_cpus()
     697  set_cpu_logical_map(cpu_count, hwid);   in of_parse_and_init_cpus()
     [all …]
|
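The smp.c hits show the enumeration bookkeeping shared by the ACPI and device-tree paths: cpu_count starts at 1 because the boot CPU already owns logical slot 0, and each firmware-described CPU is skipped if its hardware ID is a duplicate or if cpu_count has reached NR_CPUS; otherwise it is recorded and the counter advances. A small self-contained sketch of that check-then-record pattern follows; NR_CPUS, the map array, and the sample hwids are local stand-ins, not the kernel's.

/*
 * Sketch of the "reject duplicates, reject overflow, then record" step.
 * Everything here is a stand-in: NR_CPUS is tiny, the hardware IDs are
 * invented, and the boot CPU is assumed to have hwid 0 in slot 0.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

static unsigned long long cpu_logical_map[NR_CPUS] = { 0x0 };  /* boot CPU */
static unsigned int cpu_count = 1;                    /* slot 0 is taken */

static bool is_hwid_duplicate(unsigned int upto, unsigned long long hwid)
{
    for (unsigned int i = 0; i < upto; i++)
        if (cpu_logical_map[i] == hwid)
            return true;
    return false;
}

static int map_cpu_interface(unsigned long long hwid)
{
    if (is_hwid_duplicate(cpu_count, hwid))
        return -1;              /* firmware described this CPU twice    */
    if (cpu_count >= NR_CPUS)
        return -1;              /* more CPUs than this build supports   */

    cpu_logical_map[cpu_count] = hwid;
    cpu_count++;
    return 0;
}

int main(void)
{
    unsigned long long hwids[] = { 0x100, 0x101, 0x100 /* duplicate */ };

    for (unsigned int i = 0; i < 3; i++)
        if (map_cpu_interface(hwids[i]) < 0)
            printf("skipping hwid %#llx\n", hwids[i]);

    printf("registered %u CPUs\n", cpu_count);
    return 0;
}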
D | insn.c |
     211  atomic_t cpu_count;   member
     220  if (atomic_inc_return(&pp->cpu_count) == 1) {   in aarch64_insn_patch_text_cb()
     225  atomic_inc(&pp->cpu_count);   in aarch64_insn_patch_text_cb()
     227  while (atomic_read(&pp->cpu_count) <= num_online_cpus())   in aarch64_insn_patch_text_cb()
     241  .cpu_count = ATOMIC_INIT(0),   in aarch64_insn_patch_text()
|
/Linux-v5.10/tools/testing/selftests/rcutorture/bin/ |
D | kvm-test-1-run.sh |
     136  cpu_count=`configNR_CPUS.sh $resdir/ConfigFragment`
     137  cpu_count=`configfrag_boot_cpus "$boot_args" "$config_template" "$cpu_count"`
     138  if test "$cpu_count" -gt "$TORTURE_ALLOTED_CPUS"
     140  echo CPU count limited from $cpu_count to $TORTURE_ALLOTED_CPUS | tee -a $resdir/Warnings
     141  cpu_count=$TORTURE_ALLOTED_CPUS
     143  qemu_args="`specify_qemu_cpus "$QEMU" "$qemu_args" "$cpu_count"`"
|
D | kvm.sh |
     278  cpu_count=`configNR_CPUS.sh $CONFIGFRAG/$CF1`
     279  cpu_count=`configfrag_boot_cpus "$TORTURE_BOOTARGS" "$CONFIGFRAG/$CF1" "$cpu_count"`
     280  cpu_count=`configfrag_boot_maxcpus "$TORTURE_BOOTARGS" "$CONFIGFRAG/$CF1" "$cpu_count"`
     281  echo $CF1 $cpu_count >> $T/cfgcpu
|
/Linux-v5.10/arch/s390/hypfs/ |
D | hypfs_diag0c.c |
      33  unsigned int cpu_count, cpu, i;   in diag0c_store()   local
      37  cpu_count = num_online_cpus();   in diag0c_store()
      43  diag0c_data = kzalloc(struct_size(diag0c_data, entry, cpu_count),   in diag0c_store()
      55  *count = cpu_count;   in diag0c_store()
|
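The diag0c_store() hits size a single allocation as one header plus one flexible-array entry per online CPU via struct_size(). A userspace sketch of the same layout is shown below, assuming stand-in types; note that the kernel's struct_size() additionally saturates on arithmetic overflow, which the plain multiplication here does not.

/*
 * Sketch of a flexible-array allocation sized by cpu_count.  The types
 * and the payload field are stand-ins, not the s390 hypfs structures.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct diag0c_entry {
    unsigned long long busy_ticks;     /* placeholder per-CPU payload */
};

struct diag0c_data {
    unsigned int cpu_count;            /* header                      */
    struct diag0c_entry entry[];       /* flexible array member       */
};

int main(void)
{
    unsigned int cpu_count = (unsigned int)sysconf(_SC_NPROCESSORS_ONLN);

    /* One allocation: header plus cpu_count entries. */
    struct diag0c_data *data =
        calloc(1, sizeof(*data) + cpu_count * sizeof(data->entry[0]));
    if (!data)
        return 1;

    data->cpu_count = cpu_count;
    for (unsigned int cpu = 0; cpu < cpu_count; cpu++)
        data->entry[cpu].busy_ticks = 0;

    printf("allocated %u entries\n", data->cpu_count);
    free(data);
    return 0;
}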
/Linux-v5.10/arch/arm/mach-axxia/ |
D | platsmp.c |
      55  int cpu_count = 0;   in axxia_smp_prepare_cpus()   local
      72  if (cpu_count < max_cpus) {   in axxia_smp_prepare_cpus()
      74  cpu_count++;   in axxia_smp_prepare_cpus()
|
/Linux-v5.10/arch/csky/kernel/ |
D | ftrace.c |
     210  atomic_t cpu_count;   member
     217  if (atomic_inc_return(&param->cpu_count) == 1) {   in __ftrace_modify_code()
     219  atomic_inc(&param->cpu_count);   in __ftrace_modify_code()
     221  while (atomic_read(&param->cpu_count) <= num_online_cpus())   in __ftrace_modify_code()
|
/Linux-v5.10/scripts/ |
D | checkkconfigsymbols.py |
      18  from multiprocessing import Pool, cpu_count
     274  pool = Pool(cpu_count(), init_worker)
     281  for part in partition(kfiles, cpu_count()):
     311  pool = Pool(cpu_count(), init_worker)
     338  arglist = partition(source_files, cpu_count())
     344  for part in partition(kconfig_files, cpu_count()):
|
/Linux-v5.10/arch/parisc/kernel/ |
D | processor.c |
      92  if (boot_cpu_data.cpu_count > 0) {   in processor_probe()
     101  cpuid = boot_cpu_data.cpu_count;   in processor_probe()
     150  boot_cpu_data.cpu_count--;   in processor_probe()
     160  boot_cpu_data.cpu_count++;   in processor_probe()
|
/Linux-v5.10/arch/csky/kernel/probes/ |
D | kprobes.c |
      23  atomic_t cpu_count;   member
      31  if (atomic_inc_return(&param->cpu_count) == 1) {   in patch_text_cb()
      34  atomic_inc(&param->cpu_count);   in patch_text_cb()
      36  while (atomic_read(&param->cpu_count) <= num_online_cpus())   in patch_text_cb()
|
/Linux-v5.10/tools/power/x86/intel-speed-select/ |
D | isst-core.c |
     243  ctdp_level->cpu_count = 0;   in isst_get_coremask_info()
     246  int cpu_count = 0;   in isst_get_coremask_info()   local
     262  &cpu_count);   in isst_get_coremask_info()
     263  ctdp_level->cpu_count += cpu_count;   in isst_get_coremask_info()
     265  config_index, i, ctdp_level->cpu_count);   in isst_get_coremask_info()
|
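In isst_get_coremask_info(), a per-mask cpu_count is filled in for each core mask and accumulated into ctdp_level->cpu_count for the configuration level. The sketch below shows the underlying count-bits-and-sum step with invented mask words; the real tool derives its counts from mask data returned over the ISST mailbox interface, which is not reproduced here.

/*
 * Sketch of summing per-mask CPU counts into a per-level total.
 * The mask words are invented example values.
 */
#include <stdio.h>

/* Count the CPUs present in one 64-bit mask word. */
static int cpus_in_mask(unsigned long long mask)
{
    return __builtin_popcountll(mask);   /* GCC/Clang builtin */
}

int main(void)
{
    /* Hypothetical mask words, e.g. two halves of a wide core mask. */
    unsigned long long mask_words[] = { 0x00000000000000ffULL,
                                        0x000000000000000fULL };
    int total_cpu_count = 0;

    for (int i = 0; i < 2; i++) {
        int cpu_count = cpus_in_mask(mask_words[i]);
        total_cpu_count += cpu_count;
        printf("mask %d: %d CPUs\n", i, cpu_count);
    }

    printf("level total: %d CPUs\n", total_cpu_count);
    return 0;
}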
/Linux-v5.10/arch/ia64/include/asm/ |
D | smp.h | 49 int cpu_count; member
|
/Linux-v5.10/include/uapi/linux/ |
D | taskstats.h | 72 __u64 cpu_count __attribute__((aligned(8))); member
|
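In taskstats.h the __u64 cpu_count member carries an explicit __attribute__((aligned(8))), which pins the field to the same offset whether the structure is read by 32-bit or 64-bit user space over the netlink interface. The fragment below illustrates that effect with a made-up structure, not the real taskstats layout: without the attribute, a 32-bit ABI that aligns 64-bit integers to 4 bytes would place the member at offset 4 instead of 8.

/* Illustration only: a stand-in struct, not the taskstats layout. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct stats_example {
    uint32_t version;
    /* Force a fixed offset so 32-bit and 64-bit readers agree. */
    uint64_t cpu_count __attribute__((aligned(8)));
};

int main(void)
{
    static_assert(offsetof(struct stats_example, cpu_count) == 8,
                  "cpu_count must sit at a fixed 8-byte offset");
    printf("offset %zu, size %zu\n",
           offsetof(struct stats_example, cpu_count),
           sizeof(struct stats_example));
    return 0;
}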
/Linux-v5.10/arch/parisc/include/asm/ |
D | processor.h | 64 unsigned int cpu_count; member
|
/Linux-v5.10/tools/accounting/ |
D | getdelays.c |
     211  (unsigned long long)t->cpu_count,   in print_delayacct()
     215  average_ms((double)t->cpu_delay_total, t->cpu_count),   in print_delayacct()
|
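getdelays.c prints the taskstats CPU delay figures by dividing the accumulated cpu_delay_total (nanoseconds) by cpu_count, the number of delay samples behind it, and scaling to milliseconds. The helper below is a plausible reimplementation of that averaging step, not the exact macro the tool uses, and the sample figures are invented.

/*
 * Average delay per sample, in milliseconds: total_ns / count / 1e6.
 * Plausible reimplementation only; the zero-count guard and rounding
 * may differ from getdelays.c's own helper.
 */
#include <stdint.h>
#include <stdio.h>

static double average_ms(double total_ns, uint64_t count)
{
    if (count == 0)                 /* avoid dividing by zero */
        return 0.0;
    return total_ns / (double)count / 1e6;
}

int main(void)
{
    /* Invented figures standing in for taskstats fields. */
    uint64_t cpu_count = 1500;                /* delay samples           */
    uint64_t cpu_delay_total = 7500000000ULL; /* 7.5 s of waiting, in ns */

    printf("delay average: %.3f ms per sample\n",
           average_ms((double)cpu_delay_total, cpu_count));
    return 0;
}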