| /Linux-v5.4/arch/sh/mm/ | 
| D | tlb-sh5.c |
|   23  cpu_data->dtlb.entries = 64;  in sh64_tlb_init()
|   24  cpu_data->dtlb.step = 0x10;  in sh64_tlb_init()
|   26  cpu_data->dtlb.first = DTLB_FIXED | cpu_data->dtlb.step;  in sh64_tlb_init()
|   27  cpu_data->dtlb.next = cpu_data->dtlb.first;  in sh64_tlb_init()
|   29  cpu_data->dtlb.last = DTLB_FIXED |  in sh64_tlb_init()
|   30      ((cpu_data->dtlb.entries - 1) *  in sh64_tlb_init()
|   31       cpu_data->dtlb.step);  in sh64_tlb_init()
|   34  cpu_data->itlb.entries = 64;  in sh64_tlb_init()
|   35  cpu_data->itlb.step = 0x10;  in sh64_tlb_init()
|   37  cpu_data->itlb.first = ITLB_FIXED | cpu_data->itlb.step;  in sh64_tlb_init()
|   [all …]
 | 
| D | cache-sh5.c |
|   249      cpu_data->dcache.entry_mask) >>  in sh64_dcache_purge_sets()
|   250      cpu_data->dcache.entry_shift;  in sh64_dcache_purge_sets()
|   254  set_offset &= (cpu_data->dcache.sets - 1);  in sh64_dcache_purge_sets()
|   256      (set_offset << cpu_data->dcache.entry_shift);  in sh64_dcache_purge_sets()
|   265  eaddr1 = eaddr0 + cpu_data->dcache.way_size *  in sh64_dcache_purge_sets()
|   266      cpu_data->dcache.ways;  in sh64_dcache_purge_sets()
|   269      eaddr += cpu_data->dcache.way_size) {  in sh64_dcache_purge_sets()
|   274  eaddr1 = eaddr0 + cpu_data->dcache.way_size *  in sh64_dcache_purge_sets()
|   275      cpu_data->dcache.ways;  in sh64_dcache_purge_sets()
|   278      eaddr += cpu_data->dcache.way_size) {  in sh64_dcache_purge_sets()
|   [all …]
 | 
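The cache-sh5.c hits walk every (set, way) address using the `sets`, `ways`, `way_size` and `entry_shift` fields of `cpu_data->dcache`. The sketch below is a minimal userspace model of that set/way walk; the struct layout and the cache geometry values are hypothetical, not the kernel's.

```c
/* Illustrative only: models the set/way walk suggested by the cache-sh5.c hits. */
#include <stdio.h>

struct cache_info {
	unsigned int ways;        /* associativity */
	unsigned int sets;        /* sets per way */
	unsigned int entry_shift; /* log2(line size) */
	unsigned int way_size;    /* sets << entry_shift */
};

int main(void)
{
	struct cache_info dcache = { .ways = 4, .sets = 512, .entry_shift = 5 };
	dcache.way_size = dcache.sets << dcache.entry_shift;

	/* Visit each set, then step through the ways by adding way_size. */
	for (unsigned int set = 0; set < dcache.sets; set++) {
		unsigned long eaddr0 = (unsigned long)set << dcache.entry_shift;
		unsigned long eaddr1 = eaddr0 +
			(unsigned long)dcache.way_size * dcache.ways;

		for (unsigned long eaddr = eaddr0; eaddr < eaddr1;
		     eaddr += dcache.way_size)
			; /* a real purge would issue an allocate/invalidate at eaddr */
	}
	printf("walked %u sets x %u ways\n", dcache.sets, dcache.ways);
	return 0;
}
```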
| /Linux-v5.4/drivers/cpuidle/governors/ | 
| D | teo.c |
|   119  struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);  in teo_update()  local
|   120  unsigned int sleep_length_us = ktime_to_us(cpu_data->sleep_length_ns);  in teo_update()
|   124  if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) {  in teo_update()
|   136  measured_us = ktime_to_us(cpu_data->time_span_ns);  in teo_update()
|   154  unsigned int early_hits = cpu_data->states[i].early_hits;  in teo_update()
|   156  cpu_data->states[i].early_hits -= early_hits >> DECAY_SHIFT;  in teo_update()
|   174  unsigned int hits = cpu_data->states[idx_timer].hits;  in teo_update()
|   175  unsigned int misses = cpu_data->states[idx_timer].misses;  in teo_update()
|   183  cpu_data->states[idx_hit].early_hits += PULSE;  in teo_update()
|   188  cpu_data->states[idx_timer].misses = misses;  in teo_update()
|   [all …]
 | 
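The teo.c hits show per-CPU governor state (fetched with per_cpu_ptr) whose metrics decay by a fixed fraction on every update and get a PULSE added to the bucket that matched reality. A rough stand-alone sketch of that decaying-counter bookkeeping follows; the constants and the update policy here are simplified stand-ins, not the governor's exact logic.

```c
/* Sketch of decay-and-pulse counters in the style of the teo.c hits. */
#include <stdio.h>

#define DECAY_SHIFT 3     /* hypothetical: lose 1/8 of the value per update */
#define PULSE       1024  /* hypothetical weight added to the chosen bucket */

struct state_stats {
	unsigned int hits;
	unsigned int misses;
	unsigned int early_hits;
};

static void update(struct state_stats *s, int timer_was_right, int woke_early)
{
	/* decay everything first, then credit one bucket */
	s->hits       -= s->hits >> DECAY_SHIFT;
	s->misses     -= s->misses >> DECAY_SHIFT;
	s->early_hits -= s->early_hits >> DECAY_SHIFT;

	if (woke_early)
		s->early_hits += PULSE;
	else if (timer_was_right)
		s->hits += PULSE;
	else
		s->misses += PULSE;
}

int main(void)
{
	struct state_stats s = { 0 };

	for (int i = 0; i < 100; i++)
		update(&s, i % 3 != 0, i % 5 == 0);
	printf("hits=%u misses=%u early=%u\n", s.hits, s.misses, s.early_hits);
	return 0;
}
```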
| /Linux-v5.4/arch/mips/include/asm/ | 
| D | cpu-features.h |
|   17   #define __ase(ase)  (cpu_data[0].ases & (ase))
|   18   #define __isa(isa)  (cpu_data[0].isa_level & (isa))
|   19   #define __opt(opt)  (cpu_data[0].options & (opt))
|   235  #define cpu_has_vtag_icache  (cpu_data[0].icache.flags & MIPS_CACHE_VTAG)
|   238  #define cpu_has_dc_aliases  (cpu_data[0].dcache.flags & MIPS_CACHE_ALIASES)
|   241  #define cpu_has_ic_fills_f_dc  (cpu_data[0].icache.flags & MIPS_CACHE_IC_F_DC)
|   244  #define cpu_has_pindexed_dcache  (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX)
|   260  #define cpu_icache_snoops_remote_store  (cpu_data[0].icache.flags & MIPS_IC_SNOOPS_REMOTE)
|   430  # define cpu_has_64bits  (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT)
|   433  # define cpu_has_64bit_zero_reg  (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT)
|   [all …]
 | 
| D | cpu-info.h |
|   110  extern struct cpuinfo_mips cpu_data[];
|   111  #define current_cpu_data cpu_data[smp_processor_id()]
|   112  #define raw_current_cpu_data cpu_data[raw_smp_processor_id()]
|   113  #define boot_cpu_data cpu_data[0]
|   174  struct cpuinfo_mips *infoa = &cpu_data[cpua];  in cpus_are_siblings()
|   175  struct cpuinfo_mips *infob = &cpu_data[cpub];  in cpus_are_siblings()
  | 
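The MIPS headers show the flat-array flavour of cpu_data: a global array indexed by CPU number, convenience macros for the boot and current CPU, and feature tests that key off `cpu_data[0]`. A minimal userspace model of that indexing pattern is sketched below; the struct fields, flag values, and the `fake_smp_processor_id()` helper are illustrative, not the kernel's.

```c
/* Model of a cpu_data[] array plus boot/current/feature macros. */
#include <stdio.h>

#define NR_CPUS  4
#define FEAT_FPU (1u << 0)   /* hypothetical feature bit */

struct cpuinfo_model {
	unsigned int options;   /* feature bit mask */
	unsigned int tlbsize;
};

static struct cpuinfo_model cpu_data[NR_CPUS];

/* stand-in for smp_processor_id(); always "runs" on CPU 0 here */
static int fake_smp_processor_id(void) { return 0; }

#define boot_cpu_data    cpu_data[0]
#define current_cpu_data cpu_data[fake_smp_processor_id()]
/* feature tests read the boot CPU's info, as cpu-features.h does */
#define cpu_has_fpu      (cpu_data[0].options & FEAT_FPU)

int main(void)
{
	boot_cpu_data.options = FEAT_FPU;
	boot_cpu_data.tlbsize = 64;
	printf("fpu=%d tlb_entries=%u\n", !!cpu_has_fpu, current_cpu_data.tlbsize);
	return 0;
}
```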
| /Linux-v5.4/arch/mips/kernel/ | 
| D | proc.c |
|   40   unsigned int version = cpu_data[n].processor_id;  in show_cpuinfo()
|   41   unsigned int fp_vers = cpu_data[n].fpu_id;  in show_cpuinfo()
|   62       cpu_data[n].options & MIPS_CPU_FPU ? "  FPU V%d.%d" : "");  in show_cpuinfo()
|   67       cpu_data[n].udelay_val / (500000/HZ),  in show_cpuinfo()
|   68       (cpu_data[n].udelay_val / (5000/HZ)) % 100);  in show_cpuinfo()
|   72   seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize);  in show_cpuinfo()
|   79       cpu_data[n].watch_reg_count);  in show_cpuinfo()
|   80   for (i = 0; i < cpu_data[n].watch_reg_count; i++)  in show_cpuinfo()
|   82       cpu_data[n].watch_reg_masks[i]);  in show_cpuinfo()
|   138      cpu_data[n].srsets);  in show_cpuinfo()
|   [all …]
 | 
| D | smp-cps.c |
|   74   cpu_set_cluster(&cpu_data[nvpes + v], cl);  in cps_smp_setup()
|   75   cpu_set_core(&cpu_data[nvpes + v], c);  in cps_smp_setup()
|   76   cpu_set_vpe_id(&cpu_data[nvpes + v], v);  in cps_smp_setup()
|   88   set_cpu_possible(v, cpu_cluster(&cpu_data[v]) == 0);  in cps_smp_setup()
|   89   set_cpu_present(v, cpu_cluster(&cpu_data[v]) == 0);  in cps_smp_setup()
|   296  unsigned core = cpu_core(&cpu_data[cpu]);  in cps_boot_secondary()
|   297  unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);  in cps_boot_secondary()
|   305  if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data))  in cps_boot_secondary()
|   312  atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);  in cps_boot_secondary()
|   409  core = cpu_core(&cpu_data[cpu]);  in cps_shutdown_this_cpu()
|   [all …]
 | 
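The proc.c hit at lines 67–68 formats BogoMIPS from `udelay_val` as a whole part plus two decimals. The snippet below reproduces just that arithmetic; the HZ value and the sample `udelay_val` are made up for illustration.

```c
/* Reproduces the BogoMIPS formatting arithmetic from the proc.c hit. */
#include <stdio.h>

#define HZ 250   /* hypothetical tick rate; must divide 500000 and 5000 evenly */

int main(void)
{
	unsigned long udelay_val = 1990656;   /* hypothetical loops_per_jiffy */

	printf("BogoMIPS\t\t: %lu.%02lu\n",
	       udelay_val / (500000 / HZ),          /* integer part */
	       (udelay_val / (5000 / HZ)) % 100);   /* two decimal digits */
	return 0;
}
```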
| /Linux-v5.4/arch/sparc/kernel/ | 
| D | prom_64.c |
|   416  int proc_id = cpu_data(cpu).proc_id;  in arch_find_n_match_cpu_physical_id()
|   530  cpu_data(cpuid).clock_tick =  in fill_in_one_cpu()
|   534  cpu_data(cpuid).dcache_size =  in fill_in_one_cpu()
|   537  cpu_data(cpuid).dcache_line_size =  in fill_in_one_cpu()
|   540  cpu_data(cpuid).icache_size =  in fill_in_one_cpu()
|   543  cpu_data(cpuid).icache_line_size =  in fill_in_one_cpu()
|   546  cpu_data(cpuid).ecache_size =  in fill_in_one_cpu()
|   548  cpu_data(cpuid).ecache_line_size =  in fill_in_one_cpu()
|   550  if (!cpu_data(cpuid).ecache_size ||  in fill_in_one_cpu()
|   551      !cpu_data(cpuid).ecache_line_size) {  in fill_in_one_cpu()
|   [all …]
 | 
| D | smp_32.c |
|   63   cpu_data(id).udelay_val = loops_per_jiffy;  in smp_store_cpu_info()
|   66   cpu_data(id).clock_tick = prom_getintdefault(cpu_node,  in smp_store_cpu_info()
|   68   cpu_data(id).prom_node = cpu_node;  in smp_store_cpu_info()
|   75   cpu_data(id).mid = mid;  in smp_store_cpu_info()
|   85   bogosum += cpu_data(cpu).udelay_val;  in smp_cpus_done()
|   385      cpu_data(i).udelay_val/(500000/HZ),  in smp_bogo()
|   386      (cpu_data(i).udelay_val/(5000/HZ))%100);  in smp_bogo()
  | 
| /Linux-v5.4/arch/sh/include/asm/ | 
| D | tlb_64.h |
|   26   for (tlb  = cpu_data->dtlb.first;	\
|   27        tlb <= cpu_data->dtlb.last;	\
|   28        tlb += cpu_data->dtlb.step)
|   36   for (tlb  = cpu_data->itlb.first;	\
|   37        tlb <= cpu_data->itlb.last;	\
|   38        tlb += cpu_data->itlb.step)
  | 
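The tlb_64.h hits are the bodies of for-each-TLB-entry macros: a descriptor records the first entry address, the last one, and the stride, and the macro walks them. Below is a self-contained sketch of that iteration pattern; the entry count, stride, and macro name are illustrative.

```c
/* Sketch of a first/last/step TLB-walk macro like the tlb_64.h hits. */
#include <stdio.h>

struct tlb_info {
	unsigned long first, last, step;
};

#define for_each_tlb_entry(tlb, info) \
	for ((tlb) = (info).first; (tlb) <= (info).last; (tlb) += (info).step)

int main(void)
{
	/* e.g. 64 entries spaced 0x10 apart; entry 0 is skipped, matching
	 * the "first = base | step" initialisation seen in tlb-sh5.c */
	struct tlb_info dtlb = { .step = 0x10 };
	dtlb.first = dtlb.step;
	dtlb.last  = (64 - 1) * dtlb.step;

	unsigned long tlb, n = 0;
	for_each_tlb_entry(tlb, dtlb)
		n++;
	printf("visited %lu entries\n", n);
	return 0;
}
```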
| D | processor.h |
|   94   extern struct sh_cpuinfo cpu_data[];
|   95   #define boot_cpu_data cpu_data[0]
|   96   #define current_cpu_data cpu_data[smp_processor_id()]
|   97   #define raw_current_cpu_data cpu_data[raw_smp_processor_id()]
  | 
| /Linux-v5.4/arch/ia64/mm/ | 
| D | contig.c |
|   37   static void *cpu_data;  variable
|   61   memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);  in per_cpu_init()
|   62   __per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;  in per_cpu_init()
|   75   ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -  in per_cpu_init()
|   78   cpu_data += PERCPU_PAGE_SIZE;  in per_cpu_init()
|   89   cpu_data = memblock_alloc_from(size, PERCPU_PAGE_SIZE,  in alloc_per_cpu_data()
|   91   if (!cpu_data)  in alloc_per_cpu_data()
  | 
| D | discontig.c |
|   136  static void *per_cpu_node_setup(void *cpu_data, int node)  in per_cpu_node_setup()  argument
|   147  memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);  in per_cpu_node_setup()
|   148  __per_cpu_offset[cpu] = (char *)__va(cpu_data) -  in per_cpu_node_setup()
|   162      (unsigned long)cpu_data -  in per_cpu_node_setup()
|   165  cpu_data += PERCPU_PAGE_SIZE;  in per_cpu_node_setup()
|   168  return cpu_data;  in per_cpu_node_setup()
|   262  void *cpu_data;  in fill_pernode()  local
|   269  cpu_data = (void *)pernode;  in fill_pernode()
|   280  cpu_data = per_cpu_node_setup(cpu_data, node);  in fill_pernode()
  | 
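In the ia64 hits, `cpu_data` is not per-CPU info at all but a cursor over a block of memory: the static per-CPU template is copied once per CPU, the per-CPU offset is recorded as the distance from the template to that copy, and the cursor advances by one per-CPU page. The userspace model below only illustrates that copy-and-offset pattern; the sizes and names stand in for the kernel's linker symbols and are not real.

```c
/* Model of the ia64 per-CPU area setup pattern (copy template, record offset). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS        4
#define PERCPU_AREA_SZ 4096   /* stands in for PERCPU_PAGE_SIZE */

static char template_area[PERCPU_AREA_SZ];   /* stands in for __per_cpu_start..__per_cpu_end */
static long per_cpu_offset[NR_CPUS];

int main(void)
{
	char *base = malloc((size_t)NR_CPUS * PERCPU_AREA_SZ);
	char *cursor = base;

	if (!base)
		return 1;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		memcpy(cursor, template_area, PERCPU_AREA_SZ);
		per_cpu_offset[cpu] = (long)((uintptr_t)cursor -
					     (uintptr_t)template_area);
		cursor += PERCPU_AREA_SZ;    /* like cpu_data += PERCPU_PAGE_SIZE */
	}

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d offset %ld\n", cpu, per_cpu_offset[cpu]);
	free(base);
	return 0;
}
```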
| /Linux-v5.4/kernel/trace/ | 
| D | trace_functions_graph.c |
|   31   struct fgraph_cpu_data __percpu *cpu_data;  member
|   393  last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);  in verif_pid()
|   644  struct fgraph_cpu_data *cpu_data;  in print_graph_entry_leaf()  local
|   646  cpu_data = per_cpu_ptr(data->cpu_data, cpu);  in print_graph_entry_leaf()
|   653  cpu_data->depth = call->depth - 1;  in print_graph_entry_leaf()
|   658  cpu_data->enter_funcs[call->depth] = 0;  in print_graph_entry_leaf()
|   687  struct fgraph_cpu_data *cpu_data;  in print_graph_entry_nested()  local
|   690  cpu_data = per_cpu_ptr(data->cpu_data, cpu);  in print_graph_entry_nested()
|   691  cpu_data->depth = call->depth;  in print_graph_entry_nested()
|   696  cpu_data->enter_funcs[call->depth] = call->func;  in print_graph_entry_nested()
|   [all …]
 | 
| /Linux-v5.4/arch/x86/include/asm/ | 
| D | topology.h |
|   107  #define topology_logical_package_id(cpu)  (cpu_data(cpu).logical_proc_id)
|   108  #define topology_physical_package_id(cpu)  (cpu_data(cpu).phys_proc_id)
|   109  #define topology_logical_die_id(cpu)  (cpu_data(cpu).logical_die_id)
|   110  #define topology_die_id(cpu)  (cpu_data(cpu).cpu_die_id)
|   111  #define topology_core_id(cpu)  (cpu_data(cpu).cpu_core_id)
  | 
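The x86 topology.h hits are one-line wrappers that pull topology fields out of `cpu_data(cpu)`. The model below uses a plain array behind a function-like `cpu_data(cpu)` macro to show that wrapper pattern; the struct and the "two cores per package" layout are illustrative assumptions, and the real kernel backs `cpu_data(cpu)` with a per-CPU variable rather than an array.

```c
/* Model of topology macros layered over a cpu_data(cpu) accessor. */
#include <stdio.h>

#define NR_CPUS 4

struct cpuinfo_model {
	unsigned int phys_proc_id;   /* physical package id */
	unsigned int cpu_core_id;    /* core id within the package */
};

static struct cpuinfo_model cpu_info[NR_CPUS];

#define cpu_data(cpu)                       (cpu_info[(cpu)])
#define topology_physical_package_id(cpu)   (cpu_data(cpu).phys_proc_id)
#define topology_core_id(cpu)               (cpu_data(cpu).cpu_core_id)

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		cpu_data(cpu).phys_proc_id = cpu / 2;   /* two cores per package */
		cpu_data(cpu).cpu_core_id  = cpu % 2;
	}
	printf("cpu3: package %u core %u\n",
	       topology_physical_package_id(3), topology_core_id(3));
	return 0;
}
```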
| /Linux-v5.4/arch/ia64/kernel/ | 
| D | smpboot.c |
|   420  last_cpuinfo = cpu_data(cpuid - 1);  in smp_callin()
|   591  if (cpu_data(cpu)->threads_per_core == 1 &&  in remove_siblinginfo()
|   592      cpu_data(cpu)->cores_per_socket == 1) {  in remove_siblinginfo()
|   701  bogosum += cpu_data(cpu)->loops_per_jiffy;  in smp_cpus_done()
|   713  if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) {  in set_cpu_sibling_map()
|   716  if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {  in set_cpu_sibling_map()
|   749  if (cpu_data(cpu)->threads_per_core == 1 &&  in __cpu_up()
|   750      cpu_data(cpu)->cores_per_socket == 1) {  in __cpu_up()
|   845  if ((cpu_data(j)->socket_id == cpu_data(i)->socket_id)) {  in is_multithreading_enabled()
|   846      if (cpu_data(j)->core_id == cpu_data(i)->core_id)  in is_multithreading_enabled()
  | 
| D | topology.c |
|   35   if (cpu_data(num)->socket_id == -1)  in arch_fix_phys_package_id()
|   36   cpu_data(num)->socket_id = slot;  in arch_fix_phys_package_id()
|   145  if (cpu_data(cpu)->threads_per_core <= 1 &&  in cache_shared_cpu_map_setup()
|   146      cpu_data(cpu)->cores_per_socket <= 1) {  in cache_shared_cpu_map_setup()
|   160  if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id  in cache_shared_cpu_map_setup()
|   161      && cpu_data(j)->core_id == csi.log1_cid  in cache_shared_cpu_map_setup()
|   162      && cpu_data(j)->thread_id == csi.log1_tid)  in cache_shared_cpu_map_setup()
  | 
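The ia64 smpboot.c and topology.c hits compare `socket_id` and `core_id` to decide which CPUs share a package or a core. The sketch below isolates that sibling test; the topology data is made up for illustration.

```c
/* Sibling test in the style of the ia64 set_cpu_sibling_map() hits. */
#include <stdbool.h>
#include <stdio.h>

struct cpu_topo { int socket_id, core_id; };

static bool same_socket(const struct cpu_topo *a, const struct cpu_topo *b)
{
	return a->socket_id == b->socket_id;
}

static bool same_core(const struct cpu_topo *a, const struct cpu_topo *b)
{
	return same_socket(a, b) && a->core_id == b->core_id;
}

int main(void)
{
	struct cpu_topo cpus[] = {
		{ .socket_id = 0, .core_id = 0 },
		{ .socket_id = 0, .core_id = 0 },   /* SMT sibling of cpu0 */
		{ .socket_id = 0, .core_id = 1 },
		{ .socket_id = 1, .core_id = 0 },
	};

	printf("cpu0/cpu1 thread siblings: %d\n", same_core(&cpus[0], &cpus[1]));
	printf("cpu0/cpu2 same socket:     %d\n", same_socket(&cpus[0], &cpus[2]));
	printf("cpu0/cpu3 same socket:     %d\n", same_socket(&cpus[0], &cpus[3]));
	return 0;
}
```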
| /Linux-v5.4/arch/alpha/include/asm/ | 
| D | mmu_context.h |
|   93   #define cpu_last_asn(cpuid)  (cpu_data[cpuid].last_asn)
|   145  cpu_data[cpu].asn_lock = 1;  in ev5_switch_mm()
|   156  cpu_data[cpu].need_new_asn = 1;  in ev5_switch_mm()
|   191  cpu_data[cpu].asn_lock = 0;				\
|   193  if (cpu_data[cpu].need_new_asn) {			\
|   195  cpu_data[cpu].need_new_asn = 0;			\
  | 
| /Linux-v5.4/arch/mips/loongson64/loongson-3/ | 
| D | smp.c |
|   313  cpu_set_core(&cpu_data[cpu],  in loongson3_init_secondary()
|   315  cpu_data[cpu].package =  in loongson3_init_secondary()
|   328  if (cpu_data[cpu].package)  in loongson3_init_secondary()
|   380  cpu_set_core(&cpu_data[0],  in loongson3_smp_setup()
|   382  cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;  in loongson3_smp_setup()
|   481  [sets] "r" (cpu_data[smp_processor_id()].dcache.sets));  in loongson3a_r1_play_dead()
|   563  [sets] "r" (cpu_data[smp_processor_id()].dcache.sets),  in loongson3a_r2r3_play_dead()
|   564  [vsets] "r" (cpu_data[smp_processor_id()].vcache.sets));  in loongson3a_r2r3_play_dead()
|   626  [sets] "r" (cpu_data[smp_processor_id()].dcache.sets));  in loongson3b_play_dead()
|   695  uint64_t core_id = cpu_core(&cpu_data[cpu]);  in loongson3_disable_clock()
|   [all …]
 | 
| /Linux-v5.4/arch/mips/mm/ | 
| D | context.c |
|   36   if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {  in get_new_mmu_context()
|   76   mmid = xchg_relaxed(&cpu_data[cpu].asid_cache, 0);  in flush_context()
|   88   __set_bit(mmid & cpu_asid_mask(&cpu_data[cpu]), mmid_map);  in flush_context()
|   210  old_active_mmid = READ_ONCE(cpu_data[cpu].asid_cache);  in check_switch_mmu_context()
|   213  !cmpxchg_relaxed(&cpu_data[cpu].asid_cache, old_active_mmid, ctx)) {  in check_switch_mmu_context()
|   220  WRITE_ONCE(cpu_data[cpu].asid_cache, ctx);  in check_switch_mmu_context()
  | 
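The context.c hit at line 36 bumps a per-CPU ASID counter and detects the moment the low ASID bits wrap to zero, which is when a new generation starts and the TLB must be flushed. The stand-alone sketch below models only that wrap check with a single counter; the mask and increment are hypothetical and the real code also tracks the value per mm and flushes the TLB on wrap.

```c
/* Sketch of ASID bump + generation roll-over detection. */
#include <stdio.h>

#define ASID_MASK 0xffUL   /* hypothetical: 8 ASID bits, upper bits = generation */
#define ASID_INC  1UL

static unsigned long asid_cache;    /* per-CPU in the kernel; one CPU here */
static unsigned long rollovers;

static unsigned long get_new_asid(void)
{
	unsigned long asid = asid_cache;

	if (!((asid += ASID_INC) & ASID_MASK)) {
		/* low bits wrapped: old TLB entries with reused ASIDs are stale,
		 * so the kernel would flush the local TLB here */
		rollovers++;
	}
	asid_cache = asid;
	return asid;
}

int main(void)
{
	for (int i = 0; i < 600; i++)
		get_new_asid();
	printf("asid_cache=%#lx rollovers=%lu\n", asid_cache, rollovers);
	return 0;
}
```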
| /Linux-v5.4/tools/testing/selftests/bpf/prog_tests/ | 
| D | perf_buffer.c |
|   10   int cpu_data = *(int *)data, duration = 0;  in on_sample()  local
|   13   if (cpu_data != cpu)  in on_sample()
|   14   CHECK(cpu_data != cpu, "check_cpu_data",  in on_sample()
|   15   "cpu_data %d != cpu %d\n", cpu_data, cpu);  in on_sample()
  | 
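Here `cpu_data` is just a local in the selftest's sample callback: the payload carries the CPU number that produced the event, and the callback checks it against the CPU the sample was delivered on. A minimal stand-alone version of that check is below; the callback is not wired to libbpf here, it only mirrors the shape of the hit.

```c
/* Minimal stand-in for the selftest's on_sample() consistency check. */
#include <stdio.h>

static int bad_samples;

static void on_sample(void *ctx, int cpu, void *data, unsigned int size)
{
	int cpu_data = *(int *)data;   /* CPU id recorded in the payload */

	(void)ctx;
	(void)size;
	if (cpu_data != cpu) {
		bad_samples++;
		fprintf(stderr, "cpu_data %d != cpu %d\n", cpu_data, cpu);
	}
}

int main(void)
{
	int payload = 1;

	on_sample(NULL, 1, &payload, sizeof(payload));   /* matches */
	on_sample(NULL, 3, &payload, sizeof(payload));   /* mismatch */
	printf("bad samples: %d\n", bad_samples);
	return 0;
}
```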
| /Linux-v5.4/arch/alpha/kernel/ | 
| D | smp.c |
|   56   struct cpuinfo_alpha cpu_data[NR_CPUS];  variable
|   57   EXPORT_SYMBOL(cpu_data);
|   84   cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;  in smp_store_cpu_info()
|   85   cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;  in smp_store_cpu_info()
|   86   cpu_data[cpuid].need_new_asn = 0;  in smp_store_cpu_info()
|   87   cpu_data[cpuid].asn_lock = 0;  in smp_store_cpu_info()
|   96   cpu_data[cpuid].prof_counter = 1;  in smp_setup_percpu_timer()
|   97   cpu_data[cpuid].prof_multiplier = 1;  in smp_setup_percpu_timer()
|   494  bogosum += cpu_data[cpu].loops_per_jiffy;  in smp_cpus_done()
|   567  cpu_data[this_cpu].ipi_count++;  in handle_ipi()
|   [all …]
 | 
| /Linux-v5.4/arch/arm64/kernel/ | 
| D | cpuinfo.c |
|   33   DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
|   133  struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);  in c_show()
|   255  struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);  in cpuid_cpu_online()
|   275  struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);  in cpuid_cpu_offline()
|   293  struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);  in cpuinfo_regs_init()
|   384  struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data);  in cpuinfo_store_cpu()
|   391  struct cpuinfo_arm64 *info = &per_cpu(cpu_data, 0);  in cpuinfo_store_boot_cpu()
  | 
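Unlike the flat arrays above, the arm64 hits use a true per-CPU variable (DEFINE_PER_CPU) and reach it through `per_cpu()` and `this_cpu_ptr()`. The userspace model below fakes those accessors with an array and a fixed "current CPU" to show the access pattern only; the macro definitions, field names, and values here are illustrative and differ from the kernel's real percpu machinery.

```c
/* Fake per_cpu()/this_cpu_ptr() over an array, to mirror the access pattern. */
#include <stdio.h>

#define NR_CPUS 4

struct cpuinfo_model {
	unsigned long midr;   /* stand-in for a cached ID register */
};

static struct cpuinfo_model cpu_data_model[NR_CPUS];
static int current_cpu = 2;                /* pretend we run on CPU 2 */

#define per_cpu(var, cpu)  ((var)[(cpu)])
#define this_cpu_ptr(var)  (&(var)[current_cpu])

int main(void)
{
	struct cpuinfo_model *info = this_cpu_ptr(cpu_data_model);

	info->midr = 0x410fd083;               /* example value */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d midr=%#lx\n", cpu,
		       per_cpu(cpu_data_model, cpu).midr);
	return 0;
}
```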
| /Linux-v5.4/arch/x86/kernel/ | 
| D | smpboot.c |
|   193  cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;  in smp_callin()
|   294  struct cpuinfo_x86 *c = &cpu_data(cpu);  in topology_phys_to_logical_pkg()
|   310  int proc_id = cpu_data(cur_cpu).phys_proc_id;  in topology_phys_to_logical_die()
|   313  struct cpuinfo_x86 *c = &cpu_data(cpu);  in topology_phys_to_logical_die()
|   343  cpu_data(cpu).logical_proc_id = new;  in topology_update_package_map()
|   366  cpu_data(cpu).logical_die_id = new;  in topology_update_die_map()
|   373  struct cpuinfo_x86 *c = &cpu_data(id);  in smp_store_boot_cpu_info()
|   388  struct cpuinfo_x86 *c = &cpu_data(id);  in smp_store_cpu_info()
|   569  struct cpuinfo_x86 *c = &cpu_data(cpu);  in set_cpu_sibling_map()
|   585  o = &cpu_data(i);  in set_cpu_sibling_map()
|   [all …]
 | 
| /Linux-v5.4/drivers/cpufreq/ | 
| D | intel_pstate.c |
|   522  static s16 intel_pstate_get_epb(struct cpudata *cpu_data)  in intel_pstate_get_epb()  argument
|   530  ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);  in intel_pstate_get_epb()
|   537  static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)  in intel_pstate_get_epp()  argument
|   547  epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,  in intel_pstate_get_epp()
|   555  epp = intel_pstate_get_epb(cpu_data);  in intel_pstate_get_epp()
|   605  static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)  in intel_pstate_get_energy_pref_index()  argument
|   610  epp = intel_pstate_get_epp(cpu_data, 0);  in intel_pstate_get_energy_pref_index()
|   640  static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,  in intel_pstate_set_energy_pref_index()  argument
|   647  epp = cpu_data->epp_default;  in intel_pstate_set_energy_pref_index()
|   654  ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);  in intel_pstate_set_energy_pref_index()
|   [all …]
 |