
Searched refs: per_cpu (Results 1 – 25 of 371) sorted by relevance
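Every hit below goes through the same accessor: per_cpu(var, cpu) names the copy of a DEFINE_PER_CPU() variable that belongs to a particular CPU, usually from a for_each_*_cpu() loop acting on another CPU's behalf. As orientation before the results, here is a minimal sketch of the idiom; the variable sk_counter and the helper sketch_total() are illustrative, not taken from any file listed here.

#include <linux/cpumask.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, sk_counter);

/* Sum every CPU's private copy. Each CPU increments only its own
 * slot, so the hot path shares no cache line and takes no lock;
 * only this cold reporting loop touches remote copies. */
static unsigned long sketch_total(void)
{
        unsigned long sum = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                sum += per_cpu(sk_counter, cpu);
        return sum;
}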


/Linux-v4.19/arch/x86/xen/
smp.c:35 if (per_cpu(xen_resched_irq, cpu).irq >= 0) { in xen_smp_intr_free()
36 unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL); in xen_smp_intr_free()
37 per_cpu(xen_resched_irq, cpu).irq = -1; in xen_smp_intr_free()
38 kfree(per_cpu(xen_resched_irq, cpu).name); in xen_smp_intr_free()
39 per_cpu(xen_resched_irq, cpu).name = NULL; in xen_smp_intr_free()
41 if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) { in xen_smp_intr_free()
42 unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL); in xen_smp_intr_free()
43 per_cpu(xen_callfunc_irq, cpu).irq = -1; in xen_smp_intr_free()
44 kfree(per_cpu(xen_callfunc_irq, cpu).name); in xen_smp_intr_free()
45 per_cpu(xen_callfunc_irq, cpu).name = NULL; in xen_smp_intr_free()
[all …]
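The smp.c hits above (and the spinlock.c and smp_pv.c ones that follow) all repeat one teardown idiom: a per-CPU struct holds an IRQ number plus the allocated name it was registered under, and freeing resets both so the slot can safely be probed again. A reduced sketch of that pattern, with illustrative sk_* names standing in for xen_resched_irq and friends:

#include <linux/percpu.h>
#include <linux/slab.h>
#include <xen/events.h>

struct sk_common_irq {
        int irq;
        char *name;
};
/* .irq = -1 marks the slot as unbound from the start. */
static DEFINE_PER_CPU(struct sk_common_irq, sk_resched_irq) = { .irq = -1 };

static void sketch_smp_intr_free(unsigned int cpu)
{
        if (per_cpu(sk_resched_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(sk_resched_irq, cpu).irq, NULL);
                per_cpu(sk_resched_irq, cpu).irq = -1;
                kfree(per_cpu(sk_resched_irq, cpu).name);
                per_cpu(sk_resched_irq, cpu).name = NULL;
        }
}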
spinlock.c:28 int irq = per_cpu(lock_kicker_irq, cpu); in xen_qlock_kick()
89 WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n", in xen_init_lock_cpu()
90 cpu, per_cpu(lock_kicker_irq, cpu)); in xen_init_lock_cpu()
102 per_cpu(lock_kicker_irq, cpu) = irq; in xen_init_lock_cpu()
103 per_cpu(irq_name, cpu) = name; in xen_init_lock_cpu()
114 unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL); in xen_uninit_lock_cpu()
115 per_cpu(lock_kicker_irq, cpu) = -1; in xen_uninit_lock_cpu()
116 kfree(per_cpu(irq_name, cpu)); in xen_uninit_lock_cpu()
117 per_cpu(irq_name, cpu) = NULL; in xen_uninit_lock_cpu()
smp_pv.c:96 if (per_cpu(xen_irq_work, cpu).irq >= 0) { in xen_smp_intr_free_pv()
97 unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL); in xen_smp_intr_free_pv()
98 per_cpu(xen_irq_work, cpu).irq = -1; in xen_smp_intr_free_pv()
99 kfree(per_cpu(xen_irq_work, cpu).name); in xen_smp_intr_free_pv()
100 per_cpu(xen_irq_work, cpu).name = NULL; in xen_smp_intr_free_pv()
103 if (per_cpu(xen_pmu_irq, cpu).irq >= 0) { in xen_smp_intr_free_pv()
104 unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL); in xen_smp_intr_free_pv()
105 per_cpu(xen_pmu_irq, cpu).irq = -1; in xen_smp_intr_free_pv()
106 kfree(per_cpu(xen_pmu_irq, cpu).name); in xen_smp_intr_free_pv()
107 per_cpu(xen_pmu_irq, cpu).name = NULL; in xen_smp_intr_free_pv()
[all …]
/Linux-v4.19/arch/x86/oprofile/
nmi_int.c:156 kfree(per_cpu(cpu_msrs, i).multiplex); in nmi_shutdown_mux()
157 per_cpu(cpu_msrs, i).multiplex = NULL; in nmi_shutdown_mux()
158 per_cpu(switch_index, i) = 0; in nmi_shutdown_mux()
172 per_cpu(cpu_msrs, i).multiplex = in nmi_setup_mux()
174 if (!per_cpu(cpu_msrs, i).multiplex) in nmi_setup_mux()
197 per_cpu(switch_index, cpu) = 0; in nmi_cpu_setup_mux()
229 int si = per_cpu(switch_index, cpu); in nmi_cpu_switch()
230 struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); in nmi_cpu_switch()
238 per_cpu(switch_index, cpu) = 0; in nmi_cpu_switch()
240 per_cpu(switch_index, cpu) = si; in nmi_cpu_switch()
[all …]
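The nmi_int.c multiplexing hits show the usual allocate/free pairing for per-CPU buffers: each possible CPU gets its own kcalloc'd array, and shutdown frees and NULLs every slot. A sketch under illustrative sk_* names (the real op_msrs type carries more fields):

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/types.h>

struct sk_msr { u64 saved; };
struct sk_msrs { struct sk_msr *multiplex; };

static DEFINE_PER_CPU(struct sk_msrs, sk_cpu_msrs);
static DEFINE_PER_CPU(int, sk_switch_index);

static int sketch_setup_mux(unsigned int nr_vcounters)
{
        int i;

        for_each_possible_cpu(i) {
                per_cpu(sk_cpu_msrs, i).multiplex =
                        kcalloc(nr_vcounters, sizeof(struct sk_msr), GFP_KERNEL);
                if (!per_cpu(sk_cpu_msrs, i).multiplex)
                        return 0;       /* caller unwinds via sketch_shutdown_mux() */
        }
        return 1;
}

static void sketch_shutdown_mux(void)
{
        int i;

        for_each_possible_cpu(i) {
                kfree(per_cpu(sk_cpu_msrs, i).multiplex);
                per_cpu(sk_cpu_msrs, i).multiplex = NULL;
                per_cpu(sk_switch_index, i) = 0;
        }
}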
/Linux-v4.19/drivers/perf/
arm_pmu_acpi.c:97 per_cpu(pmu_irqs, cpu) = irq; in arm_pmu_acpi_parse_irqs()
105 irq = per_cpu(pmu_irqs, cpu); in arm_pmu_acpi_parse_irqs()
116 if (per_cpu(pmu_irqs, irq_cpu) == irq) in arm_pmu_acpi_parse_irqs()
117 per_cpu(pmu_irqs, irq_cpu) = 0; in arm_pmu_acpi_parse_irqs()
131 pmu = per_cpu(probed_pmus, cpu); in arm_pmu_acpi_find_alloc_pmu()
163 int other_irq = per_cpu(hw_events->irq, cpu); in pmu_irq_matches()
195 if (per_cpu(probed_pmus, cpu)) in arm_pmu_acpi_cpu_starting()
198 irq = per_cpu(pmu_irqs, cpu); in arm_pmu_acpi_cpu_starting()
204 per_cpu(probed_pmus, cpu) = pmu; in arm_pmu_acpi_cpu_starting()
208 per_cpu(hw_events->irq, cpu) = irq; in arm_pmu_acpi_cpu_starting()
[all …]
/Linux-v4.19/kernel/
smpboot.c:31 struct task_struct *tsk = per_cpu(idle_threads, cpu); in idle_thread_get()
41 per_cpu(idle_threads, smp_processor_id()) = current; in idle_thread_set_boot_cpu()
52 struct task_struct *tsk = per_cpu(idle_threads, cpu); in idle_init()
59 per_cpu(idle_threads, cpu) = tsk; in idle_init()
336 return atomic_read(&per_cpu(cpu_hotplug_state, cpu)); in cpu_report_state()
354 atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE); in cpu_check_up_prepare()
358 switch (atomic_read(&per_cpu(cpu_hotplug_state, cpu))) { in cpu_check_up_prepare()
363 atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE); in cpu_check_up_prepare()
409 (void)atomic_xchg(&per_cpu(cpu_hotplug_state, cpu), CPU_ONLINE); in cpu_set_state_online()
427 if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD) in cpu_wait_death()
[all …]
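smpboot.c wraps its per-CPU hotplug state in an atomic_t, so the CPU coming up and the CPU supervising it can hand state back and forth without a lock. A sketch of that shape, with illustrative names and a trimmed state set:

#include <linux/atomic.h>
#include <linux/percpu.h>

enum sk_cpu_state { SK_CPU_DEAD, SK_CPU_UP_PREPARE, SK_CPU_ONLINE };

static DEFINE_PER_CPU(atomic_t, sk_hotplug_state) = ATOMIC_INIT(SK_CPU_DEAD);

static int sketch_report_state(int cpu)
{
        return atomic_read(&per_cpu(sk_hotplug_state, cpu));
}

static void sketch_set_state_online(int cpu)
{
        /* xchg implies full ordering: every bring-up store is visible
         * before a watcher can observe SK_CPU_ONLINE. */
        (void)atomic_xchg(&per_cpu(sk_hotplug_state, cpu), SK_CPU_ONLINE);
}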
softirq.c:630 per_cpu(tasklet_vec, cpu).tail = in softirq_init()
631 &per_cpu(tasklet_vec, cpu).head; in softirq_init()
632 per_cpu(tasklet_hi_vec, cpu).tail = in softirq_init()
633 &per_cpu(tasklet_hi_vec, cpu).head; in softirq_init()
682 for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) { in tasklet_kill_immediate()
687 per_cpu(tasklet_vec, cpu).tail = i; in tasklet_kill_immediate()
700 if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) { in takeover_tasklets()
701 *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head; in takeover_tasklets()
702 this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail); in takeover_tasklets()
703 per_cpu(tasklet_vec, cpu).head = NULL; in takeover_tasklets()
[all …]
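The softirq.c hits put the two access styles side by side: per_cpu(var, cpu) names a specific (here, dead) CPU's copy, while this_cpu_write()/__this_cpu_read() touch the local copy. A reduced sketch of the takeover_tasklets() splice, assuming a singly linked list whose tail field points at the last next pointer (illustrative sk_* names):

#include <linux/percpu.h>

struct sk_tasklet {
        struct sk_tasklet *next;
};
struct sk_vec_head {
        struct sk_tasklet *head;
        struct sk_tasklet **tail;       /* &head when the list is empty */
};
static DEFINE_PER_CPU(struct sk_vec_head, sk_vec);

/* Splice a dead CPU's pending list onto the local list. Runs from a
 * hotplug callback, i.e. with preemption disabled, which is what
 * makes the this_cpu ops safe here. */
static void sketch_takeover(unsigned int dead_cpu)
{
        if (&per_cpu(sk_vec, dead_cpu).head != per_cpu(sk_vec, dead_cpu).tail) {
                *__this_cpu_read(sk_vec.tail) = per_cpu(sk_vec, dead_cpu).head;
                this_cpu_write(sk_vec.tail, per_cpu(sk_vec, dead_cpu).tail);
                per_cpu(sk_vec, dead_cpu).head = NULL;
                per_cpu(sk_vec, dead_cpu).tail = &per_cpu(sk_vec, dead_cpu).head;
        }
}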
profile.c:243 per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu); in __profile_flip_buffers()
251 j = per_cpu(cpu_profile_flip, get_cpu()); in profile_flip_buffers()
255 struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j]; in profile_flip_buffers()
274 i = per_cpu(cpu_profile_flip, get_cpu()); in profile_discard_flip_buffers()
278 struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i]; in profile_discard_flip_buffers()
294 hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)]; in do_profile_hits()
342 if (per_cpu(cpu_profile_hits, cpu)[i]) { in profile_dead_cpu()
343 page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]); in profile_dead_cpu()
344 per_cpu(cpu_profile_hits, cpu)[i] = NULL; in profile_dead_cpu()
356 per_cpu(cpu_profile_flip, cpu) = 0; in profile_prepare_cpu()
[all …]
/Linux-v4.19/arch/powerpc/kernel/
irq.c:529 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event); in arch_show_interrupts()
534 seq_printf(p, "%10u ", per_cpu(irq_stat, j).broadcast_irqs_event); in arch_show_interrupts()
539 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others); in arch_show_interrupts()
544 seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs); in arch_show_interrupts()
549 seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs); in arch_show_interrupts()
554 seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions); in arch_show_interrupts()
561 per_cpu(irq_stat, j).hmi_exceptions); in arch_show_interrupts()
567 seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs); in arch_show_interrupts()
573 seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs); in arch_show_interrupts()
581 seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs); in arch_show_interrupts()
[all …]
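The powerpc irq.c hits are the reporting half of per-CPU statistics: each column of /proc/interrupts is produced by reading one CPU's private counter. A sketch of one such row, with an illustrative one-field stat struct:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/seq_file.h>

struct sk_irq_stat {
        unsigned int timer_irqs_event;
};
static DEFINE_PER_CPU(struct sk_irq_stat, sk_irq_stat);

/* One /proc/interrupts-style row: label, one column per online CPU. */
static void sketch_show_timer_irqs(struct seq_file *p)
{
        int j;

        seq_printf(p, "%4s: ", "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(sk_irq_stat, j).timer_irqs_event);
        seq_printf(p, "  Local timer interrupts for timer event device\n");
}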
watchdog.c:119 cpu, tb, per_cpu(wd_timer_tb, cpu), in wd_lockup_ipi()
120 tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000); in wd_lockup_ipi()
242 per_cpu(wd_timer_tb, cpu) = tb; in watchdog_timer_interrupt()
264 if (tb - per_cpu(wd_timer_tb, cpu) >= wd_panic_timeout_tb) { in soft_nmi_interrupt()
275 cpu, tb, per_cpu(wd_timer_tb, cpu), in soft_nmi_interrupt()
276 tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000); in soft_nmi_interrupt()
319 if (tb - per_cpu(wd_timer_tb, cpu) >= ticks) { in arch_touch_nmi_watchdog()
320 per_cpu(wd_timer_tb, cpu) = tb; in arch_touch_nmi_watchdog()
330 per_cpu(wd_timer_tb, cpu) = get_tb(); in start_watchdog_timer_on()
/Linux-v4.19/drivers/cpufreq/
speedstep-centrino.c:260 per_cpu(centrino_model, policy->cpu) = model; in centrino_cpu_init_table()
295 if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) || in extract_clock()
296 (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) || in extract_clock()
297 (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) { in extract_clock()
302 if ((!per_cpu(centrino_model, cpu)) || in extract_clock()
303 (!per_cpu(centrino_model, cpu)->op_points)) in extract_clock()
308 per_cpu(centrino_model, cpu)->op_points[i].frequency in extract_clock()
311 if (msr == per_cpu(centrino_model, cpu)->op_points[i].driver_data) in extract_clock()
312 return per_cpu(centrino_model, cpu)-> in extract_clock()
316 return per_cpu(centrino_model, cpu)->op_points[i-1].frequency; in extract_clock()
[all …]
arm_big_little.c:90 cpu_freq = per_cpu(cpu_last_req_freq, j); in find_cluster_maxfreq()
92 if ((cluster == per_cpu(physical_cluster, j)) && in find_cluster_maxfreq()
105 u32 cur_cluster = per_cpu(physical_cluster, cpu); in clk_get_cpu_rate()
121 pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq, in bL_cpufreq_get_rate()
124 return per_cpu(cpu_last_req_freq, cpu); in bL_cpufreq_get_rate()
140 prev_rate = per_cpu(cpu_last_req_freq, cpu); in bL_cpufreq_set_rate()
141 per_cpu(cpu_last_req_freq, cpu) = rate; in bL_cpufreq_set_rate()
142 per_cpu(physical_cluster, cpu) = new_cluster; in bL_cpufreq_set_rate()
171 per_cpu(cpu_last_req_freq, cpu) = prev_rate; in bL_cpufreq_set_rate()
172 per_cpu(physical_cluster, cpu) = old_cluster; in bL_cpufreq_set_rate()
[all …]
/Linux-v4.19/arch/x86/kernel/apic/
x2apic_cluster.c:32 u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu); in x2apic_send_IPI()
57 struct cluster_mask *cmsk = per_cpu(cluster_masks, cpu); in __x2apic_send_IPI_mask()
61 dest |= per_cpu(x86_cpu_to_logical_apicid, clustercpu); in __x2apic_send_IPI_mask()
97 return per_cpu(x86_cpu_to_logical_apicid, cpu); in x2apic_calc_apicid()
113 cmsk = per_cpu(cluster_masks, cpu); in init_x2apic_ldr()
128 if (per_cpu(cluster_masks, cpu)) in alloc_clustermask()
152 if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL)) in x2apic_prepare_cpu()
159 struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu); in x2apic_dead_cpu()
162 free_cpumask_var(per_cpu(ipi_mask, dead_cpu)); in x2apic_dead_cpu()
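x2apic_cluster.c pairs per_cpu() with cpumask_var_t: the scratch mask is allocated in the CPUHP prepare step (before the target CPU runs) and freed once the CPU is dead, so the CPU itself never allocates on its own hot path. A sketch of that callback pair, with illustrative names:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(cpumask_var_t, sk_ipi_mask);

static int sketch_prepare_cpu(unsigned int cpu)
{
        /* Runs on a control CPU, so GFP_KERNEL is fine here. */
        if (!zalloc_cpumask_var(&per_cpu(sk_ipi_mask, cpu), GFP_KERNEL))
                return -ENOMEM;
        return 0;
}

static int sketch_dead_cpu(unsigned int dead_cpu)
{
        free_cpumask_var(per_cpu(sk_ipi_mask, dead_cpu));
        return 0;
}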
/Linux-v4.19/arch/x86/kernel/
setup_percpu.c:222 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); in setup_per_cpu_areas()
223 per_cpu(cpu_number, cpu) = cpu; in setup_per_cpu_areas()
234 per_cpu(x86_cpu_to_apicid, cpu) = in setup_per_cpu_areas()
236 per_cpu(x86_bios_cpu_apicid, cpu) = in setup_per_cpu_areas()
238 per_cpu(x86_cpu_to_acpiid, cpu) = in setup_per_cpu_areas()
242 per_cpu(x86_cpu_to_logical_apicid, cpu) = in setup_per_cpu_areas()
246 per_cpu(irq_stack_ptr, cpu) = in setup_per_cpu_areas()
247 per_cpu(irq_stack_union.irq_stack, cpu) + in setup_per_cpu_areas()
251 per_cpu(x86_cpu_to_node_map, cpu) = in setup_per_cpu_areas()
irq_32.c:116 if (per_cpu(hardirq_stack, cpu)) in irq_ctx_init()
122 per_cpu(hardirq_stack, cpu) = irqstk; in irq_ctx_init()
127 per_cpu(softirq_stack, cpu) = irqstk; in irq_ctx_init()
130 cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu)); in irq_ctx_init()
/Linux-v4.19/arch/arm/mm/
context.c:70 asid = per_cpu(active_asids, cpu).counter; in a15_erratum_get_cpumask()
72 asid = per_cpu(reserved_asids, cpu); in a15_erratum_get_cpumask()
147 asid = atomic64_xchg(&per_cpu(active_asids, i), 0); in flush_context()
156 asid = per_cpu(reserved_asids, i); in flush_context()
158 per_cpu(reserved_asids, i) = asid; in flush_context()
183 if (per_cpu(reserved_asids, cpu) == asid) { in check_update_reserved_asid()
185 per_cpu(reserved_asids, cpu) = newasid; in check_update_reserved_asid()
258 && atomic64_xchg(&per_cpu(active_asids, cpu), asid)) in check_and_switch_context()
274 atomic64_set(&per_cpu(active_asids, cpu), asid); in check_and_switch_context()
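Both ASID allocators (this arm one and the arm64 one below) keep an atomic64_t per CPU so a rollover on one CPU can atomically harvest every other CPU's active ASID. A sketch of the flush_context() walk, with illustrative names; locking against concurrent rollovers is omitted:

#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/types.h>

static DEFINE_PER_CPU(atomic64_t, sk_active_asids);
static DEFINE_PER_CPU(u64, sk_reserved_asids);

static void sketch_flush_context(void)
{
        u64 asid;
        int i;

        for_each_possible_cpu(i) {
                /* Grab-and-zero atomically: a concurrent context switch
                 * that reads back 0 knows it must take the slow path. */
                asid = atomic64_xchg(&per_cpu(sk_active_asids, i), 0);
                /* 0 means that CPU already rolled over; keep its
                 * existing reservation instead. */
                if (asid == 0)
                        asid = per_cpu(sk_reserved_asids, i);
                per_cpu(sk_reserved_asids, i) = asid;
        }
}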
proc-v7-bugs.c:45 if (per_cpu(harden_branch_predictor_fn, cpu)) in cpu_v7_spectre_init()
57 per_cpu(harden_branch_predictor_fn, cpu) = in cpu_v7_spectre_init()
66 per_cpu(harden_branch_predictor_fn, cpu) = in cpu_v7_spectre_init()
93 per_cpu(harden_branch_predictor_fn, cpu) = in cpu_v7_spectre_init()
106 per_cpu(harden_branch_predictor_fn, cpu) = in cpu_v7_spectre_init()
/Linux-v4.19/arch/ia64/mm/
tlb.c:93 per_cpu(ia64_need_tlb_flush, i) = 1; in wrap_mmu_context()
368 per_cpu(ia64_tr_num, cpu) = 8; in ia64_tlb_init()
371 per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry+1; in ia64_tlb_init()
372 if (per_cpu(ia64_tr_num, cpu) > in ia64_tlb_init()
374 per_cpu(ia64_tr_num, cpu) = in ia64_tlb_init()
376 if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) { in ia64_tlb_init()
378 per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX; in ia64_tlb_init()
443 for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu); in ia64_itr_entry()
455 for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu); in ia64_itr_entry()
466 for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) { in ia64_itr_entry()
[all …]
/Linux-v4.19/arch/arm64/mm/
context.c:100 asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0); in flush_context()
109 asid = per_cpu(reserved_asids, i); in flush_context()
111 per_cpu(reserved_asids, i) = asid; in flush_context()
136 if (per_cpu(reserved_asids, cpu) == asid) { in check_update_reserved_asid()
138 per_cpu(reserved_asids, cpu) = newasid; in check_update_reserved_asid()
215 old_active_asid = atomic64_read(&per_cpu(active_asids, cpu)); in check_and_switch_context()
218 atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu), in check_and_switch_context()
233 atomic64_set(&per_cpu(active_asids, cpu), asid); in check_and_switch_context()
/Linux-v4.19/arch/mips/kernel/
mips-cpc.c:79 spin_lock_init(&per_cpu(cpc_core_lock, cpu)); in mips_cpc_probe()
102 spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core), in mips_cpc_lock_other()
103 per_cpu(cpc_core_lock_flags, curr_core)); in mips_cpc_lock_other()
122 spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core), in mips_cpc_unlock_other()
123 per_cpu(cpc_core_lock_flags, curr_core)); in mips_cpc_unlock_other()
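mips-cpc.c stores not just a lock per core but also the saved IRQ flags per core, because spin_lock_irqsave() needs somewhere persistent to stash them until the matching unlock on the same core. A sketch with illustrative names:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

static DEFINE_PER_CPU(spinlock_t, sk_core_lock);
static DEFINE_PER_CPU(unsigned long, sk_core_lock_flags);

static void sketch_probe(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu)
                spin_lock_init(&per_cpu(sk_core_lock, cpu));
}

static void sketch_lock_other(unsigned int curr_core)
{
        /* per_cpu() yields an lvalue, so it can receive the flags. */
        spin_lock_irqsave(&per_cpu(sk_core_lock, curr_core),
                          per_cpu(sk_core_lock_flags, curr_core));
}

static void sketch_unlock_other(unsigned int curr_core)
{
        spin_unlock_irqrestore(&per_cpu(sk_core_lock, curr_core),
                               per_cpu(sk_core_lock_flags, curr_core));
}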
/Linux-v4.19/arch/parisc/kernel/
irq.c:89 per_cpu(local_ack_eiem, cpu) &= ~mask; in cpu_ack_irq()
92 set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu)); in cpu_ack_irq()
104 per_cpu(local_ack_eiem, cpu) |= mask; in cpu_eoi_irq()
107 set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu)); in cpu_eoi_irq()
156 #define irq_stats(x) (&per_cpu(irq_stat, x))
345 return per_cpu(cpu_data, cpu).txn_addr; in txn_affinity_addr()
357 (!per_cpu(cpu_data, next_cpu).txn_addr || in txn_alloc_addr()
427 stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack; in stack_overflow_check()
430 last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu); in stack_overflow_check()
446 last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu); in stack_overflow_check()
[all …]
smp.c:124 struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu); in ipi_interrupt()
129 spinlock_t *lock = &per_cpu(ipi_lock, this_cpu); in ipi_interrupt()
191 struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu); in ipi_send()
192 spinlock_t *lock = &per_cpu(ipi_lock, cpu); in ipi_send()
323 const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid); in smp_boot_one_cpu()
381 int bootstrap_processor = per_cpu(cpu_data, 0).cpuid; in smp_prepare_boot_cpu()
401 spin_lock_init(&per_cpu(ipi_lock, cpu)); in smp_prepare_cpus()
/Linux-v4.19/drivers/oprofile/
oprofile_perf.c:43 if (per_cpu(perf_events, cpu)[id] == event) in op_overflow_handler()
79 if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event]) in op_create_counter()
96 per_cpu(perf_events, cpu)[event] = pevent; in op_create_counter()
103 struct perf_event *pevent = per_cpu(perf_events, cpu)[event]; in op_destroy_counter()
107 per_cpu(perf_events, cpu)[event] = NULL; in op_destroy_counter()
262 event = per_cpu(perf_events, cpu)[id]; in oprofile_perf_exit()
267 kfree(per_cpu(perf_events, cpu)); in oprofile_perf_exit()
301 per_cpu(perf_events, cpu) = kcalloc(num_counters, in oprofile_perf_init()
303 if (!per_cpu(perf_events, cpu)) { in oprofile_perf_init()
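oprofile_perf.c shows a per-CPU variable that is itself just a pointer: each CPU's slot holds the base of a kcalloc'd array of perf_event pointers, indexed per counter. A sketch of the init/exit pair, with illustrative names:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/perf_event.h>
#include <linux/slab.h>

static DEFINE_PER_CPU(struct perf_event **, sk_events);

static int sketch_init(int num_counters)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                per_cpu(sk_events, cpu) = kcalloc(num_counters,
                                sizeof(struct perf_event *), GFP_KERNEL);
                if (!per_cpu(sk_events, cpu))
                        return -ENOMEM; /* caller unwinds via sketch_exit() */
        }
        return 0;
}

static void sketch_exit(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                kfree(per_cpu(sk_events, cpu));
}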
nmi_timer_int.c:39 struct perf_event *event = per_cpu(nmi_timer_events, cpu); in nmi_timer_start_cpu()
46 per_cpu(nmi_timer_events, cpu) = event; in nmi_timer_start_cpu()
57 struct perf_event *event = per_cpu(nmi_timer_events, cpu); in nmi_timer_stop_cpu()
107 event = per_cpu(nmi_timer_events, cpu); in nmi_timer_shutdown()
111 per_cpu(nmi_timer_events, cpu) = NULL; in nmi_timer_shutdown()
/Linux-v4.19/arch/arm/kernel/
smp.c:345 struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); in smp_store_cpu_info()
429 bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy; in smp_cpus_done()
584 per_cpu(cpu_completion, cpu) = completion; in register_ipi_completion()
590 complete(per_cpu(cpu_completion, cpu)); in ipi_complete()
720 if (!per_cpu(l_p_j_ref, cpu)) { in cpufreq_callback()
721 per_cpu(l_p_j_ref, cpu) = in cpufreq_callback()
722 per_cpu(cpu_data, cpu).loops_per_jiffy; in cpufreq_callback()
723 per_cpu(l_p_j_ref_freq, cpu) = freq->old; in cpufreq_callback()
735 per_cpu(cpu_data, cpu).loops_per_jiffy = in cpufreq_callback()
736 cpufreq_scale(per_cpu(l_p_j_ref, cpu), in cpufreq_callback()
[all …]
