| /Linux-v5.4/arch/x86/xen/ |
| D | smp.c |
|   35  if (per_cpu(xen_resched_irq, cpu).irq >= 0) {  in xen_smp_intr_free()
|   36  unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);  in xen_smp_intr_free()
|   37  per_cpu(xen_resched_irq, cpu).irq = -1;  in xen_smp_intr_free()
|   38  kfree(per_cpu(xen_resched_irq, cpu).name);  in xen_smp_intr_free()
|   39  per_cpu(xen_resched_irq, cpu).name = NULL;  in xen_smp_intr_free()
|   41  if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {  in xen_smp_intr_free()
|   42  unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);  in xen_smp_intr_free()
|   43  per_cpu(xen_callfunc_irq, cpu).irq = -1;  in xen_smp_intr_free()
|   44  kfree(per_cpu(xen_callfunc_irq, cpu).name);  in xen_smp_intr_free()
|   45  per_cpu(xen_callfunc_irq, cpu).name = NULL;  in xen_smp_intr_free()
|   [all …]
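The idiom repeated in smp.c (and again in smp_pv.c below) — test the per-CPU slot's irq field, unbind, reset it to -1, free the kasprintf()-allocated name — boils down to a short sketch. This is a hedged illustration, not the kernel's exact code: the slot type, the variable xen_example_irq and the helper name are hypothetical stand-ins for xen_resched_irq/xen_callfunc_irq and xen_smp_intr_free().

```c
#include <linux/percpu.h>
#include <linux/slab.h>
#include <xen/events.h>		/* unbind_from_irqhandler() */

struct xen_example_irq_slot {		/* hypothetical; mirrors the fields the listing implies */
	int irq;
	char *name;
};

/* irq starts at -1 so "never bound" and "already freed" look the same. */
static DEFINE_PER_CPU(struct xen_example_irq_slot, xen_example_irq) = { .irq = -1 };

static void xen_example_intr_free(unsigned int cpu)
{
	if (per_cpu(xen_example_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_example_irq, cpu).irq, NULL);
		per_cpu(xen_example_irq, cpu).irq = -1;
		kfree(per_cpu(xen_example_irq, cpu).name);
		per_cpu(xen_example_irq, cpu).name = NULL;
	}
}
```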
|
| D | smp_pv.c |
|   99  if (per_cpu(xen_irq_work, cpu).irq >= 0) {  in xen_smp_intr_free_pv()
|   100  unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);  in xen_smp_intr_free_pv()
|   101  per_cpu(xen_irq_work, cpu).irq = -1;  in xen_smp_intr_free_pv()
|   102  kfree(per_cpu(xen_irq_work, cpu).name);  in xen_smp_intr_free_pv()
|   103  per_cpu(xen_irq_work, cpu).name = NULL;  in xen_smp_intr_free_pv()
|   106  if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {  in xen_smp_intr_free_pv()
|   107  unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);  in xen_smp_intr_free_pv()
|   108  per_cpu(xen_pmu_irq, cpu).irq = -1;  in xen_smp_intr_free_pv()
|   109  kfree(per_cpu(xen_pmu_irq, cpu).name);  in xen_smp_intr_free_pv()
|   110  per_cpu(xen_pmu_irq, cpu).name = NULL;  in xen_smp_intr_free_pv()
|   [all …]
|
| D | spinlock.c |
|   25  int irq = per_cpu(lock_kicker_irq, cpu);  in xen_qlock_kick()
|   74  WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",  in xen_init_lock_cpu()
|   75  cpu, per_cpu(lock_kicker_irq, cpu));  in xen_init_lock_cpu()
|   87  per_cpu(lock_kicker_irq, cpu) = irq;  in xen_init_lock_cpu()
|   88  per_cpu(irq_name, cpu) = name;  in xen_init_lock_cpu()
|   99  unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);  in xen_uninit_lock_cpu()
|   100  per_cpu(lock_kicker_irq, cpu) = -1;  in xen_uninit_lock_cpu()
|   101  kfree(per_cpu(irq_name, cpu));  in xen_uninit_lock_cpu()
|   102  per_cpu(irq_name, cpu) = NULL;  in xen_uninit_lock_cpu()
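spinlock.c pairs a plain per-CPU int (the bound kicker IRQ, -1 when unbound) with a per-CPU char * for the IRQ name, and warns if a CPU is initialised twice. A minimal hedged sketch of that pairing, using hypothetical example_* names and omitting the xen_pvspin guard and the actual IRQ binding:

```c
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kernel.h>	/* WARN() */
#include <xen/events.h>

static DEFINE_PER_CPU(int, example_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, example_irq_name);

static void example_init_lock_cpu(int cpu)
{
	WARN(per_cpu(example_kicker_irq, cpu) >= 0,
	     "spinlock on CPU%d exists on IRQ%d!\n",
	     cpu, per_cpu(example_kicker_irq, cpu));
	/* ... kasprintf() the name and bind the IPI handler here ... */
}

static void example_uninit_lock_cpu(int cpu)
{
	unbind_from_irqhandler(per_cpu(example_kicker_irq, cpu), NULL);
	per_cpu(example_kicker_irq, cpu) = -1;
	kfree(per_cpu(example_irq_name, cpu));
	per_cpu(example_irq_name, cpu) = NULL;
}
```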
|
| D | enlighten.c |
|   191  per_cpu(xen_vcpu, cpu) =  in xen_vcpu_info_reset()
|   195  per_cpu(xen_vcpu, cpu) = NULL;  in xen_vcpu_info_reset()
|   219  if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))  in xen_vcpu_setup()
|   224  vcpup = &per_cpu(xen_vcpu_info, cpu);  in xen_vcpu_setup()
|   250  per_cpu(xen_vcpu, cpu) = vcpup;  in xen_vcpu_setup()
|   257  return ((per_cpu(xen_vcpu, cpu) == NULL) ? -ENODEV : 0);  in xen_vcpu_setup()
|
| /Linux-v5.4/arch/x86/oprofile/ |
| D | nmi_int.c |
|   156  kfree(per_cpu(cpu_msrs, i).multiplex);  in nmi_shutdown_mux()
|   157  per_cpu(cpu_msrs, i).multiplex = NULL;  in nmi_shutdown_mux()
|   158  per_cpu(switch_index, i) = 0;  in nmi_shutdown_mux()
|   172  per_cpu(cpu_msrs, i).multiplex =  in nmi_setup_mux()
|   174  if (!per_cpu(cpu_msrs, i).multiplex)  in nmi_setup_mux()
|   197  per_cpu(switch_index, cpu) = 0;  in nmi_cpu_setup_mux()
|   229  int si = per_cpu(switch_index, cpu);  in nmi_cpu_switch()
|   230  struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);  in nmi_cpu_switch()
|   238  per_cpu(switch_index, cpu) = 0;  in nmi_cpu_switch()
|   240  per_cpu(switch_index, cpu) = si;  in nmi_cpu_switch()
|   [all …]
|
| /Linux-v5.4/arch/powerpc/kernel/ |
| D | irq.c |
|   515  seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);  in arch_show_interrupts()
|   520  seq_printf(p, "%10u ", per_cpu(irq_stat, j).broadcast_irqs_event);  in arch_show_interrupts()
|   525  seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);  in arch_show_interrupts()
|   530  seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);  in arch_show_interrupts()
|   535  seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);  in arch_show_interrupts()
|   540  seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);  in arch_show_interrupts()
|   547  per_cpu(irq_stat, j).hmi_exceptions);  in arch_show_interrupts()
|   553  seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);  in arch_show_interrupts()
|   559  seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs);  in arch_show_interrupts()
|   567  seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);  in arch_show_interrupts()
|   [all …]
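arch_show_interrupts() walks the online CPUs and prints one field of the per-CPU irq_stat structure per column of /proc/interrupts. A reduced, hedged sketch of that loop — the struct below keeps only one of the counters named in the listing, and the label text is illustrative:

```c
#include <linux/percpu.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>

struct example_irq_stat {
	unsigned int timer_irqs_event;	/* one of several counters in the real struct */
};

static DEFINE_PER_CPU(struct example_irq_stat, example_irq_stat);

static void example_show_interrupts(struct seq_file *p)
{
	int j;

	seq_printf(p, "%*s: ", 3, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(example_irq_stat, j).timer_irqs_event);
	seq_printf(p, "  Local timer interrupts for timer event device\n");
}
```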
|
| /Linux-v5.4/kernel/ |
| D | smpboot.c |
|   32  struct task_struct *tsk = per_cpu(idle_threads, cpu);  in idle_thread_get()
|   42  per_cpu(idle_threads, smp_processor_id()) = current;  in idle_thread_set_boot_cpu()
|   53  struct task_struct *tsk = per_cpu(idle_threads, cpu);  in idle_init()
|   60  per_cpu(idle_threads, cpu) = tsk;  in idle_init()
|   337  return atomic_read(&per_cpu(cpu_hotplug_state, cpu));  in cpu_report_state()
|   355  atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);  in cpu_check_up_prepare()
|   359  switch (atomic_read(&per_cpu(cpu_hotplug_state, cpu))) {  in cpu_check_up_prepare()
|   364  atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);  in cpu_check_up_prepare()
|   410  (void)atomic_xchg(&per_cpu(cpu_hotplug_state, cpu), CPU_ONLINE);  in cpu_set_state_online()
|   428  if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD)  in cpu_wait_death()
|   [all …]
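kernel/smpboot.c keeps one idle task_struct pointer per CPU so repeated hotplug can reuse an already forked idle thread; the boot CPU simply records current. A condensed sketch of that bookkeeping (fork_idle() and the hotplug-state atomics from the same file are left out, and the example_* names are stand-ins):

```c
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/err.h>
#include <linux/init.h>

static DEFINE_PER_CPU(struct task_struct *, example_idle_threads);

static struct task_struct *example_idle_thread_get(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(example_idle_threads, cpu);

	/* NULL here means the idle task was never forked for this CPU. */
	return tsk ? tsk : ERR_PTR(-ENOMEM);
}

static void __init example_idle_thread_set_boot_cpu(void)
{
	per_cpu(example_idle_threads, smp_processor_id()) = current;
}
```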
|
| D | softirq.c |
|   580  per_cpu(tasklet_vec, cpu).tail =  in softirq_init()
|   581  &per_cpu(tasklet_vec, cpu).head;  in softirq_init()
|   582  per_cpu(tasklet_hi_vec, cpu).tail =  in softirq_init()
|   583  &per_cpu(tasklet_hi_vec, cpu).head;  in softirq_init()
|   632  for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {  in tasklet_kill_immediate()
|   637  per_cpu(tasklet_vec, cpu).tail = i;  in tasklet_kill_immediate()
|   650  if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {  in takeover_tasklets()
|   651  *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;  in takeover_tasklets()
|   652  __this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);  in takeover_tasklets()
|   653  per_cpu(tasklet_vec, cpu).head = NULL;  in takeover_tasklets()
|   [all …]
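The softirq_init() lines show the standard empty-list setup for a singly linked list kept with a tail pointer: tail points at head, so appending never needs an empty-list special case, and takeover_tasklets() can splice a dead CPU's list onto the local one. A hedged sketch of just the initialisation, using a simplified stand-in for the real tasklet_head/tasklet_vec types:

```c
#include <linux/percpu.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/init.h>

struct example_tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;	/* points at the last ->next (or at head when empty) */
};

static DEFINE_PER_CPU(struct example_tasklet_head, example_tasklet_vec);

static void __init example_softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(example_tasklet_vec, cpu).tail =
			&per_cpu(example_tasklet_vec, cpu).head;
}
```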
|
| D | profile.c |
|   244  per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);  in __profile_flip_buffers()
|   252  j = per_cpu(cpu_profile_flip, get_cpu());  in profile_flip_buffers()
|   256  struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];  in profile_flip_buffers()
|   275  i = per_cpu(cpu_profile_flip, get_cpu());  in profile_discard_flip_buffers()
|   279  struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];  in profile_discard_flip_buffers()
|   295  hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];  in do_profile_hits()
|   343  if (per_cpu(cpu_profile_hits, cpu)[i]) {  in profile_dead_cpu()
|   344  page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]);  in profile_dead_cpu()
|   345  per_cpu(cpu_profile_hits, cpu)[i] = NULL;  in profile_dead_cpu()
|   357  per_cpu(cpu_profile_flip, cpu) = 0;  in profile_prepare_cpu()
|   [all …]
|
| /Linux-v5.4/drivers/perf/ |
| D | arm_pmu_acpi.c |
|   164  per_cpu(pmu_irqs, cpu) = irq;  in arm_pmu_acpi_parse_irqs()
|   172  irq = per_cpu(pmu_irqs, cpu);  in arm_pmu_acpi_parse_irqs()
|   183  if (per_cpu(pmu_irqs, irq_cpu) == irq)  in arm_pmu_acpi_parse_irqs()
|   184  per_cpu(pmu_irqs, irq_cpu) = 0;  in arm_pmu_acpi_parse_irqs()
|   198  pmu = per_cpu(probed_pmus, cpu);  in arm_pmu_acpi_find_alloc_pmu()
|   230  int other_irq = per_cpu(hw_events->irq, cpu);  in pmu_irq_matches()
|   262  if (per_cpu(probed_pmus, cpu))  in arm_pmu_acpi_cpu_starting()
|   265  irq = per_cpu(pmu_irqs, cpu);  in arm_pmu_acpi_cpu_starting()
|   271  per_cpu(probed_pmus, cpu) = pmu;  in arm_pmu_acpi_cpu_starting()
|   275  per_cpu(hw_events->irq, cpu) = irq;  in arm_pmu_acpi_cpu_starting()
|   [all …]
|
| /Linux-v5.4/drivers/cpufreq/ |
| D | speedstep-centrino.c |
|   261  per_cpu(centrino_model, policy->cpu) = model;  in centrino_cpu_init_table()
|   296  if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) ||  in extract_clock()
|   297  (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) ||  in extract_clock()
|   298  (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) {  in extract_clock()
|   303  if ((!per_cpu(centrino_model, cpu)) ||  in extract_clock()
|   304  (!per_cpu(centrino_model, cpu)->op_points))  in extract_clock()
|   309  per_cpu(centrino_model, cpu)->op_points[i].frequency  in extract_clock()
|   312  if (msr == per_cpu(centrino_model, cpu)->op_points[i].driver_data)  in extract_clock()
|   313  return per_cpu(centrino_model, cpu)->  in extract_clock()
|   317  return per_cpu(centrino_model, cpu)->op_points[i-1].frequency;  in extract_clock()
|   [all …]
|
| D | arm_big_little.c |
|   90  cpu_freq = per_cpu(cpu_last_req_freq, j);  in find_cluster_maxfreq()
|   92  if ((cluster == per_cpu(physical_cluster, j)) &&  in find_cluster_maxfreq()
|   105  u32 cur_cluster = per_cpu(physical_cluster, cpu);  in clk_get_cpu_rate()
|   121  pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq,  in bL_cpufreq_get_rate()
|   124  return per_cpu(cpu_last_req_freq, cpu);  in bL_cpufreq_get_rate()
|   140  prev_rate = per_cpu(cpu_last_req_freq, cpu);  in bL_cpufreq_set_rate()
|   141  per_cpu(cpu_last_req_freq, cpu) = rate;  in bL_cpufreq_set_rate()
|   142  per_cpu(physical_cluster, cpu) = new_cluster;  in bL_cpufreq_set_rate()
|   171  per_cpu(cpu_last_req_freq, cpu) = prev_rate;  in bL_cpufreq_set_rate()
|   172  per_cpu(physical_cluster, cpu) = old_cluster;  in bL_cpufreq_set_rate()
|   [all …]
|
| /Linux-v5.4/arch/x86/kernel/apic/ |
| D | x2apic_cluster.c |
|   30  u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);  in x2apic_send_IPI()
|   55  struct cluster_mask *cmsk = per_cpu(cluster_masks, cpu);  in __x2apic_send_IPI_mask()
|   59  dest |= per_cpu(x86_cpu_to_logical_apicid, clustercpu);  in __x2apic_send_IPI_mask()
|   95  return per_cpu(x86_cpu_to_logical_apicid, cpu);  in x2apic_calc_apicid()
|   111  cmsk = per_cpu(cluster_masks, cpu);  in init_x2apic_ldr()
|   126  if (per_cpu(cluster_masks, cpu))  in alloc_clustermask()
|   150  if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL))  in x2apic_prepare_cpu()
|   157  struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);  in x2apic_dead_cpu()
|   161  free_cpumask_var(per_cpu(ipi_mask, dead_cpu));  in x2apic_dead_cpu()
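Two per-CPU idioms show up in x2apic_cluster.c: a per-CPU cpumask_var_t allocated in the CPU "prepare" hotplug step and freed in the "dead" step, and an OR-accumulation of logical APIC IDs over the CPUs of a cluster. A hedged sketch of both, with example_* names and a plain cpumask parameter standing in for the file's cluster_masks structure:

```c
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/errno.h>

static DEFINE_PER_CPU(u32, example_logical_apicid);
static DEFINE_PER_CPU(cpumask_var_t, example_ipi_mask);

static int example_prepare_cpu(unsigned int cpu)
{
	if (!zalloc_cpumask_var(&per_cpu(example_ipi_mask, cpu), GFP_KERNEL))
		return -ENOMEM;
	return 0;
}

static int example_dead_cpu(unsigned int dead_cpu)
{
	free_cpumask_var(per_cpu(example_ipi_mask, dead_cpu));
	return 0;
}

/* OR together the logical APIC IDs of every CPU in the target cluster. */
static u32 example_cluster_dest(const struct cpumask *cluster_cpus)
{
	u32 dest = 0;
	int clustercpu;

	for_each_cpu(clustercpu, cluster_cpus)
		dest |= per_cpu(example_logical_apicid, clustercpu);
	return dest;
}
```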
|
| /Linux-v5.4/arch/arm/mm/ |
| D | context.c |
|   67  asid = per_cpu(active_asids, cpu).counter;  in a15_erratum_get_cpumask()
|   69  asid = per_cpu(reserved_asids, cpu);  in a15_erratum_get_cpumask()
|   144  asid = atomic64_xchg(&per_cpu(active_asids, i), 0);  in flush_context()
|   153  asid = per_cpu(reserved_asids, i);  in flush_context()
|   155  per_cpu(reserved_asids, i) = asid;  in flush_context()
|   180  if (per_cpu(reserved_asids, cpu) == asid) {  in check_update_reserved_asid()
|   182  per_cpu(reserved_asids, cpu) = newasid;  in check_update_reserved_asid()
|   255  && atomic64_xchg(&per_cpu(active_asids, cpu), asid))  in check_and_switch_context()
|   271  atomic64_set(&per_cpu(active_asids, cpu), asid);  in check_and_switch_context()
|
| D | proc-v7-bugs.c |
|   45  if (per_cpu(harden_branch_predictor_fn, cpu))  in cpu_v7_spectre_init()
|   55  per_cpu(harden_branch_predictor_fn, cpu) =  in cpu_v7_spectre_init()
|   62  per_cpu(harden_branch_predictor_fn, cpu) =  in cpu_v7_spectre_init()
|   87  per_cpu(harden_branch_predictor_fn, cpu) =  in cpu_v7_spectre_init()
|   98  per_cpu(harden_branch_predictor_fn, cpu) =  in cpu_v7_spectre_init()
|
| /Linux-v5.4/arch/ia64/mm/ |
| D | tlb.c |
|   102  per_cpu(ia64_need_tlb_flush, i) = 1;  in wrap_mmu_context()
|   398  per_cpu(ia64_tr_num, cpu) = 8;  in ia64_tlb_init()
|   401  per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry+1;  in ia64_tlb_init()
|   402  if (per_cpu(ia64_tr_num, cpu) >  in ia64_tlb_init()
|   404  per_cpu(ia64_tr_num, cpu) =  in ia64_tlb_init()
|   406  if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) {  in ia64_tlb_init()
|   408  per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX;  in ia64_tlb_init()
|   473  for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);  in ia64_itr_entry()
|   485  for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);  in ia64_itr_entry()
|   496  for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {  in ia64_itr_entry()
|   [all …]
|
| /Linux-v5.4/arch/arm64/mm/ |
| D | context.c |
|   89  asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);  in flush_context()
|   98  asid = per_cpu(reserved_asids, i);  in flush_context()
|   100  per_cpu(reserved_asids, i) = asid;  in flush_context()
|   125  if (per_cpu(reserved_asids, cpu) == asid) {  in check_update_reserved_asid()
|   127  per_cpu(reserved_asids, cpu) = newasid;  in check_update_reserved_asid()
|   207  old_active_asid = atomic64_read(&per_cpu(active_asids, cpu));  in check_and_switch_context()
|   210  atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu),  in check_and_switch_context()
|   225  atomic64_set(&per_cpu(active_asids, cpu), asid);  in check_and_switch_context()
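Both ASID allocators (the arm file above and this arm64 one) keep an atomic64_t "active" ASID and a plain u64 "reserved" ASID per CPU. On generation rollover, each CPU's active value is atomically swapped to zero; a zero result means that CPU already rolled over, so the ASID it reserved last time is kept. A hedged sketch of that one step, with hypothetical example_* names:

```c
#include <linux/percpu.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/types.h>

static DEFINE_PER_CPU(atomic64_t, example_active_asids);
static DEFINE_PER_CPU(u64, example_reserved_asids);

static void example_record_reserved_asids(void)
{
	u64 asid;
	int i;

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(example_active_asids, i), 0);
		/*
		 * 0 means this CPU already went through a rollover; keep the
		 * ASID it reserved then instead of the swapped-out value.
		 */
		if (asid == 0)
			asid = per_cpu(example_reserved_asids, i);
		per_cpu(example_reserved_asids, i) = asid;
	}
}
```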
|
| /Linux-v5.4/arch/mips/kernel/ |
| D | mips-cpc.c |
|   75  spin_lock_init(&per_cpu(cpc_core_lock, cpu));  in mips_cpc_probe()
|   98  spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),  in mips_cpc_lock_other()
|   99  per_cpu(cpc_core_lock_flags, curr_core));  in mips_cpc_lock_other()
|   118  spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),  in mips_cpc_unlock_other()
|   119  per_cpu(cpc_core_lock_flags, curr_core));  in mips_cpc_unlock_other()
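mips-cpc.c guards accesses to another core's CPC registers with a per-CPU spinlock and stashes the saved IRQ flags in a second per-CPU variable, because the lock is taken in one helper and released in another. A hedged sketch of that split lock/unlock pattern with example_* names:

```c
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU_ALIGNED(spinlock_t, example_core_lock);
static DEFINE_PER_CPU_ALIGNED(unsigned long, example_core_lock_flags);

static void example_locks_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu(example_core_lock, cpu));
}

/* Taken in one function, released in another, so the flags live per CPU too. */
static void example_lock_other(unsigned int curr_core)
{
	unsigned long flags;

	spin_lock_irqsave(&per_cpu(example_core_lock, curr_core), flags);
	per_cpu(example_core_lock_flags, curr_core) = flags;
}

static void example_unlock_other(unsigned int curr_core)
{
	spin_unlock_irqrestore(&per_cpu(example_core_lock, curr_core),
			       per_cpu(example_core_lock_flags, curr_core));
}
```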
|
| /Linux-v5.4/arch/parisc/kernel/ |
| D | irq.c |
|   76  per_cpu(local_ack_eiem, cpu) &= ~mask;  in cpu_ack_irq()
|   79  set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));  in cpu_ack_irq()
|   91  per_cpu(local_ack_eiem, cpu) |= mask;  in cpu_eoi_irq()
|   94  set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));  in cpu_eoi_irq()
|   146  #define irq_stats(x) (&per_cpu(irq_stat, x))
|   341  return per_cpu(cpu_data, cpu).txn_addr;  in txn_affinity_addr()
|   353  (!per_cpu(cpu_data, next_cpu).txn_addr ||  in txn_alloc_addr()
|   423  stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;  in stack_overflow_check()
|   426  last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);  in stack_overflow_check()
|   442  last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);  in stack_overflow_check()
|   [all …]
|
| D | smp.c |
|   122  struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);  in ipi_interrupt()
|   127  spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);  in ipi_interrupt()
|   190  struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);  in ipi_send()
|   191  spinlock_t *lock = &per_cpu(ipi_lock, cpu);  in ipi_send()
|   322  const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);  in smp_boot_one_cpu()
|   380  int bootstrap_processor = per_cpu(cpu_data, 0).cpuid;  in smp_prepare_boot_cpu()
|   400  spin_lock_init(&per_cpu(ipi_lock, cpu));  in smp_prepare_cpus()
|
| /Linux-v5.4/drivers/oprofile/ |
| D | oprofile_perf.c |
|   43  if (per_cpu(perf_events, cpu)[id] == event)  in op_overflow_handler()
|   79  if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event])  in op_create_counter()
|   96  per_cpu(perf_events, cpu)[event] = pevent;  in op_create_counter()
|   103  struct perf_event *pevent = per_cpu(perf_events, cpu)[event];  in op_destroy_counter()
|   107  per_cpu(perf_events, cpu)[event] = NULL;  in op_destroy_counter()
|   262  event = per_cpu(perf_events, cpu)[id];  in oprofile_perf_exit()
|   267  kfree(per_cpu(perf_events, cpu));  in oprofile_perf_exit()
|   301  per_cpu(perf_events, cpu) = kcalloc(num_counters,  in oprofile_perf_init()
|   303  if (!per_cpu(perf_events, cpu)) {  in oprofile_perf_init()
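oprofile_perf.c gives each CPU a kcalloc()'d array of struct perf_event pointers, indexed by counter id, so per_cpu(perf_events, cpu)[event] addresses one counter on one CPU. A hedged sketch of the allocate/free halves of that layout with example_* names:

```c
#include <linux/percpu.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

static DEFINE_PER_CPU(struct perf_event **, example_perf_events);

static int example_alloc_counters(int num_counters)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(example_perf_events, cpu) =
			kcalloc(num_counters, sizeof(struct perf_event *),
				GFP_KERNEL);
		if (!per_cpu(example_perf_events, cpu))
			return -ENOMEM;	/* the real code also unwinds earlier CPUs */
	}
	return 0;
}

static void example_free_counters(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		kfree(per_cpu(example_perf_events, cpu));
		per_cpu(example_perf_events, cpu) = NULL;
	}
}
```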
|
| D | nmi_timer_int.c |
|   39  struct perf_event *event = per_cpu(nmi_timer_events, cpu);  in nmi_timer_start_cpu()
|   46  per_cpu(nmi_timer_events, cpu) = event;  in nmi_timer_start_cpu()
|   57  struct perf_event *event = per_cpu(nmi_timer_events, cpu);  in nmi_timer_stop_cpu()
|   107  event = per_cpu(nmi_timer_events, cpu);  in nmi_timer_shutdown()
|   111  per_cpu(nmi_timer_events, cpu) = NULL;  in nmi_timer_shutdown()
|
| /Linux-v5.4/arch/x86/kernel/ |
| D | setup_percpu.c |
|   224  per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);  in setup_per_cpu_areas()
|   225  per_cpu(cpu_number, cpu) = cpu;  in setup_per_cpu_areas()
|   236  per_cpu(x86_cpu_to_apicid, cpu) =  in setup_per_cpu_areas()
|   238  per_cpu(x86_bios_cpu_apicid, cpu) =  in setup_per_cpu_areas()
|   240  per_cpu(x86_cpu_to_acpiid, cpu) =  in setup_per_cpu_areas()
|   244  per_cpu(x86_cpu_to_logical_apicid, cpu) =  in setup_per_cpu_areas()
|   248  per_cpu(x86_cpu_to_node_map, cpu) =  in setup_per_cpu_areas()
|
| /Linux-v5.4/arch/x86/kernel/cpu/mce/ |
| D | amd.c |
|   225  per_cpu(smca_misc_banks_map, cpu) |= BIT(bank);  in smca_set_misc_banks_map()
|   484  if (!(per_cpu(smca_misc_banks_map, cpu) & BIT(bank)))  in smca_get_block_address()
|   496  if ((bank >= per_cpu(mce_num_banks, cpu)) || (block >= NR_BLOCKS))  in get_block_address()
|   528  per_cpu(bank_map, cpu) |= (1 << bank);  in prepare_threshold_block()
|   1020  if (!(per_cpu(bank_map, cpu) & (1 << bank)))  in amd_threshold_interrupt()
|   1023  first_block = per_cpu(threshold_banks, cpu)[bank]->blocks;  in amd_threshold_interrupt()
|   1206  if ((bank >= per_cpu(mce_num_banks, cpu)) || (block >= NR_BLOCKS))  in allocate_threshold_blocks()
|   1244  if (per_cpu(threshold_banks, cpu)[bank]->blocks) {  in allocate_threshold_blocks()
|   1246  &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);  in allocate_threshold_blocks()
|   1248  per_cpu(threshold_banks, cpu)[bank]->blocks = b;  in allocate_threshold_blocks()
|   [all …]
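amd.c uses per-CPU integers as bitmaps: smca_misc_banks_map and bank_map get one bit per MCA bank, set during probing and tested in the threshold interrupt so banks without a valid block are skipped. A minimal hedged sketch of that bitmap idiom with example_* names:

```c
#include <linux/percpu.h>
#include <linux/bits.h>
#include <linux/types.h>

static DEFINE_PER_CPU(unsigned int, example_bank_map);	/* one bit per bank */

static void example_mark_bank(unsigned int cpu, unsigned int bank)
{
	per_cpu(example_bank_map, cpu) |= BIT(bank);
}

static bool example_bank_present(unsigned int cpu, unsigned int bank)
{
	return per_cpu(example_bank_map, cpu) & BIT(bank);
}
```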
|
| /Linux-v5.4/arch/arm/kernel/ |
| D | smp.c |
|   367  struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);  in smp_store_cpu_info()
|   454  bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;  in smp_cpus_done()
|   611  per_cpu(cpu_completion, cpu) = completion;  in register_ipi_completion()
|   617  complete(per_cpu(cpu_completion, cpu));  in ipi_complete()
|   764  if (!per_cpu(l_p_j_ref, first)) {  in cpufreq_callback()
|   766  per_cpu(l_p_j_ref, cpu) =  in cpufreq_callback()
|   767  per_cpu(cpu_data, cpu).loops_per_jiffy;  in cpufreq_callback()
|   768  per_cpu(l_p_j_ref_freq, cpu) = freq->old;  in cpufreq_callback()
|   783  lpj = cpufreq_scale(per_cpu(l_p_j_ref, first),  in cpufreq_callback()
|   784  per_cpu(l_p_j_ref_freq, first), freq->new);  in cpufreq_callback()
|   [all …]
|