| /Linux-v5.4/arch/x86/mm/ |
| D | tlb.c | 62 if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid)) in clear_asid_other()
|   |       | 87 if (this_cpu_read(cpu_tlbstate.invalidate_other)) in choose_new_asid()
|   |       | 91 if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) != in choose_new_asid()
|   |       | 96 *need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) < in choose_new_asid()
|   |       | 134 struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm); in leave_mm()
|   |       | 148 WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy)); in leave_mm()
|   |       | 249 prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb); in cond_ibpb()
|   |       | 268 if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) { in cond_ibpb()
|   |       | 278 struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm); in switch_mm_irqs_off()
|   |       | 279 u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid); in switch_mm_irqs_off()
|   |       | [all …]
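The hits above are mostly reads of the per-CPU cpu_tlbstate structure on the x86 context-switch path. As a rough illustration of the pattern (not the kernel's code: the function name and the pr_debug() reporting are made up for this sketch), reading two of those fields looks like this:

```c
#include <linux/printk.h>
#include <linux/smp.h>
#include <asm/tlbflush.h>

/*
 * Illustrative sketch only. Like switch_mm_irqs_off(), it assumes the
 * caller runs with preemption/interrupts disabled, so "this CPU" cannot
 * change between the two this_cpu_read() calls.
 */
static void demo_show_loaded_mm(void)
{
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	u16 loaded_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);

	pr_debug("cpu %d: loaded_mm=%p asid=%u\n",
		 smp_processor_id(), loaded_mm, loaded_asid);
}
```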
|
| /Linux-v5.4/arch/x86/include/asm/ |
| D | tlbflush.h | 254 struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm); in nmi_uaccess_okay()
|   |            | 297 cr4 = this_cpu_read(cpu_tlbstate.cr4); in cr4_set_bits_irqsoff()
|   |            | 307 cr4 = this_cpu_read(cpu_tlbstate.cr4); in cr4_clear_bits_irqsoff()
|   |            | 336 cr4 = this_cpu_read(cpu_tlbstate.cr4); in cr4_toggle_bits_irqsoff()
|   |            | 343 return this_cpu_read(cpu_tlbstate.cr4); in cr4_read_shadow()
|   |            | 411 invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid)); in __native_flush_tlb()
|   |            | 442 cr4 = this_cpu_read(cpu_tlbstate.cr4); in __native_flush_tlb_global()
|   |            | 456 u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid); in __native_flush_tlb_one_user()
|
| D | mmu_context.h | 350 unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd, in __get_current_cr3_fast()
|   |               | 351 this_cpu_read(cpu_tlbstate.loaded_mm_asid)); in __get_current_cr3_fast()
|   |               | 382 temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm); in use_temporary_mm()
|
| D | switch_to.h | 79 if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs)) in refresh_sysenter_cs()
|
| /Linux-v5.4/kernel/trace/ |
| D | trace_preemptirq.c | 24 if (this_cpu_read(tracing_irq_cpu)) { in trace_hardirqs_on()
|   |                    | 38 if (!this_cpu_read(tracing_irq_cpu)) { in trace_hardirqs_off()
|   |                    | 52 if (this_cpu_read(tracing_irq_cpu)) { in trace_hardirqs_on_caller()
|   |                    | 66 if (!this_cpu_read(tracing_irq_cpu)) { in trace_hardirqs_off_caller()
|
| /Linux-v5.4/arch/x86/xen/ |
| D | irq.c | 32 vcpu = this_cpu_read(xen_vcpu); in xen_save_fl()
|   |       | 54 vcpu = this_cpu_read(xen_vcpu); in xen_restore_fl()
|   |       | 73 this_cpu_read(xen_vcpu)->evtchn_upcall_mask = 1; in xen_irq_disable()
|   |       | 89 vcpu = this_cpu_read(xen_vcpu); in xen_irq_enable()
|
| D | multicalls.h | 51 local_irq_restore(this_cpu_read(xen_mc_irq_flags)); in xen_mc_issue()
|
| D | suspend.c | 44 wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl)); in xen_vcpu_notify_restore()
|
| /Linux-v5.4/drivers/irqchip/ |
| D | irq-csky-mpintc.c | 56 (this_cpu_read(intcl_reg) + INTCL_CFGR) : (INTCG_base + INTCG_CICFGR)))
|   |                   | 75 void __iomem *reg_base = this_cpu_read(intcl_reg); in csky_mpintc_handler()
|   |                   | 83 void __iomem *reg_base = this_cpu_read(intcl_reg); in csky_mpintc_enable()
|   |                   | 92 void __iomem *reg_base = this_cpu_read(intcl_reg); in csky_mpintc_disable()
|   |                   | 99 void __iomem *reg_base = this_cpu_read(intcl_reg); in csky_mpintc_eoi()
|   |                   | 215 void __iomem *reg_base = this_cpu_read(intcl_reg); in csky_mpintc_send_ipi()
|
| /Linux-v5.4/kernel/printk/ |
| D | printk_safe.c | 319 if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK) in printk_nmi_direct_enter()
|   |               | 367 if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) && in vprintk_func()
|   |               | 378 if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK) in vprintk_func()
|   |               | 382 if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK) in vprintk_func()
|
| /Linux-v5.4/kernel/ |
| D | watchdog_hld.c | 199 perf_event_enable(this_cpu_read(watchdog_ev)); in hardlockup_detector_perf_enable()
|   |                | 207 struct perf_event *event = this_cpu_read(watchdog_ev); in hardlockup_detector_perf_disable()
|   |                | 292 perf_event_release_kernel(this_cpu_read(watchdog_ev)); in hardlockup_detector_perf_init()
|
| /Linux-v5.4/arch/x86/kernel/ |
| D | dumpstack_32.c | 37 unsigned long *begin = (unsigned long *)this_cpu_read(hardirq_stack_ptr); in in_hardirq_stack()
|   |                | 62 unsigned long *begin = (unsigned long *)this_cpu_read(softirq_stack_ptr); in in_softirq_stack()
|
| D | nmi.c | 518 if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { in do_nmi()
|   |       | 549 if (unlikely(this_cpu_read(update_debug_stack))) { in do_nmi()
|   |       | 555 if (unlikely(this_cpu_read(nmi_cr2) != read_cr2())) in do_nmi()
|   |       | 556 write_cr2(this_cpu_read(nmi_cr2)); in do_nmi()
|
| D | paravirt.c | 216 BUG_ON(this_cpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE); in enter_lazy()
|   |            | 223 BUG_ON(this_cpu_read(paravirt_lazy_mode) != mode); in leave_lazy()
|   |            | 255 if (this_cpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) { in paravirt_start_context_switch()
|   |            | 278 return this_cpu_read(paravirt_lazy_mode); in paravirt_get_lazy_mode()
|
| D | kvmclock.c | 59 return &this_cpu_read(hv_clock_per_cpu)->pvti; in this_cpu_pvti()
|   |            | 64 return this_cpu_read(hv_clock_per_cpu); in this_cpu_hvclock()
|
| D | tsc.c | 69 seq = this_cpu_read(cyc2ns.seq.sequence); in cyc2ns_read_begin()
|   |       | 72 data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset); in cyc2ns_read_begin()
|   |       | 73 data->cyc2ns_mul = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul); in cyc2ns_read_begin()
|   |       | 74 data->cyc2ns_shift = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift); in cyc2ns_read_begin()
|   |       | 76 } while (unlikely(seq != this_cpu_read(cyc2ns.seq.sequence))); in cyc2ns_read_begin()
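cyc2ns_read_begin() wraps its this_cpu_read() calls in a sequence-retry loop: sample the per-CPU sequence number, copy the scaling data, and retry if the sequence changed in the meantime. A hedged sketch of that shape, using a hypothetical per-CPU structure instead of the kernel's latched seqcount (the real code additionally relies on the barriers the seqcount API provides):

```c
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/types.h>

/* Hypothetical per-CPU scaling data, for illustration only. */
struct demo_scale {
	unsigned int seq;
	u32 mul;
	u32 shift;
};
static DEFINE_PER_CPU(struct demo_scale, demo_scale);

static void demo_read_scale(u32 *mul, u32 *shift)
{
	unsigned int seq;

	preempt_disable_notrace();	/* stay on one CPU for the whole copy */
	do {
		seq = this_cpu_read(demo_scale.seq);
		*mul = this_cpu_read(demo_scale.mul);
		*shift = this_cpu_read(demo_scale.shift);
		/* retry if a writer bumped the sequence while we copied */
	} while (unlikely(seq != this_cpu_read(demo_scale.seq)));
	preempt_enable_notrace();
}
```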
|
| /Linux-v5.4/include/linux/ |
| D | context_tracking.h | 55 prev_ctx = this_cpu_read(context_tracking.state); in exception_enter()
|   |                    | 81 this_cpu_read(context_tracking.state) : CONTEXT_DISABLED; in ct_state()
|
| /Linux-v5.4/arch/x86/kernel/fpu/ |
| D | core.c | 48 return this_cpu_read(in_kernel_fpu); in kernel_fpu_disabled()
|   |        | 90 WARN_ON_FPU(this_cpu_read(in_kernel_fpu)); in kernel_fpu_begin()
|   |        | 109 WARN_ON_FPU(!this_cpu_read(in_kernel_fpu)); in kernel_fpu_end()
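kernel_fpu_begin() and kernel_fpu_end() use the per-CPU flag in_kernel_fpu as a re-entrancy guard: begin warns if it is already set and then sets it, end warns if it is clear and then clears it. A sketch of the same guard pattern with a hypothetical flag, showing this_cpu_write() as the store-side counterpart of this_cpu_read():

```c
#include <linux/bug.h>
#include <linux/percpu.h>
#include <linux/preempt.h>

/* Hypothetical per-CPU re-entrancy flag, for illustration only. */
static DEFINE_PER_CPU(bool, demo_in_section);

static void demo_section_begin(void)
{
	preempt_disable();
	WARN_ON_ONCE(this_cpu_read(demo_in_section));	/* nesting not allowed */
	this_cpu_write(demo_in_section, true);
}

static void demo_section_end(void)
{
	WARN_ON_ONCE(!this_cpu_read(demo_in_section));	/* must pair with begin */
	this_cpu_write(demo_in_section, false);
	preempt_enable();
}
```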
|
| /Linux-v5.4/arch/x86/kernel/cpu/mce/ |
| D | core.c | 706 for (i = 0; i < this_cpu_read(mce_num_banks); i++) { in machine_check_poll()
|   |        | 808 for (i = 0; i < this_cpu_read(mce_num_banks); i++) { in mce_no_way_out()
|   |        | 1088 for (i = 0; i < this_cpu_read(mce_num_banks); i++) { in mce_clear_state()
|   |        | 1146 for (i = 0; i < this_cpu_read(mce_num_banks); i++) { in __mc_scan_banks()
|   |        | 1487 u8 n_banks = this_cpu_read(mce_num_banks); in __mcheck_cpu_mce_banks_init()
|   |        | 1560 for (i = 0; i < this_cpu_read(mce_num_banks); i++) { in __mcheck_cpu_init_clear_banks()
|   |        | 1586 for (i = 0; i < this_cpu_read(mce_num_banks); i++) { in __mcheck_cpu_check_banks()
|   |        | 1638 if (c->x86 == 15 && this_cpu_read(mce_num_banks) > 4) { in __mcheck_cpu_apply_quirks()
|   |        | 1657 if (c->x86 == 6 && this_cpu_read(mce_num_banks) > 0) in __mcheck_cpu_apply_quirks()
|   |        | 1679 if (c->x86 == 6 && c->x86_model < 0x1A && this_cpu_read(mce_num_banks) > 0) in __mcheck_cpu_apply_quirks()
|   |        | [all …]
|
| /Linux-v5.4/arch/arm64/include/asm/ |
| D | simd.h | 37 !this_cpu_read(fpsimd_context_busy); in may_use_simd()
|
| /Linux-v5.4/tools/testing/radix-tree/linux/ |
| D | percpu.h | 7 #define this_cpu_read(var) var macro
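The radix-tree test harness builds kernel code in userspace, where there is no per-CPU segment, so it stubs this_cpu_read() down to a plain variable access. In the kernel proper the macro reads the current CPU's instance of a per-CPU variable; a minimal sketch of ordinary usage, with a hypothetical counter that is not taken from the listing:

```c
#include <linux/percpu.h>
#include <linux/printk.h>

/* Hypothetical per-CPU counter, for illustration only. */
static DEFINE_PER_CPU(unsigned long, demo_hits);

static void demo_count_hit(void)
{
	/* Update this CPU's copy; other CPUs have their own, so no lock. */
	this_cpu_inc(demo_hits);
}

static void demo_report(void)
{
	/* Read this CPU's copy; pin the CPU (e.g. get_cpu()/put_cpu()) if the
	 * value must refer to one specific CPU across several statements. */
	pr_info("hits on this CPU: %lu\n", this_cpu_read(demo_hits));
}
```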
|
| /Linux-v5.4/include/crypto/internal/ |
| D | simd.h | 64 (may_use_simd() && !this_cpu_read(crypto_simd_disabled_for_test))
|
| /Linux-v5.4/drivers/staging/speakup/ |
| D | fakekey.c | 86 return this_cpu_read(reporting_keystroke); in speakup_fake_key_pressed()
|
| /Linux-v5.4/net/ipv4/netfilter/ |
| D | nf_dup_ipv4.c | 55 if (this_cpu_read(nf_skb_duplicated)) in nf_dup_ipv4()
|
| /Linux-v5.4/net/ipv6/netfilter/ |
| D | nf_dup_ipv6.c | 50 if (this_cpu_read(nf_skb_duplicated)) in nf_dup_ipv6()
|