/Linux-v4.19/arch/x86/kernel/cpu/ |
D | perfctr-watchdog.c |
  45  static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)    in nmi_perfctr_msr_to_bit() argument
  50  if (msr >= MSR_F15H_PERF_CTR)    in nmi_perfctr_msr_to_bit()
  51  return (msr - MSR_F15H_PERF_CTR) >> 1;    in nmi_perfctr_msr_to_bit()
  52  return msr - MSR_K7_PERFCTR0;    in nmi_perfctr_msr_to_bit()
  55  return msr - MSR_ARCH_PERFMON_PERFCTR0;    in nmi_perfctr_msr_to_bit()
  59  return msr - MSR_P6_PERFCTR0;    in nmi_perfctr_msr_to_bit()
  61  return msr - MSR_KNC_PERFCTR0;    in nmi_perfctr_msr_to_bit()
  63  return msr - MSR_P4_BPU_PERFCTR0;    in nmi_perfctr_msr_to_bit()
  73  static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)    in nmi_evntsel_msr_to_bit() argument
  78  if (msr >= MSR_F15H_PERF_CTL)    in nmi_evntsel_msr_to_bit()
  [all …]
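nmi_perfctr_msr_to_bit() turns a performance-counter MSR address into a bit index for the watchdog's counter-reservation bitmap by subtracting the base MSR of the matching counter block. A minimal stand-alone sketch of the Fam15h case, where counter MSRs are interleaved with their control MSRs; the constant below mirrors the usual MSR_F15H_PERF_CTR value but is an assumption here, not the kernel's definition:

    /* Hypothetical stand-alone illustration, not code from perfctr-watchdog.c. */
    #define EX_MSR_F15H_PERF_CTR 0xc0010201U

    static unsigned int ex_f15h_ctr_to_bit(unsigned int msr)
    {
            /* CTL0, CTR0, CTL1, CTR1, ... so counters sit two MSRs apart */
            return (msr - EX_MSR_F15H_PERF_CTR) >> 1;
    }

    /* ex_f15h_ctr_to_bit(0xc0010205) == 2, i.e. the third Fam15h counter */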
|
/Linux-v4.19/arch/x86/include/asm/ |
D | msr.h |
  14  struct msr {    struct
  26  struct msr reg;    argument
  27  struct msr *msrs;
  74  extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
  75  extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
  76  extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
  79  static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {}    in do_trace_write_msr() argument
  80  static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}    in do_trace_read_msr() argument
  81  static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}    in do_trace_rdpmc() argument
  91  static inline unsigned long long notrace __rdmsr(unsigned int msr)    in __rdmsr() argument
  [all …]
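The surrounding results access this structure both as 32-bit halves (reg.l and reg.h in msr-smp.c) and as one 64-bit value (m->q in msr.c), which suggests struct msr wraps a union of the two views. A sketch of such a layout, stated as an assumption rather than the header's actual definition:

    #include <linux/types.h>

    /* Assumed shape only: one MSR value readable either whole (q) or as
     * low/high 32-bit halves (l/h), matching how the callers use it. */
    struct example_msr {
            union {
                    struct {
                            u32 l;
                            u32 h;
                    };
                    u64 q;
            };
    };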
|
D | msr-trace.h |
  3   #define TRACE_SYSTEM msr
  6   #define TRACE_INCLUDE_FILE msr-trace
  22  TP_PROTO(unsigned msr, u64 val, int failed),
  23  TP_ARGS(msr, val, failed),
  25  __field( unsigned, msr )
  30  __entry->msr = msr;
  35  __entry->msr,
  41  TP_PROTO(unsigned msr, u64 val, int failed),
  42  TP_ARGS(msr, val, failed)
  46  TP_PROTO(unsigned msr, u64 val, int failed),
  [all …]
|
/Linux-v4.19/arch/powerpc/kvm/ |
D | book3s_hv_tm.c |
  20  u64 msr = vcpu->arch.shregs.msr;    in emulate_tx_failure() local
  24  if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr))    in emulate_tx_failure()
  26  if (msr & MSR_PR) {    in emulate_tx_failure()
  46  u64 msr = vcpu->arch.shregs.msr;    in kvmhv_p9_tm_emulation() local
  55  WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&    in kvmhv_p9_tm_emulation()
  59  vcpu->arch.shregs.msr = newmsr;    in kvmhv_p9_tm_emulation()
  65  if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {    in kvmhv_p9_tm_emulation()
  76  if ((msr & MSR_PR) && !(vcpu->arch.fscr & FSCR_EBB)) {    in kvmhv_p9_tm_emulation()
  85  WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&    in kvmhv_p9_tm_emulation()
  91  msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;    in kvmhv_p9_tm_emulation()
  [all …]
|
D | book3s_hv_tm_builtin.c |
  26  u64 newmsr, msr, bescr;    in kvmhv_p9_tm_emulation_early() local
  37  vcpu->arch.shregs.msr = newmsr;    in kvmhv_p9_tm_emulation_early()
  44  msr = vcpu->arch.shregs.msr;    in kvmhv_p9_tm_emulation_early()
  45  if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206))    in kvmhv_p9_tm_emulation_early()
  49  ((msr & MSR_PR) && !(mfspr(SPRN_FSCR) & FSCR_EBB)))    in kvmhv_p9_tm_emulation_early()
  59  msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;    in kvmhv_p9_tm_emulation_early()
  60  vcpu->arch.shregs.msr = msr;    in kvmhv_p9_tm_emulation_early()
  69  msr = vcpu->arch.shregs.msr;    in kvmhv_p9_tm_emulation_early()
  74  newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE);    in kvmhv_p9_tm_emulation_early()
  76  vcpu->arch.shregs.msr = newmsr;    in kvmhv_p9_tm_emulation_early()
  [all …]
|
/Linux-v4.19/arch/x86/lib/ |
D | msr.c |
  9   struct msr *msrs_alloc(void)    in msrs_alloc()
  11  struct msr *msrs = NULL;    in msrs_alloc()
  13  msrs = alloc_percpu(struct msr);    in msrs_alloc()
  23  void msrs_free(struct msr *msrs)    in msrs_free()
  39  int msr_read(u32 msr, struct msr *m)    in msr_read() argument
  44  err = rdmsrl_safe(msr, &val);    in msr_read()
  57  int msr_write(u32 msr, struct msr *m)    in msr_write() argument
  59  return wrmsrl_safe(msr, m->q);    in msr_write()
  62  static inline int __flip_bit(u32 msr, u8 bit, bool set)    in __flip_bit() argument
  64  struct msr m, m1;    in __flip_bit()
  [all …]
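msr_read() and msr_write() here are thin error-checked wrappers around rdmsrl_safe() and wrmsrl_safe(). A minimal kernel-side sketch of that underlying read-modify-write pattern, with a placeholder MSR number:

    #include <asm/msr.h>
    #include <linux/errno.h>

    #define EXAMPLE_MSR 0x10    /* placeholder register number, example only */

    static int example_set_bit0(void)
    {
            u64 val;

            if (rdmsrl_safe(EXAMPLE_MSR, &val))    /* returns 0 on success */
                    return -EIO;
            val |= 1ULL;                           /* modify the 64-bit value */
            return wrmsrl_safe(EXAMPLE_MSR, val) ? -EIO : 0;
    }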
|
D | msr-smp.c |
  11   struct msr *reg;    in __rdmsr_on_cpu()
  25   struct msr *reg;    in __wrmsr_on_cpu()
  100  struct msr *msrs,    in __rwmsr_on_cpus()
  127  void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)    in rdmsr_on_cpus()
  141  void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)    in wrmsr_on_cpus()
  148  struct msr_info msr;    member
  158  rv->msr.err = rdmsr_safe(rv->msr.msr_no, &rv->msr.reg.l, &rv->msr.reg.h);    in __rdmsr_safe_on_cpu()
  180  rv.msr.msr_no = msr_no;    in rdmsr_safe_on_cpu()
  185  err = rv.msr.err;    in rdmsr_safe_on_cpu()
  187  *l = rv.msr.reg.l;    in rdmsr_safe_on_cpu()
  [all …]
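rdmsr_safe_on_cpu() performs the read on the requested CPU and hands back the two 32-bit halves of the register. A short kernel-side usage sketch, again with a placeholder MSR number:

    #include <asm/msr.h>
    #include <linux/printk.h>

    #define EXAMPLE_MSR 0x10    /* placeholder register number, example only */

    static void example_read_on_cpu(unsigned int cpu)
    {
            u32 lo, hi;

            /* returns 0 on success; the rdmsr runs on the requested CPU */
            if (rdmsr_safe_on_cpu(cpu, EXAMPLE_MSR, &lo, &hi))
                    pr_warn("MSR %#x not readable on CPU %u\n", EXAMPLE_MSR, cpu);
            else
                    pr_info("CPU %u: MSR %#x = %#llx\n", cpu, EXAMPLE_MSR,
                            ((u64)hi << 32) | lo);
    }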
|
/Linux-v4.19/arch/m68k/bvme6000/ |
D | config.c |
  161  unsigned char msr = rtc->msr & 0xc0;    in bvme6000_timer_int() local
  163  rtc->msr = msr | 0x20; /* Ack the interrupt */    in bvme6000_timer_int()
  180  unsigned char msr = rtc->msr & 0xc0;    in bvme6000_sched_init() local
  182  rtc->msr = 0; /* Ensure timer registers accessible */    in bvme6000_sched_init()
  193  rtc->msr = 0x40; /* Access int.cntrl, etc */    in bvme6000_sched_init()
  198  rtc->msr = 0; /* Access timer 1 control */    in bvme6000_sched_init()
  201  rtc->msr = msr;    in bvme6000_sched_init()
  222  unsigned char msr = rtc->msr & 0xc0;    in bvme6000_gettimeoffset() local
  226  rtc->msr = 0; /* Ensure timer registers accessible */    in bvme6000_gettimeoffset()
  230  t1int = rtc->msr & 0x20;    in bvme6000_gettimeoffset()
  [all …]
|
D | rtc.c |
  42   unsigned char msr;    in rtc_ioctl() local
  52   msr = rtc->msr & 0xc0;    in rtc_ioctl()
  53   rtc->msr = 0x40;    in rtc_ioctl()
  66   rtc->msr = msr;    in rtc_ioctl()
  108  msr = rtc->msr & 0xc0;    in rtc_ioctl()
  109  rtc->msr = 0x40;    in rtc_ioctl()
  123  rtc->msr = msr;    in rtc_ioctl()
|
/Linux-v4.19/tools/power/x86/turbostat/ |
D | turbostat.c |
  374   int get_msr(int cpu, off_t offset, unsigned long long *msr)    in get_msr() argument
  378   retval = pread(get_msr_fd(cpu), msr, sizeof(*msr), offset);    in get_msr()
  380   if (retval != sizeof *msr)    in get_msr()
  1704  unsigned long long msr;    in get_counters() local
  1780  if (get_msr(cpu, MSR_SMI_COUNT, &msr))    in get_counters()
  1782  t->smi_count = msr & 0xFFFFFFFF;    in get_counters()
  1820  if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))    in get_counters()
  1822  c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);    in get_counters()
  1885  if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr))    in get_counters()
  1887  p->energy_pkg = msr & 0xFFFFFFFF;    in get_counters()
  [all …]
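turbostat's get_msr() is an 8-byte pread() from the msr character device (/dev/cpu/N/msr, provided by the msr driver), with the register number used as the file offset. A stripped-down user-space sketch of the same mechanism; it needs the msr driver loaded and root, and the register number below is only an example:

    /* build: cc -o rdmsr-example rdmsr-example.c ; run as root */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include <unistd.h>

    int main(void)
    {
            unsigned long long val;
            int cpu = 0;
            off_t offset = 0x10;    /* example: IA32_TIME_STAMP_COUNTER */
            char path[64];
            int fd;

            snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
            fd = open(path, O_RDONLY);
            if (fd < 0) { perror("open"); return 1; }

            /* the register number is the file offset, as in get_msr() */
            if (pread(fd, &val, sizeof(val), offset) != sizeof(val)) {
                    perror("pread");
                    return 1;
            }
            printf("cpu%d msr 0x%llx = 0x%llx\n",
                   cpu, (unsigned long long)offset, val);
            close(fd);
            return 0;
    }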
|
/Linux-v4.19/arch/microblaze/kernel/ |
D | process.c |
  47   regs->msr, regs->ear, regs->esr, regs->fsr);    in show_regs()
  72   local_save_flags(childregs->msr);    in copy_thread()
  74   ti->cpu_context.msr = childregs->msr & ~MSR_IE;    in copy_thread()
  86   ti->cpu_context.msr = (unsigned long)childregs->msr;    in copy_thread()
  88   childregs->msr |= MSR_UMS;    in copy_thread()
  100  childregs->msr &= ~MSR_EIP;    in copy_thread()
  101  childregs->msr |= MSR_IE;    in copy_thread()
  102  childregs->msr &= ~MSR_VM;    in copy_thread()
  103  childregs->msr |= MSR_VMS;    in copy_thread()
  104  childregs->msr |= MSR_EE; /* exceptions will be enabled*/    in copy_thread()
  [all …]
|
/Linux-v4.19/arch/x86/kvm/ |
D | mtrr.c |
  29   static bool msr_mtrr_valid(unsigned msr)    in msr_mtrr_valid() argument
  31   switch (msr) {    in msr_mtrr_valid()
  61   bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)    in kvm_mtrr_valid() argument
  66   if (!msr_mtrr_valid(msr))    in kvm_mtrr_valid()
  69   if (msr == MSR_IA32_CR_PAT) {    in kvm_mtrr_valid()
  74   } else if (msr == MSR_MTRRdefType) {    in kvm_mtrr_valid()
  78   } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {    in kvm_mtrr_valid()
  86   WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));    in kvm_mtrr_valid()
  89   if ((msr & 1) == 0) {    in kvm_mtrr_valid()
  196  static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)    in fixed_msr_to_seg_unit() argument
  [all …]
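The 0x200 range check and the `(msr & 1) == 0` test reflect the variable-range MTRR layout: base and mask registers alternate starting at MSR 0x200 (MTRRphysBase0, MTRRphysMask0, MTRRphysBase1, ...). A small stand-alone illustration of that indexing, offered as background rather than code taken from this file:

    /* 0x200 MTRRphysBase0, 0x201 MTRRphysMask0, 0x202 MTRRphysBase1, ... */
    static unsigned int var_mtrr_index(unsigned int msr)
    {
            return (msr - 0x200) / 2;    /* e.g. 0x203 -> variable range 1 */
    }

    static int var_mtrr_is_base(unsigned int msr)
    {
            return (msr & 1) == 0;       /* even = base, odd = mask */
    }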
|
D | pmu.h |
  30   bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
  31   int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
  85   static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,    in get_gp_pmc() argument
  88   if (msr >= base && msr < base + pmu->nr_arch_gp_counters)    in get_gp_pmc()
  89   return &pmu->gp_counters[msr - base];    in get_gp_pmc()
  95   static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)    in get_fixed_pmc() argument
  99   if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)    in get_fixed_pmc()
  100  return &pmu->fixed_counters[msr - base];    in get_fixed_pmc()
  113  bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
  114  int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
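get_gp_pmc() maps a counter MSR onto the gp_counters array by subtracting the base MSR of the block and bounds-checking against the counter count. A stand-alone illustration of the arithmetic, using the architectural IA32_PMC0 address (0xc1) as the example base:

    /* With base 0xc1 (IA32_PMC0) and 4 general-purpose counters:
     *   msr 0xc1 -> index 0, msr 0xc3 -> index 2, msr 0xc5 -> not a counter */
    static int gp_counter_index(unsigned int msr, unsigned int base, unsigned int nr)
    {
            if (msr >= base && msr < base + nr)
                    return msr - base;
            return -1;    /* outside the counter block */
    }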
|
D | pmu_amd.c |
  66   static enum index msr_to_index(u32 msr)    in msr_to_index() argument
  68   switch (msr) {    in msr_to_index()
  100  static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,    in get_gp_pmc_amd() argument
  103  switch (msr) {    in get_gp_pmc_amd()
  128  return &pmu->gp_counters[msr_to_index(msr)];    in get_gp_pmc_amd()
  202  static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)    in amd_is_valid_msr() argument
  207  ret = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER) ||    in amd_is_valid_msr()
  208  get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);    in amd_is_valid_msr()
  213  static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)    in amd_pmu_get_msr() argument
  219  pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);    in amd_pmu_get_msr()
  [all …]
|
D | pmu_intel.c |
  145  static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)    in intel_is_valid_msr() argument
  150  switch (msr) {    in intel_is_valid_msr()
  158  ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||    in intel_is_valid_msr()
  159  get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||    in intel_is_valid_msr()
  160  get_fixed_pmc(pmu, msr);    in intel_is_valid_msr()
  167  static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)    in intel_pmu_get_msr() argument
  172  switch (msr) {    in intel_pmu_get_msr()
  186  if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||    in intel_pmu_get_msr()
  187  (pmc = get_fixed_pmc(pmu, msr))) {    in intel_pmu_get_msr()
  190  } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {    in intel_pmu_get_msr()
  [all …]
|
/Linux-v4.19/arch/x86/xen/ |
D | pmu.c |
  125  static inline bool is_amd_pmu_msr(unsigned int msr)    in is_amd_pmu_msr() argument
  127  if ((msr >= MSR_F15H_PERF_CTL &&    in is_amd_pmu_msr()
  128  msr < MSR_F15H_PERF_CTR + (amd_num_counters * 2)) ||    in is_amd_pmu_msr()
  129  (msr >= MSR_K7_EVNTSEL0 &&    in is_amd_pmu_msr()
  130  msr < MSR_K7_PERFCTR0 + amd_num_counters))    in is_amd_pmu_msr()
  182  static bool xen_intel_pmu_emulate(unsigned int msr, u64 *val, int type,    in xen_intel_pmu_emulate() argument
  198  switch (msr) {    in xen_intel_pmu_emulate()
  236  if (msr == MSR_CORE_PERF_GLOBAL_OVF_CTRL)    in xen_intel_pmu_emulate()
  245  static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read)    in xen_amd_pmu_emulate() argument
  258  ((msr >= MSR_K7_EVNTSEL0) && (msr <= MSR_K7_PERFCTR3)))    in xen_amd_pmu_emulate()
  [all …]
|
/Linux-v4.19/arch/powerpc/kernel/ |
D | signal_64.c |
  112  unsigned long msr = regs->msr;    in setup_sigcontext() local
  131  msr |= MSR_VEC;    in setup_sigcontext()
  154  msr &= ~MSR_VSX;    in setup_sigcontext()
  168  msr |= MSR_VSX;    in setup_sigcontext()
  174  err |= __put_user(msr, &sc->gp_regs[PT_MSR]);    in setup_sigcontext()
  214  unsigned long msr = tsk->thread.regs->msr;    in setup_tm_sigcontexts() local
  219  BUG_ON(!MSR_TM_ACTIVE(regs->msr));    in setup_tm_sigcontexts()
  227  msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);    in setup_tm_sigcontexts()
  234  regs->msr &= ~MSR_TS_MASK;    in setup_tm_sigcontexts()
  248  if (msr & MSR_VEC)    in setup_tm_sigcontexts()
  [all …]
|
D | process.c |
  98   MSR_TM_ACTIVE(tsk->thread.regs->msr) &&    in check_if_tm_restore_required()
  100  tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;    in check_if_tm_restore_required()
  105  static inline bool msr_tm_active(unsigned long msr)    in msr_tm_active() argument
  107  return MSR_TM_ACTIVE(msr);    in msr_tm_active()
  112  return msr_tm_active(tsk->thread.regs->msr) &&    in tm_active_with_fp()
  113  (tsk->thread.ckpt_regs.msr & MSR_FP);    in tm_active_with_fp()
  118  return msr_tm_active(tsk->thread.regs->msr) &&    in tm_active_with_altivec()
  119  (tsk->thread.ckpt_regs.msr & MSR_VEC);    in tm_active_with_altivec()
  122  static inline bool msr_tm_active(unsigned long msr) { return false; }    in msr_tm_active() argument
  179  unsigned long msr;    in __giveup_fpu() local
  [all …]
|
D | signal_32.c |
  391  unsigned long msr = regs->msr;    in save_user_regs() local
  409  msr |= MSR_VEC;    in save_user_regs()
  431  msr &= ~MSR_VSX;    in save_user_regs()
  443  msr |= MSR_VSX;    in save_user_regs()
  455  msr |= MSR_SPE;    in save_user_regs()
  464  if (__put_user(msr, &frame->mc_gregs[PT_MSR]))    in save_user_regs()
  498  unsigned long msr = regs->msr;    in save_tm_user_regs() local
  507  regs->msr &= ~MSR_TS_MASK;    in save_tm_user_regs()
  520  if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))    in save_tm_user_regs()
  529  if (msr & MSR_VEC) {    in save_tm_user_regs()
  [all …]
|
/Linux-v4.19/tools/power/x86/x86_energy_perf_policy/ |
D | x86_energy_perf_policy.c |
  627  int get_msr(int cpu, int offset, unsigned long long *msr)    in get_msr() argument
  638  retval = pread(fd, msr, sizeof(*msr), offset);    in get_msr()
  639  if (retval != sizeof(*msr))    in get_msr()
  643  fprintf(stderr, "get_msr(cpu%d, 0x%X, 0x%llX)\n", cpu, offset, *msr);    in get_msr()
  682  unsigned long long msr;    in read_hwp_cap() local
  684  get_msr(cpu, msr_offset, &msr);    in read_hwp_cap()
  686  cap->highest = msr_perf_2_ratio(HWP_HIGHEST_PERF(msr));    in read_hwp_cap()
  687  cap->guaranteed = msr_perf_2_ratio(HWP_GUARANTEED_PERF(msr));    in read_hwp_cap()
  688  cap->efficient = msr_perf_2_ratio(HWP_MOSTEFFICIENT_PERF(msr));    in read_hwp_cap()
  689  cap->lowest = msr_perf_2_ratio(HWP_LOWEST_PERF(msr));    in read_hwp_cap()
  [all …]
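read_hwp_cap() pulls the four performance levels out of one 64-bit capabilities MSR. A user-space sketch of the field extraction, assuming the IA32_HWP_CAPABILITIES layout from the SDM (highest performance in bits 7:0, guaranteed in 15:8, most efficient in 23:16, lowest in 31:24); the ratio conversion done by msr_perf_2_ratio() is omitted, and the sample value is made up:

    #include <stdio.h>

    struct hwp_cap { unsigned highest, guaranteed, efficient, lowest; };

    /* Assumed IA32_HWP_CAPABILITIES field layout, per the SDM. */
    static struct hwp_cap decode_hwp_cap(unsigned long long msr)
    {
            struct hwp_cap cap;

            cap.highest    = (msr >>  0) & 0xff;
            cap.guaranteed = (msr >>  8) & 0xff;
            cap.efficient  = (msr >> 16) & 0xff;
            cap.lowest     = (msr >> 24) & 0xff;
            return cap;
    }

    int main(void)
    {
            /* sample value, not read from hardware */
            struct hwp_cap cap = decode_hwp_cap(0x010c1824ULL);

            printf("highest %u guaranteed %u efficient %u lowest %u\n",
                   cap.highest, cap.guaranteed, cap.efficient, cap.lowest);
            return 0;    /* prints: highest 36 guaranteed 24 efficient 12 lowest 1 */
    }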
|
/Linux-v4.19/arch/arm64/mm/ |
D | proc.S |
  107  msr tpidr_el0, x2
  108  msr tpidrro_el0, x3
  109  msr contextidr_el1, x4
  110  msr cpacr_el1, x6
  116  msr tcr_el1, x8
  117  msr vbar_el1, x9
  126  msr mdscr_el1, x10
  128  msr sctlr_el1, x12
  130  msr tpidr_el1, x13
  132  msr tpidr_el2, x13
  [all …]
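In the arm64 results, `msr` is the AArch64 instruction that writes a system register (its read counterpart is `mrs`), not the x86 model-specific-register API seen above. A small sketch of the same operation from C via inline assembly, using TPIDR_EL0 as the example because it is accessible from user space; note that actually writing it in a real program would clobber the thread pointer used for TLS:

    #include <stdint.h>
    #include <stdio.h>

    /* AArch64 only: read a system register with mrs, write one with msr. */
    static inline uint64_t read_tpidr_el0(void)
    {
            uint64_t v;

            asm volatile("mrs %0, tpidr_el0" : "=r"(v));
            return v;
    }

    static inline void write_tpidr_el0(uint64_t v)
    {
            asm volatile("msr tpidr_el0, %0" : : "r"(v));
    }

    int main(void)
    {
            printf("tpidr_el0 = %#llx\n",
                   (unsigned long long)read_tpidr_el0());
            return 0;
    }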
|
/Linux-v4.19/arch/arm64/kvm/ |
D | hyp-init.S |
  68   msr ttbr0_el2, x4
  98   msr tcr_el2, x4
  101  msr mair_el2, x4
  115  msr sctlr_el2, x4
  121  msr vbar_el2, x2
  124  msr tpidr_el2, x3
  135  msr elr_el2, x1
  137  msr spsr_el2, x0
  156  msr sctlr_el2, x5
  161  msr vbar_el2, x5
|
/Linux-v4.19/arch/x86/power/ |
D | cpu.c |
  39  struct saved_msr *msr = ctxt->saved_msrs.array;    in msr_save_context() local
  40  struct saved_msr *end = msr + ctxt->saved_msrs.num;    in msr_save_context()
  42  while (msr < end) {    in msr_save_context()
  43  msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q);    in msr_save_context()
  44  msr++;    in msr_save_context()
  50  struct saved_msr *msr = ctxt->saved_msrs.array;    in msr_restore_context() local
  51  struct saved_msr *end = msr + ctxt->saved_msrs.num;    in msr_restore_context()
  53  while (msr < end) {    in msr_restore_context()
  54  if (msr->valid)    in msr_restore_context()
  55  wrmsrl(msr->info.msr_no, msr->info.reg.q);    in msr_restore_context()
  [all …]
|
/Linux-v4.19/tools/power/cpupower/debug/i386/ |
D | centrino-decode.c |
  29  static int rdmsr(unsigned int cpu, unsigned int msr,    in rdmsr() argument
  48  if (lseek(fd, msr, SEEK_CUR) == -1)    in rdmsr()
  64  static void decode (unsigned int msr)    in decode() argument
  69  multiplier = ((msr >> 8) & 0xFF);    in decode()
  71  mv = (((msr & 0xFF) * 16) + 700);    in decode()
  73  printf("0x%x means multiplier %d @ %d mV\n", msr, multiplier, mv);    in decode()
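decode() splits the Enhanced SpeedStep performance-state value into a bus multiplier (bits 15:8) and a voltage encoded in 16 mV steps above 700 mV (bits 7:0). A worked example of that arithmetic on a made-up sample value:

    /* Sample value 0x0d18 (not read from hardware):
     *   multiplier = (0x0d18 >> 8) & 0xFF         = 0x0d = 13
     *   mv         = ((0x0d18 & 0xFF) * 16) + 700 = 24 * 16 + 700 = 1084
     * so decode(0x0d18) would print:
     *   0xd18 means multiplier 13 @ 1084 mV */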
|
/Linux-v4.19/arch/arm64/kernel/ |
D | head.S |
  411  msr sp_el0, x5 // Save thread_info
  414  msr vbar_el1, x8 // vector table address
  471  msr SPsel, #1 // We want to use SP_EL{1,2}
  476  msr sctlr_el1, x0
  482  msr sctlr_el2, x0
  502  msr hcr_el2, x0
  518  msr cnthctl_el2, x0
  520  msr cntvoff_el2, xzr // Clear virtual offset
  544  msr vpidr_el2, x0
  545  msr vmpidr_el2, x1
  [all …]
|