Searched refs:msr (Results 1 – 25 of 310) sorted by relevance

/Linux-v5.4/arch/x86/kernel/cpu/
perfctr-watchdog.c
45 static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr) in nmi_perfctr_msr_to_bit() argument
51 if (msr >= MSR_F15H_PERF_CTR) in nmi_perfctr_msr_to_bit()
52 return (msr - MSR_F15H_PERF_CTR) >> 1; in nmi_perfctr_msr_to_bit()
53 return msr - MSR_K7_PERFCTR0; in nmi_perfctr_msr_to_bit()
56 return msr - MSR_ARCH_PERFMON_PERFCTR0; in nmi_perfctr_msr_to_bit()
60 return msr - MSR_P6_PERFCTR0; in nmi_perfctr_msr_to_bit()
62 return msr - MSR_KNC_PERFCTR0; in nmi_perfctr_msr_to_bit()
64 return msr - MSR_P4_BPU_PERFCTR0; in nmi_perfctr_msr_to_bit()
74 static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr) in nmi_evntsel_msr_to_bit() argument
80 if (msr >= MSR_F15H_PERF_CTL) in nmi_evntsel_msr_to_bit()
[all …]
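
Context for the mapping above: on AMD Family 15h the event-select and counter MSRs are interleaved (PERF_CTL0, PERF_CTR0, PERF_CTL1, ...), so the offset from the counter base is halved to get a counter index, while the older K7 counters are contiguous. A minimal standalone sketch of that arithmetic, assuming the usual msr-index.h addresses:

/* Illustration only: the same index math as nmi_perfctr_msr_to_bit() above. */
#define MSR_K7_PERFCTR0   0xc0010004  /* K7: PERFCTR0..3 are contiguous */
#define MSR_F15H_PERF_CTR 0xc0010201  /* F15h: CTL/CTR pairs alternate  */

static unsigned int amd_perfctr_index(unsigned int msr)
{
    if (msr >= MSR_F15H_PERF_CTR)
        return (msr - MSR_F15H_PERF_CTR) >> 1;  /* 0xc0010205 -> 2 */
    return msr - MSR_K7_PERFCTR0;               /* 0xc0010006 -> 2 */
}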
/Linux-v5.4/arch/x86/include/asm/
msr.h
14 struct msr { struct
26 struct msr reg; argument
27 struct msr *msrs;
74 extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
75 extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
76 extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
79 static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {} in do_trace_write_msr() argument
80 static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {} in do_trace_read_msr() argument
81 static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {} in do_trace_rdpmc() argument
91 static inline unsigned long long notrace __rdmsr(unsigned int msr) in __rdmsr() argument
[all …]
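
For reference, the __rdmsr() declared above wraps the RDMSR instruction: ECX selects the MSR and the 64-bit value is returned in EDX:EAX. A minimal sketch of the same operation, without the kernel's exception-table fixup:

/* Sketch only: the real __rdmsr() also registers an exception fixup. */
static inline unsigned long long rdmsr_sketch(unsigned int msr)
{
    unsigned int lo, hi;

    /* RDMSR: ECX = MSR index in, EDX:EAX = value out */
    asm volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr));
    return ((unsigned long long)hi << 32) | lo;
}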
msr-trace.h
3 #define TRACE_SYSTEM msr
6 #define TRACE_INCLUDE_FILE msr-trace
22 TP_PROTO(unsigned msr, u64 val, int failed),
23 TP_ARGS(msr, val, failed),
25 __field( unsigned, msr )
30 __entry->msr = msr;
35 __entry->msr,
41 TP_PROTO(unsigned msr, u64 val, int failed),
42 TP_ARGS(msr, val, failed)
46 TP_PROTO(unsigned msr, u64 val, int failed),
[all …]
/Linux-v5.4/arch/powerpc/kvm/
book3s_hv_tm.c
17 u64 msr = vcpu->arch.shregs.msr; in emulate_tx_failure() local
21 if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) in emulate_tx_failure()
23 if (msr & MSR_PR) { in emulate_tx_failure()
43 u64 msr = vcpu->arch.shregs.msr; in kvmhv_p9_tm_emulation() local
52 WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) && in kvmhv_p9_tm_emulation()
56 vcpu->arch.shregs.msr = newmsr; in kvmhv_p9_tm_emulation()
62 if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) { in kvmhv_p9_tm_emulation()
73 if ((msr & MSR_PR) && !(vcpu->arch.fscr & FSCR_EBB)) { in kvmhv_p9_tm_emulation()
82 WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) && in kvmhv_p9_tm_emulation()
88 msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; in kvmhv_p9_tm_emulation()
[all …]
book3s_hv_tm_builtin.c
23 u64 newmsr, msr, bescr; in kvmhv_p9_tm_emulation_early() local
34 vcpu->arch.shregs.msr = newmsr; in kvmhv_p9_tm_emulation_early()
41 msr = vcpu->arch.shregs.msr; in kvmhv_p9_tm_emulation_early()
42 if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) in kvmhv_p9_tm_emulation_early()
46 ((msr & MSR_PR) && !(mfspr(SPRN_FSCR) & FSCR_EBB))) in kvmhv_p9_tm_emulation_early()
56 msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; in kvmhv_p9_tm_emulation_early()
57 vcpu->arch.shregs.msr = msr; in kvmhv_p9_tm_emulation_early()
66 msr = vcpu->arch.shregs.msr; in kvmhv_p9_tm_emulation_early()
71 newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE); in kvmhv_p9_tm_emulation_early()
73 vcpu->arch.shregs.msr = newmsr; in kvmhv_p9_tm_emulation_early()
[all …]
/Linux-v5.4/arch/x86/lib/
msr.c
9 struct msr *msrs_alloc(void) in msrs_alloc()
11 struct msr *msrs = NULL; in msrs_alloc()
13 msrs = alloc_percpu(struct msr); in msrs_alloc()
23 void msrs_free(struct msr *msrs) in msrs_free()
39 int msr_read(u32 msr, struct msr *m) in msr_read() argument
44 err = rdmsrl_safe(msr, &val); in msr_read()
57 int msr_write(u32 msr, struct msr *m) in msr_write() argument
59 return wrmsrl_safe(msr, m->q); in msr_write()
62 static inline int __flip_bit(u32 msr, u8 bit, bool set) in __flip_bit() argument
64 struct msr m, m1; in __flip_bit()
[all …]
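
msr_read() and msr_write() above are failure-tolerant wrappers around rdmsrl_safe()/wrmsrl_safe(); struct msr overlays the 64-bit value (.q) with its 32-bit halves (.l/.h). A hedged sketch of kernel-side usage (the MSR chosen here is purely illustrative):

/* Sketch: reading an MSR through the safe wrapper above (kernel context). */
static void dump_misc_enable(void)
{
    struct msr m;

    if (!msr_read(MSR_IA32_MISC_ENABLE, &m))    /* returns 0 on success */
        pr_info("MISC_ENABLE = %#llx (lo %#x, hi %#x)\n", m.q, m.l, m.h);
}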
msr-smp.c
11 struct msr *reg; in __rdmsr_on_cpu()
25 struct msr *reg; in __wrmsr_on_cpu()
100 struct msr *msrs, in __rwmsr_on_cpus()
127 void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs) in rdmsr_on_cpus()
141 void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs) in wrmsr_on_cpus()
148 struct msr_info msr; member
158 rv->msr.err = rdmsr_safe(rv->msr.msr_no, &rv->msr.reg.l, &rv->msr.reg.h); in __rdmsr_safe_on_cpu()
180 rv.msr.msr_no = msr_no; in rdmsr_safe_on_cpu()
185 err = rv.msr.err; in rdmsr_safe_on_cpu()
187 *l = rv.msr.reg.l; in rdmsr_safe_on_cpu()
[all …]
/Linux-v5.4/tools/power/x86/turbostat/
turbostat.c
380 int get_msr(int cpu, off_t offset, unsigned long long *msr) in get_msr() argument
384 retval = pread(get_msr_fd(cpu), msr, sizeof(*msr), offset); in get_msr()
386 if (retval != sizeof *msr) in get_msr()
1777 unsigned long long msr; in get_counters() local
1854 if (get_msr(cpu, MSR_SMI_COUNT, &msr)) in get_counters()
1856 t->smi_count = msr & 0xFFFFFFFF; in get_counters()
1894 if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr)) in get_counters()
1896 c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F); in get_counters()
1900 if (get_msr(cpu, MSR_CORE_ENERGY_STAT, &msr)) in get_counters()
1902 c->core_energy = msr & 0xFFFFFFFF; in get_counters()
[all …]
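
turbostat's get_msr() above reads MSRs from userspace by pread()ing the msr driver's character device, with the MSR address used as the file offset. A minimal self-contained sketch of the same technique (assumes the msr module is loaded and root privileges):

/* Userspace MSR read via /dev/cpu/N/msr, as turbostat does. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    unsigned long long msr;
    int fd = open("/dev/cpu/0/msr", O_RDONLY);

    if (fd < 0) {
        perror("open /dev/cpu/0/msr");
        return 1;
    }
    /* Offset 0x10 is IA32_TIME_STAMP_COUNTER. */
    if (pread(fd, &msr, sizeof(msr), 0x10) != sizeof(msr)) {
        perror("pread");
        return 1;
    }
    printf("TSC = %llu\n", msr);
    close(fd);
    return 0;
}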
/Linux-v5.4/arch/m68k/bvme6000/
config.c
172 unsigned char msr; in bvme6000_timer_int() local
175 msr = rtc->msr & 0xc0; in bvme6000_timer_int()
176 rtc->msr = msr | 0x20; /* Ack the interrupt */ in bvme6000_timer_int()
197 unsigned char msr = rtc->msr & 0xc0; in bvme6000_sched_init() local
199 rtc->msr = 0; /* Ensure timer registers accessible */ in bvme6000_sched_init()
209 rtc->msr = 0x40; /* Access int.cntrl, etc */ in bvme6000_sched_init()
214 rtc->msr = 0; /* Access timer 1 control */ in bvme6000_sched_init()
217 rtc->msr = msr; in bvme6000_sched_init()
239 unsigned char msr, msb; in bvme6000_read_clk() local
245 msr = rtc->msr & 0xc0; in bvme6000_read_clk()
[all …]
rtc.c
42 unsigned char msr; in rtc_ioctl() local
52 msr = rtc->msr & 0xc0; in rtc_ioctl()
53 rtc->msr = 0x40; in rtc_ioctl()
66 rtc->msr = msr; in rtc_ioctl()
108 msr = rtc->msr & 0xc0; in rtc_ioctl()
109 rtc->msr = 0x40; in rtc_ioctl()
123 rtc->msr = msr; in rtc_ioctl()
/Linux-v5.4/arch/microblaze/kernel/
process.c
47 regs->msr, regs->ear, regs->esr, regs->fsr); in show_regs()
72 local_save_flags(childregs->msr); in copy_thread()
74 ti->cpu_context.msr = childregs->msr & ~MSR_IE; in copy_thread()
86 ti->cpu_context.msr = (unsigned long)childregs->msr; in copy_thread()
88 childregs->msr |= MSR_UMS; in copy_thread()
100 childregs->msr &= ~MSR_EIP; in copy_thread()
101 childregs->msr |= MSR_IE; in copy_thread()
102 childregs->msr &= ~MSR_VM; in copy_thread()
103 childregs->msr |= MSR_VMS; in copy_thread()
104 childregs->msr |= MSR_EE; /* exceptions will be enabled*/ in copy_thread()
[all …]
/Linux-v5.4/arch/x86/kvm/
mtrr.c
27 static bool msr_mtrr_valid(unsigned msr) in msr_mtrr_valid() argument
29 switch (msr) { in msr_mtrr_valid()
54 bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data) in kvm_mtrr_valid() argument
59 if (!msr_mtrr_valid(msr)) in kvm_mtrr_valid()
62 if (msr == MSR_IA32_CR_PAT) { in kvm_mtrr_valid()
64 } else if (msr == MSR_MTRRdefType) { in kvm_mtrr_valid()
68 } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) { in kvm_mtrr_valid()
76 WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR)); in kvm_mtrr_valid()
79 if ((msr & 1) == 0) { in kvm_mtrr_valid()
186 static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit) in fixed_msr_to_seg_unit() argument
[all …]
pmu.h
31 bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
32 int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
86 static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr, in get_gp_pmc() argument
89 if (msr >= base && msr < base + pmu->nr_arch_gp_counters) in get_gp_pmc()
90 return &pmu->gp_counters[msr - base]; in get_gp_pmc()
96 static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr) in get_fixed_pmc() argument
100 if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) in get_fixed_pmc()
101 return &pmu->fixed_counters[msr - base]; in get_fixed_pmc()
114 bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
115 int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
pmu_amd.c
64 static enum index msr_to_index(u32 msr) in msr_to_index() argument
66 switch (msr) { in msr_to_index()
98 static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr, in get_gp_pmc_amd() argument
101 switch (msr) { in get_gp_pmc_amd()
126 return &pmu->gp_counters[msr_to_index(msr)]; in get_gp_pmc_amd()
200 static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) in amd_is_valid_msr() argument
205 ret = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER) || in amd_is_valid_msr()
206 get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL); in amd_is_valid_msr()
211 static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data) in amd_pmu_get_msr() argument
217 pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER); in amd_pmu_get_msr()
[all …]
/Linux-v5.4/arch/x86/xen/
pmu.c
132 static inline bool is_amd_pmu_msr(unsigned int msr) in is_amd_pmu_msr() argument
134 if ((msr >= MSR_F15H_PERF_CTL && in is_amd_pmu_msr()
135 msr < MSR_F15H_PERF_CTR + (amd_num_counters * 2)) || in is_amd_pmu_msr()
136 (msr >= MSR_K7_EVNTSEL0 && in is_amd_pmu_msr()
137 msr < MSR_K7_PERFCTR0 + amd_num_counters)) in is_amd_pmu_msr()
189 static bool xen_intel_pmu_emulate(unsigned int msr, u64 *val, int type, in xen_intel_pmu_emulate() argument
205 switch (msr) { in xen_intel_pmu_emulate()
243 if (msr == MSR_CORE_PERF_GLOBAL_OVF_CTRL) in xen_intel_pmu_emulate()
252 static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read) in xen_amd_pmu_emulate() argument
265 ((msr >= MSR_K7_EVNTSEL0) && (msr <= MSR_K7_PERFCTR3))) in xen_amd_pmu_emulate()
[all …]
/Linux-v5.4/arch/powerpc/kernel/
signal_64.c
108 unsigned long msr = regs->msr; in setup_sigcontext() local
127 msr |= MSR_VEC; in setup_sigcontext()
150 msr &= ~MSR_VSX; in setup_sigcontext()
164 msr |= MSR_VSX; in setup_sigcontext()
170 err |= __put_user(msr, &sc->gp_regs[PT_MSR]); in setup_sigcontext()
210 unsigned long msr = tsk->thread.regs->msr; in setup_tm_sigcontexts() local
215 BUG_ON(!MSR_TM_ACTIVE(regs->msr)); in setup_tm_sigcontexts()
223 msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX); in setup_tm_sigcontexts()
230 regs->msr &= ~MSR_TS_MASK; in setup_tm_sigcontexts()
244 if (msr & MSR_VEC) in setup_tm_sigcontexts()
[all …]
process.c
97 MSR_TM_ACTIVE(tsk->thread.regs->msr) && in check_if_tm_restore_required()
99 tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr; in check_if_tm_restore_required()
161 unsigned long msr; in __giveup_fpu() local
164 msr = tsk->thread.regs->msr; in __giveup_fpu()
165 msr &= ~(MSR_FP|MSR_FE0|MSR_FE1); in __giveup_fpu()
168 msr &= ~MSR_VSX; in __giveup_fpu()
170 tsk->thread.regs->msr = msr; in __giveup_fpu()
199 if (tsk->thread.regs->msr & MSR_FP) { in flush_fp_to_thread()
223 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) { in enable_kernel_fp()
233 MSR_TM_ACTIVE(current->thread.regs->msr)) in enable_kernel_fp()
[all …]
signal_32.c
387 unsigned long msr = regs->msr; in save_user_regs() local
405 msr |= MSR_VEC; in save_user_regs()
427 msr &= ~MSR_VSX; in save_user_regs()
439 msr |= MSR_VSX; in save_user_regs()
451 msr |= MSR_SPE; in save_user_regs()
460 if (__put_user(msr, &frame->mc_gregs[PT_MSR])) in save_user_regs()
494 unsigned long msr = regs->msr; in save_tm_user_regs() local
503 regs->msr &= ~MSR_TS_MASK; in save_tm_user_regs()
516 if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR])) in save_tm_user_regs()
525 if (msr & MSR_VEC) { in save_tm_user_regs()
[all …]
/Linux-v5.4/tools/power/x86/x86_energy_perf_policy/
x86_energy_perf_policy.c
626 int get_msr(int cpu, int offset, unsigned long long *msr) in get_msr() argument
637 retval = pread(fd, msr, sizeof(*msr), offset); in get_msr()
638 if (retval != sizeof(*msr)) in get_msr()
642 fprintf(stderr, "get_msr(cpu%d, 0x%X, 0x%llX)\n", cpu, offset, *msr); in get_msr()
681 unsigned long long msr; in read_hwp_cap() local
683 get_msr(cpu, msr_offset, &msr); in read_hwp_cap()
685 cap->highest = msr_perf_2_ratio(HWP_HIGHEST_PERF(msr)); in read_hwp_cap()
686 cap->guaranteed = msr_perf_2_ratio(HWP_GUARANTEED_PERF(msr)); in read_hwp_cap()
687 cap->efficient = msr_perf_2_ratio(HWP_MOSTEFFICIENT_PERF(msr)); in read_hwp_cap()
688 cap->lowest = msr_perf_2_ratio(HWP_LOWEST_PERF(msr)); in read_hwp_cap()
[all …]
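
read_hwp_cap() above splits IA32_HWP_CAPABILITIES into four 8-bit performance-level fields. A sketch of that decoding, assuming the standard layout (highest perf in bits 7:0, guaranteed in 15:8, most efficient in 23:16, lowest in 31:24):

/* Hedged sketch of the HWP_*_PERF() field extraction used above. */
#define HWP_HIGHEST_PERF(msr)        (((msr) >>  0) & 0xff)
#define HWP_GUARANTEED_PERF(msr)     (((msr) >>  8) & 0xff)
#define HWP_MOSTEFFICIENT_PERF(msr)  (((msr) >> 16) & 0xff)
#define HWP_LOWEST_PERF(msr)         (((msr) >> 24) & 0xff)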
/Linux-v5.4/arch/x86/events/
probe.c
14 perf_msr_probe(struct perf_msr *msr, int cnt, bool zero, void *data) in perf_msr_probe() argument
24 if (!msr[bit].no_check) { in perf_msr_probe()
25 struct attribute_group *grp = msr[bit].grp; in perf_msr_probe()
29 if (msr[bit].test && !msr[bit].test(bit, data)) in perf_msr_probe()
32 if (rdmsrl_safe(msr[bit].msr, &val)) in perf_msr_probe()
/Linux-v5.4/arch/arm64/mm/
proc.S
92 msr tpidr_el0, x2
93 msr tpidrro_el0, x3
94 msr contextidr_el1, x4
95 msr cpacr_el1, x6
101 msr tcr_el1, x8
102 msr vbar_el1, x9
111 msr mdscr_el1, x10
113 msr sctlr_el1, x12
115 msr tpidr_el1, x13
117 msr tpidr_el2, x13
[all …]
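
Note that the arm64 hits here and in hyp-init.S below are a different "msr" entirely: the AArch64 MSR/MRS instructions, which write and read system registers, not x86 model-specific registers. A minimal C sketch of the pair via inline assembly:

/* AArch64: "msr" moves to a system register, "mrs" moves from one. */
static inline unsigned long read_tpidr_el0(void)
{
    unsigned long v;

    asm volatile("mrs %0, tpidr_el0" : "=r" (v));
    return v;
}

static inline void write_tpidr_el0(unsigned long v)
{
    asm volatile("msr tpidr_el0, %0" : : "r" (v));
}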
/Linux-v5.4/arch/arm64/kvm/
hyp-init.S
60 msr ttbr0_el2, x4
90 msr tcr_el2, x4
93 msr mair_el2, x4
107 msr sctlr_el2, x4
113 msr vbar_el2, x2
116 msr tpidr_el2, x3
127 msr elr_el2, x1
129 msr spsr_el2, x0
148 msr sctlr_el2, x5
153 msr vbar_el2, x5
/Linux-v5.4/tools/power/cpupower/debug/i386/
centrino-decode.c
28 static int rdmsr(unsigned int cpu, unsigned int msr, in rdmsr() argument
47 if (lseek(fd, msr, SEEK_CUR) == -1) in rdmsr()
63 static void decode (unsigned int msr) in decode() argument
68 multiplier = ((msr >> 8) & 0xFF); in decode()
70 mv = (((msr & 0xFF) * 16) + 700); in decode()
72 printf("0x%x means multiplier %d @ %d mV\n", msr, multiplier, mv); in decode()
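
A worked example of the field math in decode() above, using a hypothetical MSR value:

unsigned int msr = 0x0B13;                    /* hypothetical reading */
unsigned int multiplier = (msr >> 8) & 0xFF;  /* 0x0B = 11            */
unsigned int mv = ((msr & 0xFF) * 16) + 700;  /* 19 * 16 + 700 = 1004 */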
/Linux-v5.4/arch/x86/power/
cpu.c
39 struct saved_msr *msr = ctxt->saved_msrs.array; in msr_save_context() local
40 struct saved_msr *end = msr + ctxt->saved_msrs.num; in msr_save_context()
42 while (msr < end) { in msr_save_context()
43 msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q); in msr_save_context()
44 msr++; in msr_save_context()
50 struct saved_msr *msr = ctxt->saved_msrs.array; in msr_restore_context() local
51 struct saved_msr *end = msr + ctxt->saved_msrs.num; in msr_restore_context()
53 while (msr < end) { in msr_restore_context()
54 if (msr->valid) in msr_restore_context()
55 wrmsrl(msr->info.msr_no, msr->info.reg.q); in msr_restore_context()
[all …]
/Linux-v5.4/arch/x86/kvm/vmx/
pmu_intel.c
143 static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) in intel_is_valid_msr() argument
148 switch (msr) { in intel_is_valid_msr()
156 ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) || in intel_is_valid_msr()
157 get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) || in intel_is_valid_msr()
158 get_fixed_pmc(pmu, msr); in intel_is_valid_msr()
165 static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data) in intel_pmu_get_msr() argument
170 switch (msr) { in intel_pmu_get_msr()
184 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) { in intel_pmu_get_msr()
188 } else if ((pmc = get_fixed_pmc(pmu, msr))) { in intel_pmu_get_msr()
192 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { in intel_pmu_get_msr()
[all …]
