/Linux-v5.10/arch/x86/kernel/cpu/ |
D | perfctr-watchdog.c |
    45  static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)  in nmi_perfctr_msr_to_bit() argument
    51  if (msr >= MSR_F15H_PERF_CTR)  in nmi_perfctr_msr_to_bit()
    52  return (msr - MSR_F15H_PERF_CTR) >> 1;  in nmi_perfctr_msr_to_bit()
    53  return msr - MSR_K7_PERFCTR0;  in nmi_perfctr_msr_to_bit()
    56  return msr - MSR_ARCH_PERFMON_PERFCTR0;  in nmi_perfctr_msr_to_bit()
    60  return msr - MSR_P6_PERFCTR0;  in nmi_perfctr_msr_to_bit()
    62  return msr - MSR_KNC_PERFCTR0;  in nmi_perfctr_msr_to_bit()
    64  return msr - MSR_P4_BPU_PERFCTR0;  in nmi_perfctr_msr_to_bit()
    69  return msr - MSR_ARCH_PERFMON_PERFCTR0;  in nmi_perfctr_msr_to_bit()
    78  static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)  in nmi_evntsel_msr_to_bit() argument
    [all …]
|
/Linux-v5.10/arch/x86/include/asm/ |
D | msr.h |
    14  struct msr {  struct
    26  struct msr reg;  argument
    27  struct msr *msrs;
    73  extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
    74  extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
    75  extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
    77  static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {}  in do_trace_write_msr() argument
    78  static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}  in do_trace_read_msr() argument
    79  static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}  in do_trace_rdpmc() argument
    89  static inline unsigned long long notrace __rdmsr(unsigned int msr)  in __rdmsr() argument
    [all …]
|
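For context, struct msr in the header above is the 32/64-bit value container used by the x86 MSR library, and __rdmsr() is the low-level reader; the same header also declares fault-tolerant variants such as rdmsrl_safe(). Below is a minimal, hedged sketch (not code from the file above) of how kernel code typically reads one MSR with fault handling; the helper name and the choice of MSR_IA32_TSC are purely illustrative.

    #include <linux/types.h>
    #include <asm/msr.h>

    /* Illustrative helper, assuming x86 kernel context. */
    static u64 read_tsc_msr_or_zero(void)
    {
            u64 val;

            /* rdmsrl_safe() returns non-zero if the RDMSR faulted */
            if (rdmsrl_safe(MSR_IA32_TSC, &val))
                    return 0;
            return val;
    }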
D | msr-trace.h |
     3  #define TRACE_SYSTEM msr
     6  #define TRACE_INCLUDE_FILE msr-trace
    22  TP_PROTO(unsigned msr, u64 val, int failed),
    23  TP_ARGS(msr, val, failed),
    25  __field( unsigned, msr )
    30  __entry->msr = msr;
    35  __entry->msr,
    41  TP_PROTO(unsigned msr, u64 val, int failed),
    42  TP_ARGS(msr, val, failed)
    46  TP_PROTO(unsigned msr, u64 val, int failed),
    [all …]
|
/Linux-v5.10/arch/powerpc/kvm/ |
D | book3s_hv_tm.c |
    19  u64 msr = vcpu->arch.shregs.msr;  in emulate_tx_failure() local
    23  if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr))  in emulate_tx_failure()
    25  if (msr & MSR_PR) {  in emulate_tx_failure()
    45  u64 msr = vcpu->arch.shregs.msr;  in kvmhv_p9_tm_emulation() local
    65  WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&  in kvmhv_p9_tm_emulation()
    69  vcpu->arch.shregs.msr = newmsr;  in kvmhv_p9_tm_emulation()
    75  if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {  in kvmhv_p9_tm_emulation()
    86  if ((msr & MSR_PR) && !(vcpu->arch.fscr & FSCR_EBB)) {  in kvmhv_p9_tm_emulation()
    95  WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&  in kvmhv_p9_tm_emulation()
   101  msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;  in kvmhv_p9_tm_emulation()
    [all …]
|
D | book3s_hv_tm_builtin.c |
    23  u64 newmsr, msr, bescr;  in kvmhv_p9_tm_emulation_early() local
    45  vcpu->arch.shregs.msr = newmsr;  in kvmhv_p9_tm_emulation_early()
    52  msr = vcpu->arch.shregs.msr;  in kvmhv_p9_tm_emulation_early()
    53  if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206))  in kvmhv_p9_tm_emulation_early()
    57  ((msr & MSR_PR) && !(mfspr(SPRN_FSCR) & FSCR_EBB)))  in kvmhv_p9_tm_emulation_early()
    67  msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;  in kvmhv_p9_tm_emulation_early()
    68  vcpu->arch.shregs.msr = msr;  in kvmhv_p9_tm_emulation_early()
    77  msr = vcpu->arch.shregs.msr;  in kvmhv_p9_tm_emulation_early()
    82  newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE);  in kvmhv_p9_tm_emulation_early()
    84  vcpu->arch.shregs.msr = newmsr;  in kvmhv_p9_tm_emulation_early()
    [all …]
|
/Linux-v5.10/arch/x86/lib/ |
D | msr.c |
     9  struct msr *msrs_alloc(void)  in msrs_alloc()
    11  struct msr *msrs = NULL;  in msrs_alloc()
    13  msrs = alloc_percpu(struct msr);  in msrs_alloc()
    23  void msrs_free(struct msr *msrs)  in msrs_free()
    39  int msr_read(u32 msr, struct msr *m)  in msr_read() argument
    44  err = rdmsrl_safe(msr, &val);  in msr_read()
    57  int msr_write(u32 msr, struct msr *m)  in msr_write() argument
    59  return wrmsrl_safe(msr, m->q);  in msr_write()
    62  static inline int __flip_bit(u32 msr, u8 bit, bool set)  in __flip_bit() argument
    64  struct msr m, m1;  in __flip_bit()
    [all …]
|
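The library above pairs msrs_alloc()/msrs_free() (a per-CPU array of struct msr) with per-CPU readers. A hedged sketch of the usual pattern follows; it assumes the rdmsr_on_cpus() helper from the same MSR library and uses MSR_IA32_MCG_CAP only as an example register, with a hypothetical function name.

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/percpu.h>
    #include <linux/printk.h>
    #include <asm/msr.h>

    /* Illustrative only: read one MSR on every online CPU. */
    static int sample_msr_everywhere(void)
    {
            struct msr *msrs;
            int cpu;

            msrs = msrs_alloc();            /* per-CPU array of struct msr */
            if (!msrs)
                    return -ENOMEM;

            rdmsr_on_cpus(cpu_online_mask, MSR_IA32_MCG_CAP, msrs);

            for_each_online_cpu(cpu)
                    pr_info("cpu%d: %#llx\n", cpu, per_cpu_ptr(msrs, cpu)->q);

            msrs_free(msrs);
            return 0;
    }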
/Linux-v5.10/tools/power/x86/turbostat/ |
D | turbostat.c |
     279  int get_msr_sum(int cpu, off_t offset, unsigned long long *msr);
     493  int get_msr(int cpu, off_t offset, unsigned long long *msr)  in get_msr() argument
     497  retval = pread(get_msr_fd(cpu), msr, sizeof(*msr), offset);  in get_msr()
     499  if (retval != sizeof *msr)  in get_msr()
    1891  unsigned long long msr;  in get_counters() local
    1968  if (get_msr(cpu, MSR_SMI_COUNT, &msr))  in get_counters()
    1970  t->smi_count = msr & 0xFFFFFFFF;  in get_counters()
    2008  if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))  in get_counters()
    2010  c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);  in get_counters()
    2014  if (get_msr(cpu, MSR_CORE_ENERGY_STAT, &msr))  in get_counters()
    [all …]
|
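turbostat's get_msr() above relies on the kernel's msr driver: /dev/cpu/<n>/msr exposes each MSR as an 8-byte read at a file offset equal to the MSR number. A standalone user-space sketch of the same idea (assumes the msr driver is loaded and the process has the required privileges; the helper name is illustrative):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    static int read_msr(int cpu, uint32_t reg, uint64_t *val)
    {
            char path[64];
            int fd;

            snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
            fd = open(path, O_RDONLY);
            if (fd < 0)
                    return -1;
            /* the MSR number is the file offset; each MSR is 8 bytes */
            if (pread(fd, val, sizeof(*val), reg) != sizeof(*val)) {
                    close(fd);
                    return -1;
            }
            close(fd);
            return 0;
    }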
/Linux-v5.10/arch/m68k/bvme6000/ |
D | config.c |
    171  unsigned char msr;  in bvme6000_timer_int() local
    174  msr = rtc->msr & 0xc0;  in bvme6000_timer_int()
    175  rtc->msr = msr | 0x20;  /* Ack the interrupt */  in bvme6000_timer_int()
    196  unsigned char msr = rtc->msr & 0xc0;  in bvme6000_sched_init() local
    198  rtc->msr = 0;  /* Ensure timer registers accessible */  in bvme6000_sched_init()
    208  rtc->msr = 0x40;  /* Access int.cntrl, etc */  in bvme6000_sched_init()
    213  rtc->msr = 0;  /* Access timer 1 control */  in bvme6000_sched_init()
    216  rtc->msr = msr;  in bvme6000_sched_init()
    238  unsigned char msr, msb;  in bvme6000_read_clk() local
    244  msr = rtc->msr & 0xc0;  in bvme6000_read_clk()
    [all …]
|
D | rtc.c |
     42  unsigned char msr;  in rtc_ioctl() local
     52  msr = rtc->msr & 0xc0;  in rtc_ioctl()
     53  rtc->msr = 0x40;  in rtc_ioctl()
     66  rtc->msr = msr;  in rtc_ioctl()
    108  msr = rtc->msr & 0xc0;  in rtc_ioctl()
    109  rtc->msr = 0x40;  in rtc_ioctl()
    123  rtc->msr = msr;  in rtc_ioctl()
|
/Linux-v5.10/arch/microblaze/kernel/ |
D | process.c |
     46  regs->msr, regs->ear, regs->esr, regs->fsr);  in show_regs()
     71  local_save_flags(childregs->msr);  in copy_thread()
     73  ti->cpu_context.msr = childregs->msr & ~MSR_IE;  in copy_thread()
     85  ti->cpu_context.msr = (unsigned long)childregs->msr;  in copy_thread()
     87  childregs->msr |= MSR_UMS;  in copy_thread()
     99  childregs->msr &= ~MSR_EIP;  in copy_thread()
    100  childregs->msr |= MSR_IE;  in copy_thread()
    101  childregs->msr &= ~MSR_VM;  in copy_thread()
    102  childregs->msr |= MSR_VMS;  in copy_thread()
    103  childregs->msr |= MSR_EE;  /* exceptions will be enabled*/  in copy_thread()
    [all …]
|
/Linux-v5.10/arch/x86/kvm/ |
D | mtrr.c |
     27  static bool msr_mtrr_valid(unsigned msr)  in msr_mtrr_valid() argument
     29  switch (msr) {  in msr_mtrr_valid()
     54  bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)  in kvm_mtrr_valid() argument
     59  if (!msr_mtrr_valid(msr))  in kvm_mtrr_valid()
     62  if (msr == MSR_IA32_CR_PAT) {  in kvm_mtrr_valid()
     64  } else if (msr == MSR_MTRRdefType) {  in kvm_mtrr_valid()
     68  } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {  in kvm_mtrr_valid()
     76  WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));  in kvm_mtrr_valid()
     79  if ((msr & 1) == 0) {  in kvm_mtrr_valid()
    186  static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)  in fixed_msr_to_seg_unit() argument
    [all …]
|
/Linux-v5.10/arch/x86/xen/ |
D | pmu.c |
    132  static inline bool is_amd_pmu_msr(unsigned int msr)  in is_amd_pmu_msr() argument
    134  if ((msr >= MSR_F15H_PERF_CTL &&  in is_amd_pmu_msr()
    135  msr < MSR_F15H_PERF_CTR + (amd_num_counters * 2)) ||  in is_amd_pmu_msr()
    136  (msr >= MSR_K7_EVNTSEL0 &&  in is_amd_pmu_msr()
    137  msr < MSR_K7_PERFCTR0 + amd_num_counters))  in is_amd_pmu_msr()
    189  static bool xen_intel_pmu_emulate(unsigned int msr, u64 *val, int type,  in xen_intel_pmu_emulate() argument
    205  switch (msr) {  in xen_intel_pmu_emulate()
    243  if (msr == MSR_CORE_PERF_GLOBAL_OVF_CTRL)  in xen_intel_pmu_emulate()
    252  static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read)  in xen_amd_pmu_emulate() argument
    265  ((msr >= MSR_K7_EVNTSEL0) && (msr <= MSR_K7_PERFCTR3)))  in xen_amd_pmu_emulate()
    [all …]
|
/Linux-v5.10/arch/powerpc/kernel/ |
D | signal_64.c |
    108  unsigned long msr = regs->msr;  in setup_sigcontext() local
    127  msr |= MSR_VEC;  in setup_sigcontext()
    150  msr &= ~MSR_VSX;  in setup_sigcontext()
    164  msr |= MSR_VSX;  in setup_sigcontext()
    170  err |= __put_user(msr, &sc->gp_regs[PT_MSR]);  in setup_sigcontext()
    196  unsigned long msr)  in setup_tm_sigcontexts() argument
    215  BUG_ON(!MSR_TM_ACTIVE(msr));  in setup_tm_sigcontexts()
    223  msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);  in setup_tm_sigcontexts()
    237  if (msr & MSR_VEC)  in setup_tm_sigcontexts()
    249  msr |= MSR_VEC;  in setup_tm_sigcontexts()
    [all …]
|
D | signal_32.c |
    242  unsigned long msr = regs->msr;  in save_user_regs() local
    260  msr |= MSR_VEC;  in save_user_regs()
    282  msr &= ~MSR_VSX;  in save_user_regs()
    294  msr |= MSR_VSX;  in save_user_regs()
    306  msr |= MSR_SPE;  in save_user_regs()
    315  if (__put_user(msr, &frame->mc_gregs[PT_MSR]))  in save_user_regs()
    348  unsigned long msr)  in save_tm_user_regs() argument
    363  if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))  in save_tm_user_regs()
    372  if (msr & MSR_VEC) {  in save_tm_user_regs()
    387  msr |= MSR_VEC;  in save_tm_user_regs()
    [all …]
|
D | process.c |
     96  MSR_TM_ACTIVE(tsk->thread.regs->msr) &&  in check_if_tm_restore_required()
     98  tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;  in check_if_tm_restore_required()
    156  unsigned long msr;  in __giveup_fpu() local
    159  msr = tsk->thread.regs->msr;  in __giveup_fpu()
    160  msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);  in __giveup_fpu()
    162  msr &= ~MSR_VSX;  in __giveup_fpu()
    163  tsk->thread.regs->msr = msr;  in __giveup_fpu()
    192  if (tsk->thread.regs->msr & MSR_FP) {  in flush_fp_to_thread()
    216  if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {  in enable_kernel_fp()
    226  MSR_TM_ACTIVE(current->thread.regs->msr))  in enable_kernel_fp()
    [all …]
|
D | syscall_64.c |
     33  BUG_ON(!(regs->msr & MSR_RI));  in system_call_exception()
     34  BUG_ON(!(regs->msr & MSR_PR));  in system_call_exception()
    231  if ((regs->msr & mathflags) != mathflags)  in syscall_exit_prepare()
    243  local_paca->tm_scratch = regs->msr;  in syscall_exit_prepare()
    252  notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr)  in interrupt_exit_user_prepare() argument
    263  BUG_ON(!(regs->msr & MSR_RI));  in interrupt_exit_user_prepare()
    264  BUG_ON(!(regs->msr & MSR_PR));  in interrupt_exit_user_prepare()
    304  if ((regs->msr & mathflags) != mathflags)  in interrupt_exit_user_prepare()
    328  local_paca->tm_scratch = regs->msr;  in interrupt_exit_user_prepare()
    339  notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr)  in interrupt_exit_kernel_prepare() argument
    [all …]
|
/Linux-v5.10/arch/x86/events/ |
D | probe.c |
    19  perf_msr_probe(struct perf_msr *msr, int cnt, bool zero, void *data)  in perf_msr_probe() argument
    29  if (!msr[bit].no_check) {  in perf_msr_probe()
    30  struct attribute_group *grp = msr[bit].grp;  in perf_msr_probe()
    39  if (!msr[bit].msr)  in perf_msr_probe()
    42  if (msr[bit].test && !msr[bit].test(bit, data))  in perf_msr_probe()
    45  if (rdmsrl_safe(msr[bit].msr, &val))  in perf_msr_probe()
|
/Linux-v5.10/tools/testing/selftests/kvm/x86_64/ |
D | user_msr_test.c |
     33  static void deny_msr(uint8_t *bitmap, u32 msr)  in deny_msr() argument
     35  u32 idx = msr & (KVM_MSR_FILTER_MAX_BITMAP_SIZE - 1);  in deny_msr()
    156  run->msr.data = run->msr.index;  in handle_rdmsr()
    159  if (run->msr.index == MSR_SYSCALL_MASK ||  in handle_rdmsr()
    160  run->msr.index == MSR_GS_BASE) {  in handle_rdmsr()
    161  TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,  in handle_rdmsr()
    165  if (run->msr.index == 0xdeadbeef) {  in handle_rdmsr()
    166  TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,  in handle_rdmsr()
    176  if (run->msr.index == MSR_IA32_POWER_CTL) {  in handle_wrmsr()
    177  TEST_ASSERT(run->msr.data == 0x1234,  in handle_wrmsr()
    [all …]
|
D | kvm_pv_test.c |
     69  #define TEST_MSR(msr) { .idx = msr, .name = #msr }  argument
     71  #define PR_MSR(msr) ucall(UCALL_PR_MSR, 1, msr)  argument
     90  static void test_msr(struct msr_data *msr)  in test_msr() argument
     92  PR_MSR(msr);  in test_msr()
     93  do_rdmsr(msr->idx);  in test_msr()
     97  do_wrmsr(msr->idx, 0);  in test_msr()
    156  struct msr_data *msr = (struct msr_data *)uc->args[0];  in pr_msr() local
    158  pr_info("testing msr: %s (%#x)\n", msr->name, msr->idx);  in pr_msr()
|
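The do_rdmsr()/do_wrmsr() calls exercised by the selftests above ultimately execute the RDMSR/WRMSR instructions: ECX selects the MSR and EDX:EAX carry the 64-bit value. A hedged sketch of those guest-side primitives (function names are illustrative, not the selftest library's own):

    #include <stdint.h>

    static inline uint64_t rdmsr_raw(uint32_t msr)
    {
            uint32_t lo, hi;

            /* RDMSR: reads the MSR selected by ECX into EDX:EAX */
            __asm__ volatile("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
            return ((uint64_t)hi << 32) | lo;
    }

    static inline void wrmsr_raw(uint32_t msr, uint64_t val)
    {
            /* WRMSR: writes EDX:EAX to the MSR selected by ECX */
            __asm__ volatile("wrmsr" : :
                             "c"(msr), "a"((uint32_t)val), "d"((uint32_t)(val >> 32)));
    }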
/Linux-v5.10/tools/power/x86/x86_energy_perf_policy/ |
D | x86_energy_perf_policy.c |
    677  int get_msr(int cpu, int offset, unsigned long long *msr)  in get_msr() argument
    688  retval = pread(fd, msr, sizeof(*msr), offset);  in get_msr()
    689  if (retval != sizeof(*msr)) {  in get_msr()
    695  fprintf(stderr, "get_msr(cpu%d, 0x%X, 0x%llX)\n", cpu, offset, *msr);  in get_msr()
    734  unsigned long long msr;  in read_hwp_cap() local
    736  get_msr(cpu, msr_offset, &msr);  in read_hwp_cap()
    738  cap->highest = msr_perf_2_ratio(HWP_HIGHEST_PERF(msr));  in read_hwp_cap()
    739  cap->guaranteed = msr_perf_2_ratio(HWP_GUARANTEED_PERF(msr));  in read_hwp_cap()
    740  cap->efficient = msr_perf_2_ratio(HWP_MOSTEFFICIENT_PERF(msr));  in read_hwp_cap()
    741  cap->lowest = msr_perf_2_ratio(HWP_LOWEST_PERF(msr));  in read_hwp_cap()
    [all …]
|
/Linux-v5.10/arch/x86/kvm/vmx/ |
D | pmu_intel.c |
    163  static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)  in get_fw_gp_pmc() argument
    168  return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);  in get_fw_gp_pmc()
    171  static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)  in intel_is_valid_msr() argument
    176  switch (msr) {  in intel_is_valid_msr()
    184  ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||  in intel_is_valid_msr()
    185  get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||  in intel_is_valid_msr()
    186  get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr);  in intel_is_valid_msr()
    193  static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)  in intel_msr_idx_to_pmc() argument
    198  pmc = get_fixed_pmc(pmu, msr);  in intel_msr_idx_to_pmc()
    199  pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);  in intel_msr_idx_to_pmc()
    [all …]
|
/Linux-v5.10/arch/powerpc/kernel/ptrace/ |
D | ptrace-tm.c |
     34  return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;  in get_user_ckpt_msr()
     37  static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)  in set_user_ckpt_msr() argument
     39  task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;  in set_user_ckpt_msr()
     40  task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;  in set_user_ckpt_msr()
     63  if (!MSR_TM_ACTIVE(target->thread.regs->msr))  in tm_cgpr_active()
     92  if (!MSR_TM_ACTIVE(target->thread.regs->msr))  in tm_cgpr_get()
    100  offsetof(struct pt_regs, msr));  in tm_cgpr_get()
    104  offsetof(struct pt_regs, msr) + sizeof(long));  in tm_cgpr_get()
    143  if (!MSR_TM_ACTIVE(target->thread.regs->msr))  in tm_cgpr_set()
    163  offsetof(struct pt_regs, msr) + sizeof(long));  in tm_cgpr_set()
    [all …]
|
/Linux-v5.10/arch/x86/kvm/svm/ |
D | pmu.c |
     64  static enum index msr_to_index(u32 msr)  in msr_to_index() argument
     66  switch (msr) {  in msr_to_index()
     98  static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,  in get_gp_pmc_amd() argument
    101  switch (msr) {  in get_gp_pmc_amd()
    126  return &pmu->gp_counters[msr_to_index(msr)];  in get_gp_pmc_amd()
    201  static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)  in amd_is_valid_msr() argument
    207  static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)  in amd_msr_idx_to_pmc() argument
    212  pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);  in amd_msr_idx_to_pmc()
    213  pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);  in amd_msr_idx_to_pmc()
    222  u32 msr = msr_info->index;  in amd_pmu_get_msr() local
    [all …]
|
/Linux-v5.10/arch/arm64/mm/ |
D | proc.S |
    120  msr tpidr_el0, x2
    121  msr tpidrro_el0, x3
    122  msr contextidr_el1, x4
    123  msr cpacr_el1, x6
    129  msr tcr_el1, x8
    130  msr vbar_el1, x9
    139  msr mdscr_el1, x10
    141  msr sctlr_el1, x12
    143  msr tpidr_el1, x13
    145  msr tpidr_el2, x13
    [all …]
|
/Linux-v5.10/arch/arm64/kvm/hyp/nvhe/ |
D | hyp-init.S |
     76  msr tpidr_el2, x2
     82  msr ttbr0_el2, x0
    112  msr tcr_el2, x0
    115  msr mair_el2, x0
    134  msr sctlr_el2, x0
    139  msr vbar_el2, x4
    151  msr elr_el2, x1
    153  msr spsr_el2, x0
    176  msr sctlr_el2, x5
    181  msr vbar_el2, x5
|
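In the two AArch64 files above, msr is the instruction that writes a system register (its counterpart mrs reads one) and is unrelated to x86 model-specific registers. A minimal sketch of the same operation from C via inline assembly, using the user software-thread-ID register purely as an example; the helper names are illustrative.

    static inline unsigned long read_tpidr_el0(void)
    {
            unsigned long val;

            /* MRS: move system register into a general-purpose register */
            asm volatile("mrs %0, tpidr_el0" : "=r" (val));
            return val;
    }

    static inline void write_tpidr_el0(unsigned long val)
    {
            /* MSR: move a general-purpose register into a system register */
            asm volatile("msr tpidr_el0, %0" : : "r" (val));
    }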