/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#include <linux/kvm_host.h>

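/*
 * CR0/CR4 bits that the guest may be allowed to own, i.e. modify without
 * triggering a VM-exit.  A guest-owned bit can change behind KVM's back,
 * so its cached value is trusted only while VCPU_EXREG_CR0/CR4 is marked
 * available (see kvm_read_cr0_bits() and kvm_read_cr4_bits() below).
 */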
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)

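/*
 * Generate kvm_<reg>_read()/kvm_<reg>_write() wrappers for the GPRs.
 * These access vcpu->arch.regs[] directly and bypass the availability
 * and dirty tracking used by kvm_register_read()/kvm_register_write();
 * the plain GPRs are unconditionally saved and loaded around VM-entry,
 * so no lazy fill or write-back is needed for them.
 */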
#define BUILD_KVM_GPR_ACCESSORS(lname, uname)				      \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{									      \
	return vcpu->arch.regs[VCPU_REGS_##uname];			      \
}									      \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	      \
						unsigned long val)	      \
{									      \
	vcpu->arch.regs[VCPU_REGS_##uname] = val;			      \
}
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
#ifdef CONFIG_X86_64
BUILD_KVM_GPR_ACCESSORS(r8,  R8)
BUILD_KVM_GPR_ACCESSORS(r9,  R9)
BUILD_KVM_GPR_ACCESSORS(r10, R10)
BUILD_KVM_GPR_ACCESSORS(r11, R11)
BUILD_KVM_GPR_ACCESSORS(r12, R12)
BUILD_KVM_GPR_ACCESSORS(r13, R13)
BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif

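/*
 * regs_avail and regs_dirty are bitmaps indexed by enum kvm_reg.  A set
 * bit in regs_avail means the cached value (vcpu->arch.regs[] or the
 * corresponding VCPU_EXREG_* state) is current; a set bit in regs_dirty
 * means KVM has modified the value and must propagate it back to the
 * hardware state (e.g. the VMCS) before re-entering the guest.
 */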
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
					     enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
					 enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
					   enum kvm_reg reg)
{
	/* A dirty register is, by definition, also available. */
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

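/*
 * Read a GPR by index, lazily filling the cache via
 * kvm_x86_ops.cache_reg() if the value hasn't been pulled out of
 * hardware since the last VM-exit.  The WARN guards against an
 * out-of-range register index.  Typical (illustrative) use:
 *
 *	unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
 */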
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return 0;

	if (!kvm_register_is_available(vcpu, reg))
		kvm_x86_ops.cache_reg(vcpu, reg);

	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu, int reg,
				      unsigned long val)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return;

	vcpu->arch.regs[reg] = val;
	kvm_register_mark_dirty(vcpu, reg);
}

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RSP, val);
}

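/*
 * Read one of the four PAE page-directory-pointer-table entries for the
 * current walk MMU.  The might_sleep() documents that the SVM path may
 * sleep while filling the cache, e.g. when loading the PDPTRs from
 * guest memory.
 */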
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

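/*
 * Only bits that are both possibly and actually guest-owned can be stale
 * in the cache; any other bit is updated by KVM itself on every write.
 * Refresh the cache from hardware only when the caller asks for such a
 * bit while the cache is invalid.
 */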
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR0);
	return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR4);
	return vcpu->arch.cr4 & mask;
}

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR3);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

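/*
 * Assemble the 64-bit EDX:EAX pair, e.g. the payload of an emulated
 * WRMSR.  The "& -1u" explicitly truncates each register to its low
 * 32 bits.
 */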
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_rax_read(vcpu) & -1u)
		| ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}

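/*
 * HF_GUEST_MASK tracks whether the vCPU is running a nested (L2) guest;
 * HF_SMM_MASK tracks System Management Mode.  leave_guest_mode() also
 * flushes an EOI-exitmap update that was deferred while the vCPU was
 * in L2.
 */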
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

static inline bool is_smm(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_SMM_MASK;
}

#endif