/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE)
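
/*
 * Register reads are lazy: if the value is not already cached in
 * vcpu->arch.regs (tracked by the regs_avail bitmap), ask the vendor
 * module (VMX or SVM) to fetch it from hardware state first.
 */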
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
					      enum kvm_reg reg)
{
	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, reg);

	return vcpu->arch.regs[reg];
}
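
/*
 * Register writes go to the cache; marking the register dirty (as well
 * as available) tells the vendor module to propagate the new value
 * back to hardware state before re-entering the guest.
 */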
static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      enum kvm_reg reg,
				      unsigned long val)
{
	vcpu->arch.regs[reg] = val;
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}
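
/* Convenience accessors for RIP on top of the generic register cache. */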
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}
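
/*
 * PDPTRs (used by PAE paging) are cached like ordinary registers.  The
 * might_sleep() documents that refilling the cache may sleep on SVM,
 * where it can involve reading the PDPTEs from guest memory.
 */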
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, (enum kvm_reg)VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}
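
/*
 * Bits in KVM_POSSIBLE_CR0/CR4_GUEST_BITS may be owned by the guest,
 * i.e. changed by it without a VM-exit.  If a requested bit is
 * currently guest-owned, its up-to-date value must first be read back
 * ("decached") from hardware.
 */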
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if (tmask & vcpu->arch.cr0_guest_owned_bits)
		kvm_x86_ops->decache_cr0_guest_bits(vcpu);
	return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}
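
/* CR4 counterpart of kvm_read_cr0_bits(). */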
static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if (tmask & vcpu->arch.cr4_guest_owned_bits)
		kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	return vcpu->arch.cr4 & mask;
}
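
/*
 * CR3 is tracked via regs_avail like the GPRs; decache_cr3() refreshes
 * the cached value from hardware state (e.g. the VMCS on VMX).
 */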
static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
		kvm_x86_ops->decache_cr3(vcpu);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}
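
/*
 * Assemble the 64-bit EDX:EAX pair used by instructions such as RDMSR
 * and RDTSC; "& -1u" truncates each register to its low 32 bits.
 */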
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_register_read(vcpu, VCPU_REGS_RAX) & -1u)
		| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
}
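
/*
 * "Guest mode" refers to nested virtualization: HF_GUEST_MASK is set
 * while the vCPU is running a nested (L2) guest.  Updates to the EOI
 * exit bitmap are deferred while in guest mode, so leave_guest_mode()
 * issues the pending request, if any, on the way out.
 */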
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}
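
/* True if the vCPU is in System Management Mode. */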
static inline bool is_smm(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_SMM_MASK;
}

#endif /* ASM_KVM_CACHE_REGS_H */