Lines matching "vcpu" in KVM's x86 register cache helpers (arch/x86/kvm/kvm_cache_regs.h), reconstructed below with the search tool's line numbers and annotations stripped:
#define BUILD_KVM_GPR_ACCESSORS(lname, uname)				       \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{									       \
        return vcpu->arch.regs[VCPU_REGS_##uname];			       \
}									       \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	       \
                                                unsigned long val)	       \
{									       \
        vcpu->arch.regs[VCPU_REGS_##uname] = val;			       \
}
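In the source file this macro is then instantiated once per plain GPR (RSP and RIP are deliberately excluded, since they go through the caching helpers below), along these lines:

BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
/* ... likewise rbp/rsi/rdi, and r8-r15 under CONFIG_X86_64 ... */

These expansions produce kvm_rax_read()/kvm_rax_write() and friends, which kvm_read_edx_eax() further down relies on.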
/*
 * avail  dirty
 * 0	  0	  register in VMCS/VMCB
 * 0	  1	  *INVALID*
 * 1	  0	  register in vcpu->arch
 * 1	  1	  register in vcpu->arch, needs to be stored back
 */
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
                                             enum kvm_reg reg)
{
        return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}
static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
                                         enum kvm_reg reg)
{
        return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}
static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
                                               enum kvm_reg reg)
{
        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}
static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
                                           enum kvm_reg reg)
{
        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}
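To illustrate the avail/dirty protocol, here is a hedged sketch of a vendor cache-fill hook; example_cache_reg() and read_rsp_from_hw() are hypothetical stand-ins, only the mark/test helpers are from this file. The kvm_x86_cache_reg static call pulls a register out of hardware state into vcpu->arch and marks it available; writers separately mark it dirty so it gets flushed back.

/* Hypothetical vendor implementation of the kvm_x86_cache_reg hook. */
static void example_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
        kvm_register_mark_available(vcpu, reg); /* avail=1, dirty untouched */

        switch (reg) {
        case VCPU_REGS_RSP:
                /* read_rsp_from_hw() is a stand-in for a VMCS/VMCB read. */
                vcpu->arch.regs[VCPU_REGS_RSP] = read_rsp_from_hw(vcpu);
                break;
        default:
                break;
        }
}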
/*
 * The "raw" register helpers are only for cases where the full 64 bits of a
 * register are read/written irrespective of current vCPU mode.  In other
 * words, odds are good you shouldn't be using the raw variants.
 */
static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg)
{
        if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
                return 0;

        if (!kvm_register_is_available(vcpu, reg))
                static_call(kvm_x86_cache_reg)(vcpu, reg);

        return vcpu->arch.regs[reg];
}
static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg,
                                          unsigned long val)
{
        if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
                return;

        vcpu->arch.regs[reg] = val;
        kvm_register_mark_dirty(vcpu, reg);
}
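The mode-aware, non-raw wrappers live alongside these helpers (in x86.h in the same tree) and truncate the value to 32 bits outside 64-bit mode. A sketch of the read side, assuming is_64_bit_mode() as defined there:

static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
        unsigned long val = kvm_register_read_raw(vcpu, reg);

        return is_64_bit_mode(vcpu) ? val : (u32)val;
}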
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
        return kvm_register_read_raw(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
        kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
        return kvm_register_read_raw(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
        kvm_register_write_raw(vcpu, VCPU_REGS_RSP, val);
}
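A typical caller pairs these to advance the instruction pointer after emulating an instruction; insn_len here is an assumed local holding the emulated instruction's length:

        kvm_rip_write(vcpu, kvm_rip_read(vcpu) + insn_len);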
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
        might_sleep();  /* on svm */

        if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
                static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_PDPTR);

        return vcpu->arch.walk_mmu->pdptrs[index];
}

static inline void kvm_pdptr_write(struct kvm_vcpu *vcpu, int index, u64 value)
{
        vcpu->arch.walk_mmu->pdptrs[index] = value;
}
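The PDPTR cache holds the four PAE page-directory-pointer entries. A minimal sketch of walking them and testing the present bit (example_any_pdpte_present() is an illustrative name, not kernel code):

/* Sketch: scan the cached PAE PDPTEs (indices 0..3) for present entries. */
static inline bool example_any_pdpte_present(struct kvm_vcpu *vcpu)
{
        int i;

        for (i = 0; i < 4; i++) {
                if (kvm_pdptr_read(vcpu, i) & 1)        /* bit 0 = present */
                        return true;
        }
        return false;
}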
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
        ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;

        if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
            !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
                static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR0);

        return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr0_bits(vcpu, ~0UL);
}
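Reading a single, possibly guest-owned bit through kvm_read_cr0_bits() avoids decaching the whole register when the guest doesn't own that bit. KVM's own predicates are built this way; a protected-mode check along the lines of the one in x86.h:

static inline int is_protmode(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}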
static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
        ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;

        if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
            !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
                static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR4);

        return vcpu->arch.cr4 & mask;
}
static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
        if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
                static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR3);

        return vcpu->arch.cr3;
}
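A common consumer masks off the PCID bits to obtain the guest's page-table root. A minimal sketch, assuming X86_CR3_PCID_MASK from processor-flags.h covers the low PCID bits:

        gpa_t root_gpa = kvm_read_cr3(vcpu) & ~(gpa_t)X86_CR3_PCID_MASK;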
static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr4_bits(vcpu, ~0UL);
}
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
        return (kvm_rax_read(vcpu) & -1u)
                | ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}
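The inverse pattern splits a 64-bit value back into EDX:EAX, as RDMSR/RDTSC emulation does; data is an assumed u64 local:

        kvm_rax_write(vcpu, data & -1u);
        kvm_rdx_write(vcpu, (data >> 32) & -1u);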
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hflags |= HF_GUEST_MASK;
        vcpu->stat.guest_mode = 1;
}
static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hflags &= ~HF_GUEST_MASK;

        if (vcpu->arch.load_eoi_exitmap_pending) {
                vcpu->arch.load_eoi_exitmap_pending = false;
                kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
        }

        vcpu->stat.guest_mode = 0;
}
static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.hflags & HF_GUEST_MASK;
}

static inline bool is_smm(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.hflags & HF_SMM_MASK;
}
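These hflags predicates gate mode-specific paths throughout KVM. A hedged sketch of the shape such call sites take (example_deliver_event() is hypothetical):

/* Hypothetical call site: event delivery branches on vCPU mode. */
static void example_deliver_event(struct kvm_vcpu *vcpu)
{
        if (is_guest_mode(vcpu)) {
                /* L2 is active: the event may belong to the L1 hypervisor. */
        } else if (is_smm(vcpu)) {
                /* In SMM: most events are blocked until RSM. */
        }
}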