Lines matching refs:vcpu (arch/x86/kvm/kvm_cache_regs.h)

/* Generate kvm_<lname>_read()/kvm_<lname>_write() accessors for one GPR. */
#define BUILD_KVM_GPR_ACCESSORS(lname, uname)				\
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{									\
	return vcpu->arch.regs[VCPU_REGS_##uname];			\
}									\
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	\
						unsigned long val)	\
{									\
	vcpu->arch.regs[VCPU_REGS_##uname] = val;			\
}
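/*
 * The file instantiates the macro once per GPR.  The invocations below
 * are a representative subset; the kvm_rax_read()/kvm_rdx_read() calls
 * in kvm_read_edx_eax() further down depend on them.
 */
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)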
/*
 * regs_avail: the cached value in vcpu->arch.regs is up to date.
 * regs_dirty: the cached value was modified and must be propagated to
 * the VMCS/VMCB before the next VM-entry; dirty implies available.
 */
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
					     enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
					 enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
					   enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}
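/*
 * Illustrative sketch (not from this file): how a vendor module's
 * .cache_reg callback cooperates with the avail/dirty bits.  The
 * example_ name and the body are assumptions; vmcs_readl() and
 * GUEST_RSP/GUEST_RIP are the real VMX accessor and field names.
 */
static void example_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	kvm_register_mark_available(vcpu, reg);	/* cache is now current */

	switch (reg) {
	case VCPU_REGS_RSP:
		vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
		break;
	case VCPU_REGS_RIP:
		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}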
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return 0;

	if (!kvm_register_is_available(vcpu, reg))
		kvm_x86_ops.cache_reg(vcpu, reg);

	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu, int reg,
				      unsigned long val)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return;

	vcpu->arch.regs[reg] = val;
	kvm_register_mark_dirty(vcpu, reg);
}
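/*
 * Illustrative usage (not from this file): callers go through the
 * wrappers rather than touching vcpu->arch.regs directly, so the
 * avail/dirty bookkeeping stays correct.  The helper name is
 * hypothetical.
 */
static void example_dec_rcx(struct kvm_vcpu *vcpu)
{
	unsigned long rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);

	/* Marks RCX dirty; the new value reaches hardware state on the
	 * next VM-entry. */
	kvm_register_write(vcpu, VCPU_REGS_RCX, rcx - 1);
}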
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RSP, val);
}
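/*
 * Illustrative usage (not from this file): the classic way emulation
 * skips an instruction.  The helper name is hypothetical; real vendor
 * code layers extra handling (e.g. VMX instruction-length reporting)
 * on top of this pattern.
 */
static void example_skip_insn(struct kvm_vcpu *vcpu, unsigned int insn_len)
{
	kvm_rip_write(vcpu, kvm_rip_read(vcpu) + insn_len);
}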
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}
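/*
 * Illustrative usage (not from this file): in PAE paging, bits 31:30
 * of a 32-bit guest virtual address select one of the four PDPTEs.
 * The helper name is hypothetical.
 */
static u64 example_pdpte_for(struct kvm_vcpu *vcpu, u32 gva)
{
	return kvm_pdptr_read(vcpu, (gva >> 30) & 3);
}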
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR0);
	return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}
static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR4);
	return vcpu->arch.cr4 & mask;
}

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR3);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}
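/*
 * Illustrative usage (not from this file): reading a single bit via
 * the masked readers touches the VMCS/VMCB only when the guest owns
 * that bit and the cache is stale.  The wrapper names are
 * hypothetical; X86_CR0_TS and X86_CR4_PAE are real bit definitions.
 */
static inline bool example_guest_cr0_ts(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_TS);
}

static inline bool example_guest_pae_enabled(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}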
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_rax_read(vcpu) & -1u)
		| ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}
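/*
 * Illustrative usage (not from this file): WRMSR-style emulation,
 * where ECX holds the MSR index and EDX:EAX the 64-bit payload (the
 * "& -1u" above truncates each GPR to its low 32 bits).  Both
 * example_ helpers are hypothetical.
 */
static int example_handle_wrmsr(struct kvm_vcpu *vcpu)
{
	u32 msr = kvm_rcx_read(vcpu);
	u64 data = kvm_read_edx_eax(vcpu);

	return example_set_msr(vcpu, msr, data);	/* hypothetical */
}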
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}
}
static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

static inline bool is_smm(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_SMM_MASK;
}
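/*
 * Illustrative usage (not from this file): exit handlers commonly
 * branch on nested state first.  Both helpers below are hypothetical.
 */
static int example_handle_exit(struct kvm_vcpu *vcpu)
{
	if (is_guest_mode(vcpu))
		return example_reflect_to_l1(vcpu);	/* hypothetical */

	return example_handle_for_l1(vcpu);		/* hypothetical */
}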