Lines Matching refs:kvm_vcpu
78 struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
85 struct kvm_vcpu *runner;
145 extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
146 extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
147 extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
148 extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
149 extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
150 extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
151 extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
152 extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
154 extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
155 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
156 extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
157 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
158 extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
162 extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
165 extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
166 extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
168 extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
169 extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
170 extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
176 extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
181 extern long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
183 extern long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
185 extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
188 extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
191 extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
202 extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
226 extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
227 extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
228 extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
230 extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
231 extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
232 extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
234 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
235 extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu);
236 extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
265 extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);
267 extern int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu);
268 extern int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu);
269 extern void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu);
273 extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
274 extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
275 extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
279 extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
280 extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
283 void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr);
284 void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
287 void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
288 void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
289 void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu);
290 void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
292 static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {} in kvmppc_save_tm_pr()
293 static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {} in kvmppc_restore_tm_pr()
294 static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {} in kvmppc_save_tm_sprs()
295 static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {} in kvmppc_restore_tm_sprs()
301 long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
302 long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu);
305 long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
306 long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
307 long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
310 int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
312 void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
313 void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
315 long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);
317 void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
321 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) in to_book3s()
335 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) in kvmppc_set_gpr()
340 static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) in kvmppc_get_gpr()
345 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) in kvmppc_set_cr()
350 static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) in kvmppc_get_cr()
355 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val) in kvmppc_set_xer()
360 static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu) in kvmppc_get_xer()
365 static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val) in kvmppc_set_ctr()
370 static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu) in kvmppc_get_ctr()
375 static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val) in kvmppc_set_lr()
380 static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu) in kvmppc_get_lr()
385 static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val) in kvmppc_set_pc()
390 static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu) in kvmppc_get_pc()
395 static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
396 static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu) in kvmppc_need_byteswap()
401 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) in kvmppc_get_fault_dar()
407 static inline u64 kvmppc_dec_expires_host_tb(struct kvm_vcpu *vcpu) in kvmppc_dec_expires_host_tb()
418 static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu) in kvmppc_supports_magic_page()
424 extern int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu);
425 extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);
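The accessor pairs in this listing (kvmppc_get_gpr/kvmppc_set_gpr, kvmppc_get_pc/kvmppc_set_pc) are the usual way Book3S emulation code reads guest register state and writes results back. The fragment below is a minimal sketch of that pattern, assuming only the signatures shown above and the usual kernel context (asm/kvm_book3s.h in scope); the helper name kvmppc_emulate_example and the choice of source/destination GPRs are illustrative, not taken from the kernel source.

/*
 * Sketch only: an emulation-style helper that reads one guest GPR,
 * writes a result to another, and steps the guest PC past the
 * emulated instruction. Uses only accessors whose signatures appear
 * in the listing above.
 */
static int kvmppc_emulate_example(struct kvm_vcpu *vcpu)
{
	ulong src = kvmppc_get_gpr(vcpu, 4);	/* read a guest source register (GPR choice is illustrative) */

	kvmppc_set_gpr(vcpu, 3, src);		/* write the result back to the guest */
	kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);	/* advance past the 4-byte emulated instruction */

	return 0;
}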