Lines Matching full:vcpu
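
Each entry below gives the line number within the source file, the matched source line, and the enclosing function; "argument" and "local" flag matches that fall in a parameter list or a local variable declaration.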

54 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)  in kvm_arch_dy_runnable()  argument
56 return kvm_arch_vcpu_runnable(vcpu); in kvm_arch_dy_runnable()
59 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_in_kernel() argument
64 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
78 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) in kvmppc_prepare_to_enter() argument
94 kvmppc_account_exit(vcpu, SIGNAL_EXITS); in kvmppc_prepare_to_enter()
95 vcpu->run->exit_reason = KVM_EXIT_INTR; in kvmppc_prepare_to_enter()
100 vcpu->mode = IN_GUEST_MODE; in kvmppc_prepare_to_enter()
103 * Reading vcpu->requests must happen after setting vcpu->mode, in kvmppc_prepare_to_enter()
108 * to the page tables done while the VCPU is running. in kvmppc_prepare_to_enter()
113 if (kvm_request_pending(vcpu)) { in kvmppc_prepare_to_enter()
116 trace_kvm_check_requests(vcpu); in kvmppc_prepare_to_enter()
117 r = kvmppc_core_check_requests(vcpu); in kvmppc_prepare_to_enter()
124 if (kvmppc_core_prepare_to_enter(vcpu)) { in kvmppc_prepare_to_enter()
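
The ordering comment picked up above is the interesting part of kvmppc_prepare_to_enter: vcpu->mode is set to IN_GUEST_MODE before vcpu->requests is read, so that an update (such as a page-table change) made while the vCPU is about to run is either seen here or causes the requester to kick the vCPU. A minimal C11 model of that store-before-load pairing, using toy names rather than the kernel's primitives:

/* Minimal C11 model of the ordering described above: the entry path
 * publishes mode = IN_GUEST_MODE before it reads the pending-request
 * word, while a requester sets the request before it reads the mode to
 * decide whether a kick is needed.  Names are illustrative only. */
#include <stdatomic.h>
#include <stdbool.h>

enum { OUTSIDE_GUEST_MODE, IN_GUEST_MODE };

struct toy_vcpu_sync {
    _Atomic int mode;
    _Atomic unsigned long requests;
};

/* entry path (kvmppc_prepare_to_enter-like) */
static bool toy_prepare_to_enter(struct toy_vcpu_sync *v)
{
    atomic_store_explicit(&v->mode, IN_GUEST_MODE, memory_order_seq_cst);
    /* this load must not be reordered before the store above */
    return atomic_load_explicit(&v->requests, memory_order_seq_cst) == 0;
}

/* requester path (kvm_make_request-like); returns whether to kick */
static bool toy_make_request(struct toy_vcpu_sync *v, unsigned long req)
{
    atomic_fetch_or_explicit(&v->requests, req, memory_order_seq_cst);
    return atomic_load_explicit(&v->mode, memory_order_seq_cst) == IN_GUEST_MODE;
}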
141 static void kvmppc_swab_shared(struct kvm_vcpu *vcpu) in kvmppc_swab_shared() argument
143 struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared; in kvmppc_swab_shared()
161 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) in kvmppc_kvm_pv() argument
163 int nr = kvmppc_get_gpr(vcpu, 11); in kvmppc_kvm_pv()
165 unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3); in kvmppc_kvm_pv()
166 unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4); in kvmppc_kvm_pv()
167 unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5); in kvmppc_kvm_pv()
168 unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6); in kvmppc_kvm_pv()
171 if (!(kvmppc_get_msr(vcpu) & MSR_SF)) { in kvmppc_kvm_pv()
185 if (vcpu->arch.intr_msr & MSR_LE) in kvmppc_kvm_pv()
187 if (shared_big_endian != vcpu->arch.shared_big_endian) in kvmppc_kvm_pv()
188 kvmppc_swab_shared(vcpu); in kvmppc_kvm_pv()
189 vcpu->arch.shared_big_endian = shared_big_endian; in kvmppc_kvm_pv()
198 vcpu->arch.disable_kernel_nx = true; in kvmppc_kvm_pv()
199 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in kvmppc_kvm_pv()
202 vcpu->arch.magic_page_pa = param1 & ~0xfffULL; in kvmppc_kvm_pv()
203 vcpu->arch.magic_page_ea = param2 & ~0xfffULL; in kvmppc_kvm_pv()
210 if ((vcpu->arch.magic_page_pa & 0xf000) != in kvmppc_kvm_pv()
211 ((ulong)vcpu->arch.shared & 0xf000)) { in kvmppc_kvm_pv()
212 void *old_shared = vcpu->arch.shared; in kvmppc_kvm_pv()
213 ulong shared = (ulong)vcpu->arch.shared; in kvmppc_kvm_pv()
217 shared |= vcpu->arch.magic_page_pa & 0xf000; in kvmppc_kvm_pv()
220 vcpu->arch.shared = new_shared; in kvmppc_kvm_pv()
239 kvm_vcpu_block(vcpu); in kvmppc_kvm_pv()
240 kvm_clear_request(KVM_REQ_UNHALT, vcpu); in kvmppc_kvm_pv()
247 kvmppc_set_gpr(vcpu, 4, r2); in kvmppc_kvm_pv()
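
Taken together, the kvmppc_kvm_pv fragments show the register convention the paravirtual hypercall handler works with: the call number comes from GPR 11, up to four parameters from GPRs 3-6 (truncated when MSR_SF is clear), and a secondary result goes back in GPR 4; the magic-page call stores a page-aligned PA/EA pair and may re-map the shared page. A sketch of a dispatcher using that convention; the vcpu model, helpers and call numbers here are placeholders, not the kernel's:

/* Illustrative dispatcher for the GPR convention visible above:
 * call number in GPR 11, parameters in GPRs 3-6, secondary result in
 * GPR 4.  Structure, helpers and call numbers are placeholders. */
#include <stdint.h>

struct toy_vcpu { uint64_t gpr[32]; uint64_t magic_page_pa, magic_page_ea; };

static uint64_t toy_get_gpr(struct toy_vcpu *v, int n) { return v->gpr[n]; }
static void toy_set_gpr(struct toy_vcpu *v, int n, uint64_t val) { v->gpr[n] = val; }

enum { TOY_HC_MAGIC_PAGE = 1 };            /* placeholder call number */
enum { TOY_SUCCESS = 0, TOY_ENOSYS = -1 };

static int toy_kvm_pv(struct toy_vcpu *v)
{
    uint64_t nr = toy_get_gpr(v, 11);
    uint64_t p1 = toy_get_gpr(v, 3);
    uint64_t p2 = toy_get_gpr(v, 4);
    int r = TOY_ENOSYS;
    uint64_t r2 = 0;

    switch (nr) {
    case TOY_HC_MAGIC_PAGE:
        v->magic_page_pa = p1 & ~0xfffULL;  /* page-align, as in the listing */
        v->magic_page_ea = p2 & ~0xfffULL;
        r = TOY_SUCCESS;
        break;
    default:
        break;
    }

    toy_set_gpr(v, 4, r2);  /* secondary result, as in the fragment above */
    return r;               /* primary result; where it lands is not shown here */
}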
253 int kvmppc_sanity_check(struct kvm_vcpu *vcpu) in kvmppc_sanity_check() argument
258 if (!vcpu->arch.pvr) in kvmppc_sanity_check()
262 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled) in kvmppc_sanity_check()
266 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm)) in kvmppc_sanity_check()
277 vcpu->arch.sane = r; in kvmppc_sanity_check()
282 int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu) in kvmppc_emulate_mmio() argument
287 er = kvmppc_emulate_loadstore(vcpu); in kvmppc_emulate_mmio()
298 vcpu->run->exit_reason = KVM_EXIT_MMIO; in kvmppc_emulate_mmio()
309 kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); in kvmppc_emulate_mmio()
324 int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, in kvmppc_st() argument
327 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; in kvmppc_st()
331 vcpu->stat.st++; in kvmppc_st()
333 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr) in kvmppc_st()
334 r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr, in kvmppc_st()
340 r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST, in kvmppc_st()
351 if (kvmppc_supports_magic_page(vcpu) && mp_pa && in kvmppc_st()
353 !(kvmppc_get_msr(vcpu) & MSR_PR)) { in kvmppc_st()
354 void *magic = vcpu->arch.shared; in kvmppc_st()
360 if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size)) in kvmppc_st()
367 int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, in kvmppc_ld() argument
370 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; in kvmppc_ld()
374 vcpu->stat.ld++; in kvmppc_ld()
376 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr) in kvmppc_ld()
377 rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr, in kvmppc_ld()
383 rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST, in kvmppc_ld()
397 if (kvmppc_supports_magic_page(vcpu) && mp_pa && in kvmppc_ld()
399 !(kvmppc_get_msr(vcpu) & MSR_PR)) { in kvmppc_ld()
400 void *magic = vcpu->arch.shared; in kvmppc_ld()
406 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_ld()
407 rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size); in kvmppc_ld()
408 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in kvmppc_ld()
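
kvmppc_st and kvmppc_ld share one shape: try the optional store_to_eaddr/load_from_eaddr hook, otherwise translate the effective address with kvmppc_xlate, satisfy accesses that land in the magic page straight from the in-kernel shared page, and only then fall back to kvm_write_guest/kvm_read_guest (the read side under srcu_read_lock). A self-contained model of the magic-page short-circuit; the address comparison itself is an assumption, since the line performing it does not contain "vcpu" and is not part of this listing:

/* Model of the magic-page fast path suggested above: if the translated
 * real address falls in the 4 KiB magic page and the access is not from
 * problem state (MSR_PR clear), copy straight from the in-kernel shared
 * page instead of going through guest memory.  The page comparison and
 * the MSR_PR bit position are assumptions. */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define TOY_PAGE_MASK (~0xfffULL)
#define TOY_MSR_PR    (1ULL << 14)  /* placeholder bit position */

static bool toy_magic_page_hit(uint64_t raddr, uint64_t mp_pa, uint64_t msr)
{
    return mp_pa &&
           ((raddr & TOY_PAGE_MASK) == (mp_pa & TOY_PAGE_MASK)) &&
           !(msr & TOY_MSR_PR);
}

static int toy_ld(void *dst, size_t size, uint64_t raddr,
                  const void *magic_page, uint64_t mp_pa, uint64_t msr)
{
    if (toy_magic_page_hit(raddr, mp_pa, msr)) {
        memcpy(dst, (const char *)magic_page + (raddr & 0xfff), size);
        return 0;
    }
    return -1;  /* caller falls back to reading guest memory */
}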
467 struct kvm_vcpu *vcpu; in kvm_arch_destroy_vm() local
479 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_arch_destroy_vm()
480 kvm_vcpu_destroy(vcpu); in kvm_arch_destroy_vm()
731 struct kvm_vcpu *vcpu; in kvmppc_decrementer_wakeup() local
733 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer); in kvmppc_decrementer_wakeup()
734 kvmppc_decrementer_func(vcpu); in kvmppc_decrementer_wakeup()
739 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_create() argument
743 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); in kvm_arch_vcpu_create()
744 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; in kvm_arch_vcpu_create()
745 vcpu->arch.dec_expires = get_tb(); in kvm_arch_vcpu_create()
748 mutex_init(&vcpu->arch.exit_timing_lock); in kvm_arch_vcpu_create()
750 err = kvmppc_subarch_vcpu_init(vcpu); in kvm_arch_vcpu_create()
754 err = kvmppc_core_vcpu_create(vcpu); in kvm_arch_vcpu_create()
758 vcpu->arch.waitp = &vcpu->wait; in kvm_arch_vcpu_create()
759 kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id); in kvm_arch_vcpu_create()
763 kvmppc_subarch_vcpu_uninit(vcpu); in kvm_arch_vcpu_create()
767 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_postcreate() argument
771 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
773 /* Make sure we're not using the vcpu anymore */ in kvm_arch_vcpu_destroy()
774 hrtimer_cancel(&vcpu->arch.dec_timer); in kvm_arch_vcpu_destroy()
776 kvmppc_remove_vcpu_debugfs(vcpu); in kvm_arch_vcpu_destroy()
778 switch (vcpu->arch.irq_type) { in kvm_arch_vcpu_destroy()
780 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu); in kvm_arch_vcpu_destroy()
784 kvmppc_xive_cleanup_vcpu(vcpu); in kvm_arch_vcpu_destroy()
786 kvmppc_xics_free_icp(vcpu); in kvm_arch_vcpu_destroy()
789 kvmppc_xive_native_cleanup_vcpu(vcpu); in kvm_arch_vcpu_destroy()
793 kvmppc_core_vcpu_free(vcpu); in kvm_arch_vcpu_destroy()
795 kvmppc_subarch_vcpu_uninit(vcpu); in kvm_arch_vcpu_destroy()
798 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) in kvm_cpu_has_pending_timer() argument
800 return kvmppc_core_pending_dec(vcpu); in kvm_cpu_has_pending_timer()
803 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_vcpu_load() argument
813 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); in kvm_arch_vcpu_load()
815 kvmppc_core_vcpu_load(vcpu, cpu); in kvm_arch_vcpu_load()
818 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_put() argument
820 kvmppc_core_vcpu_put(vcpu); in kvm_arch_vcpu_put()
822 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); in kvm_arch_vcpu_put()
894 static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu, in kvmppc_set_vsr_dword() argument
898 int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_set_vsr_dword()
899 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_dword()
905 val.vval = VCPU_VSX_VR(vcpu, index - 32); in kvmppc_set_vsr_dword()
907 VCPU_VSX_VR(vcpu, index - 32) = val.vval; in kvmppc_set_vsr_dword()
909 VCPU_VSX_FPR(vcpu, index, offset) = gpr; in kvmppc_set_vsr_dword()
913 static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu, in kvmppc_set_vsr_dword_dump() argument
917 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_dword_dump()
920 val.vval = VCPU_VSX_VR(vcpu, index - 32); in kvmppc_set_vsr_dword_dump()
923 VCPU_VSX_VR(vcpu, index - 32) = val.vval; in kvmppc_set_vsr_dword_dump()
925 VCPU_VSX_FPR(vcpu, index, 0) = gpr; in kvmppc_set_vsr_dword_dump()
926 VCPU_VSX_FPR(vcpu, index, 1) = gpr; in kvmppc_set_vsr_dword_dump()
930 static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu, in kvmppc_set_vsr_word_dump() argument
934 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_word_dump()
941 VCPU_VSX_VR(vcpu, index - 32) = val.vval; in kvmppc_set_vsr_word_dump()
945 VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0]; in kvmppc_set_vsr_word_dump()
946 VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0]; in kvmppc_set_vsr_word_dump()
950 static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu, in kvmppc_set_vsr_word() argument
954 int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_set_vsr_word()
955 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_word()
962 val.vval = VCPU_VSX_VR(vcpu, index - 32); in kvmppc_set_vsr_word()
964 VCPU_VSX_VR(vcpu, index - 32) = val.vval; in kvmppc_set_vsr_word()
968 val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset); in kvmppc_set_vsr_word()
970 VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0]; in kvmppc_set_vsr_word()
976 static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_offset_generic() argument
985 if (kvmppc_need_byteswap(vcpu)) in kvmppc_get_vmx_offset_generic()
993 static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_dword_offset() argument
996 return kvmppc_get_vmx_offset_generic(vcpu, index, 8); in kvmppc_get_vmx_dword_offset()
999 static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_word_offset() argument
1002 return kvmppc_get_vmx_offset_generic(vcpu, index, 4); in kvmppc_get_vmx_word_offset()
1005 static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_hword_offset() argument
1008 return kvmppc_get_vmx_offset_generic(vcpu, index, 2); in kvmppc_get_vmx_hword_offset()
1011 static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_byte_offset() argument
1014 return kvmppc_get_vmx_offset_generic(vcpu, index, 1); in kvmppc_get_vmx_byte_offset()
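
The VMX access helpers above all reduce to kvmppc_get_vmx_offset_generic with an element size of 8, 4, 2 or 1 bytes, and the fragment shows the result depends on kvmppc_need_byteswap. A small stand-alone model, assuming the element index is simply mirrored within the 16-byte register when guest and host byte order differ:

/* Model of an element-offset helper for a 16-byte vector register:
 * 'index' selects an element of the given size, and the offset is
 * mirrored when a byteswap is needed.  The mirroring direction is an
 * assumption based on the byteswap check visible above. */
#include <stdio.h>

static int toy_vmx_offset(int index, int element_size, int need_byteswap)
{
    int elts = 16 / element_size;

    if (index < 0 || index >= elts)
        return -1;

    return need_byteswap ? elts - 1 - index : index;
}

int main(void)
{
    /* dword elements: indexes 0..1; word elements: indexes 0..3 */
    printf("%d %d\n", toy_vmx_offset(0, 8, 1), toy_vmx_offset(1, 4, 1)); /* 1 2 */
    return 0;
}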
1018 static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu, in kvmppc_set_vmx_dword() argument
1022 int offset = kvmppc_get_vmx_dword_offset(vcpu, in kvmppc_set_vmx_dword()
1023 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_dword()
1024 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_dword()
1029 val.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_set_vmx_dword()
1031 VCPU_VSX_VR(vcpu, index) = val.vval; in kvmppc_set_vmx_dword()
1034 static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu, in kvmppc_set_vmx_word() argument
1038 int offset = kvmppc_get_vmx_word_offset(vcpu, in kvmppc_set_vmx_word()
1039 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_word()
1040 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_word()
1045 val.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_set_vmx_word()
1047 VCPU_VSX_VR(vcpu, index) = val.vval; in kvmppc_set_vmx_word()
1050 static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu, in kvmppc_set_vmx_hword() argument
1054 int offset = kvmppc_get_vmx_hword_offset(vcpu, in kvmppc_set_vmx_hword()
1055 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_hword()
1056 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_hword()
1061 val.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_set_vmx_hword()
1063 VCPU_VSX_VR(vcpu, index) = val.vval; in kvmppc_set_vmx_hword()
1066 static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu, in kvmppc_set_vmx_byte() argument
1070 int offset = kvmppc_get_vmx_byte_offset(vcpu, in kvmppc_set_vmx_byte()
1071 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_byte()
1072 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_byte()
1077 val.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_set_vmx_byte()
1079 VCPU_VSX_VR(vcpu, index) = val.vval; in kvmppc_set_vmx_byte()
1113 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu) in kvmppc_complete_mmio_load() argument
1115 struct kvm_run *run = vcpu->run; in kvmppc_complete_mmio_load()
1123 if (!vcpu->arch.mmio_host_swabbed) { in kvmppc_complete_mmio_load()
1140 if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4)) in kvmppc_complete_mmio_load()
1143 if (vcpu->arch.mmio_sign_extend) { in kvmppc_complete_mmio_load()
1159 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { in kvmppc_complete_mmio_load()
1161 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); in kvmppc_complete_mmio_load()
1164 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1165 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP); in kvmppc_complete_mmio_load()
1167 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; in kvmppc_complete_mmio_load()
1171 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; in kvmppc_complete_mmio_load()
1174 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; in kvmppc_complete_mmio_load()
1175 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; in kvmppc_complete_mmio_load()
1180 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1181 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX); in kvmppc_complete_mmio_load()
1183 if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD) in kvmppc_complete_mmio_load()
1184 kvmppc_set_vsr_dword(vcpu, gpr); in kvmppc_complete_mmio_load()
1185 else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD) in kvmppc_complete_mmio_load()
1186 kvmppc_set_vsr_word(vcpu, gpr); in kvmppc_complete_mmio_load()
1187 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1189 kvmppc_set_vsr_dword_dump(vcpu, gpr); in kvmppc_complete_mmio_load()
1190 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1192 kvmppc_set_vsr_word_dump(vcpu, gpr); in kvmppc_complete_mmio_load()
1197 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1198 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC); in kvmppc_complete_mmio_load()
1200 if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD) in kvmppc_complete_mmio_load()
1201 kvmppc_set_vmx_dword(vcpu, gpr); in kvmppc_complete_mmio_load()
1202 else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD) in kvmppc_complete_mmio_load()
1203 kvmppc_set_vmx_word(vcpu, gpr); in kvmppc_complete_mmio_load()
1204 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1206 kvmppc_set_vmx_hword(vcpu, gpr); in kvmppc_complete_mmio_load()
1207 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1209 kvmppc_set_vmx_byte(vcpu, gpr); in kvmppc_complete_mmio_load()
1214 if (kvmppc_need_byteswap(vcpu)) in kvmppc_complete_mmio_load()
1216 kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr, in kvmppc_complete_mmio_load()
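
kvmppc_complete_mmio_load assembles the loaded value from run->mmio.data, byte-swapping it when mmio_host_swabbed is set, widening 4-byte accesses when mmio_sp64_extend is set, sign-extending when mmio_sign_extend is set, and then routing it to a GPR, FPR, QPR, VSX or VMX destination according to io_gpr. A user-space model of the buffer-to-value step (the single-precision widening and the register routing are left out):

/* Model of turning an MMIO data buffer into a 64-bit register value:
 * optional byteswap (host and access byte order disagree) and optional
 * sign extension keyed on the access length, mirroring the
 * mmio_host_swabbed / mmio_sign_extend flags in the listing. */
#include <stdint.h>
#include <string.h>

static uint64_t toy_mmio_to_gpr(const void *data, int len,
                                int host_swabbed, int sign_extend)
{
    uint64_t gpr = 0;

    memcpy(&gpr, data, len);  /* assumes a little-endian host for brevity */

    if (host_swabbed) {
        switch (len) {
        case 8: gpr = __builtin_bswap64(gpr); break;
        case 4: gpr = __builtin_bswap32((uint32_t)gpr); break;
        case 2: gpr = __builtin_bswap16((uint16_t)gpr); break;
        }
    }

    if (sign_extend) {
        switch (len) {
        case 4: gpr = (uint64_t)(int64_t)(int32_t)gpr; break;
        case 2: gpr = (uint64_t)(int64_t)(int16_t)gpr; break;
        case 1: gpr = (uint64_t)(int64_t)(int8_t)gpr; break;
        }
    }

    return gpr;
}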
1225 static int __kvmppc_handle_load(struct kvm_vcpu *vcpu, in __kvmppc_handle_load() argument
1229 struct kvm_run *run = vcpu->run; in __kvmppc_handle_load()
1234 if (kvmppc_need_byteswap(vcpu)) { in __kvmppc_handle_load()
1245 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in __kvmppc_handle_load()
1249 vcpu->arch.io_gpr = rt; in __kvmppc_handle_load()
1250 vcpu->arch.mmio_host_swabbed = host_swabbed; in __kvmppc_handle_load()
1251 vcpu->mmio_needed = 1; in __kvmppc_handle_load()
1252 vcpu->mmio_is_write = 0; in __kvmppc_handle_load()
1253 vcpu->arch.mmio_sign_extend = sign_extend; in __kvmppc_handle_load()
1255 idx = srcu_read_lock(&vcpu->kvm->srcu); in __kvmppc_handle_load()
1257 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, in __kvmppc_handle_load()
1260 srcu_read_unlock(&vcpu->kvm->srcu, idx); in __kvmppc_handle_load()
1263 kvmppc_complete_mmio_load(vcpu); in __kvmppc_handle_load()
1264 vcpu->mmio_needed = 0; in __kvmppc_handle_load()
1271 int kvmppc_handle_load(struct kvm_vcpu *vcpu, in kvmppc_handle_load() argument
1275 return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0); in kvmppc_handle_load()
1280 int kvmppc_handle_loads(struct kvm_vcpu *vcpu, in kvmppc_handle_loads() argument
1284 return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1); in kvmppc_handle_loads()
1288 int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu, in kvmppc_handle_vsx_load() argument
1295 if (vcpu->arch.mmio_vsx_copy_nums > 4) in kvmppc_handle_vsx_load()
1298 while (vcpu->arch.mmio_vsx_copy_nums) { in kvmppc_handle_vsx_load()
1299 emulated = __kvmppc_handle_load(vcpu, rt, bytes, in kvmppc_handle_vsx_load()
1305 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vsx_load()
1307 vcpu->arch.mmio_vsx_copy_nums--; in kvmppc_handle_vsx_load()
1308 vcpu->arch.mmio_vsx_offset++; in kvmppc_handle_vsx_load()
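
kvmppc_handle_vsx_load rejects more than four element copies and then loops, issuing one __kvmppc_handle_load per element while advancing paddr_accessed by the MMIO length and updating the copy counter and offset; the same counters are what kvm_arch_vcpu_ioctl_run later uses to resume the sequence after a user-space exit. A compact model of that bookkeeping, with toy_one_access standing in for the single-element helper:

/* Model of the split-into-elements loop: a vector load of 'copies'
 * elements of 'len' bytes each is issued as individual accesses, with
 * the physical address and element offset advanced after each one.
 * toy_one_access is assumed to return 0 on success or a negative value
 * when the access must first be completed in user space. */
struct toy_mmio_state {
    unsigned long paddr;
    int copies;   /* mmio_vsx_copy_nums-like counter */
    int offset;   /* mmio_vsx_offset-like element index */
};

static int toy_vsx_load(struct toy_mmio_state *s, int len,
                        int (*toy_one_access)(unsigned long paddr, int len))
{
    if (s->copies > 4)
        return -1;  /* reject, as the cap in the listing suggests */

    while (s->copies) {
        int r = toy_one_access(s->paddr, len);
        if (r < 0)
            return r;   /* resume later from the saved state */
        s->paddr += len;
        s->copies--;
        s->offset++;
    }
    return 0;
}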
1314 int kvmppc_handle_store(struct kvm_vcpu *vcpu, in kvmppc_handle_store() argument
1317 struct kvm_run *run = vcpu->run; in kvmppc_handle_store()
1323 if (kvmppc_need_byteswap(vcpu)) { in kvmppc_handle_store()
1334 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in kvmppc_handle_store()
1337 vcpu->mmio_needed = 1; in kvmppc_handle_store()
1338 vcpu->mmio_is_write = 1; in kvmppc_handle_store()
1340 if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4)) in kvmppc_handle_store()
1360 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_handle_store()
1362 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, in kvmppc_handle_store()
1365 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvmppc_handle_store()
1368 vcpu->mmio_needed = 0; in kvmppc_handle_store()
1377 static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val) in kvmppc_get_vsr_data() argument
1382 int copy_type = vcpu->arch.mmio_copy_type; in kvmppc_get_vsr_data()
1388 kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_get_vsr_data()
1396 *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset); in kvmppc_get_vsr_data()
1398 reg.vval = VCPU_VSX_VR(vcpu, rs - 32); in kvmppc_get_vsr_data()
1405 kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_get_vsr_data()
1415 reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset); in kvmppc_get_vsr_data()
1418 reg.vval = VCPU_VSX_VR(vcpu, rs - 32); in kvmppc_get_vsr_data()
1431 int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu, in kvmppc_handle_vsx_store() argument
1437 vcpu->arch.io_gpr = rs; in kvmppc_handle_vsx_store()
1440 if (vcpu->arch.mmio_vsx_copy_nums > 4) in kvmppc_handle_vsx_store()
1443 while (vcpu->arch.mmio_vsx_copy_nums) { in kvmppc_handle_vsx_store()
1444 if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1) in kvmppc_handle_vsx_store()
1447 emulated = kvmppc_handle_store(vcpu, in kvmppc_handle_vsx_store()
1453 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vsx_store()
1455 vcpu->arch.mmio_vsx_copy_nums--; in kvmppc_handle_vsx_store()
1456 vcpu->arch.mmio_vsx_offset++; in kvmppc_handle_vsx_store()
1462 static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu) in kvmppc_emulate_mmio_vsx_loadstore() argument
1464 struct kvm_run *run = vcpu->run; in kvmppc_emulate_mmio_vsx_loadstore()
1468 vcpu->arch.paddr_accessed += run->mmio.len; in kvmppc_emulate_mmio_vsx_loadstore()
1470 if (!vcpu->mmio_is_write) { in kvmppc_emulate_mmio_vsx_loadstore()
1471 emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr, in kvmppc_emulate_mmio_vsx_loadstore()
1472 run->mmio.len, 1, vcpu->arch.mmio_sign_extend); in kvmppc_emulate_mmio_vsx_loadstore()
1474 emulated = kvmppc_handle_vsx_store(vcpu, in kvmppc_emulate_mmio_vsx_loadstore()
1475 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vsx_loadstore()
1498 int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu, in kvmppc_handle_vmx_load() argument
1503 if (vcpu->arch.mmio_vsx_copy_nums > 2) in kvmppc_handle_vmx_load()
1506 while (vcpu->arch.mmio_vmx_copy_nums) { in kvmppc_handle_vmx_load()
1507 emulated = __kvmppc_handle_load(vcpu, rt, bytes, in kvmppc_handle_vmx_load()
1513 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vmx_load()
1514 vcpu->arch.mmio_vmx_copy_nums--; in kvmppc_handle_vmx_load()
1515 vcpu->arch.mmio_vmx_offset++; in kvmppc_handle_vmx_load()
1521 int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val) in kvmppc_get_vmx_dword() argument
1528 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_dword()
1533 reg.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_get_vmx_dword()
1539 int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val) in kvmppc_get_vmx_word() argument
1546 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_word()
1551 reg.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_get_vmx_word()
1557 int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val) in kvmppc_get_vmx_hword() argument
1564 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_hword()
1569 reg.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_get_vmx_hword()
1575 int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val) in kvmppc_get_vmx_byte() argument
1582 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_byte()
1587 reg.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_get_vmx_byte()
1593 int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu, in kvmppc_handle_vmx_store() argument
1600 if (vcpu->arch.mmio_vsx_copy_nums > 2) in kvmppc_handle_vmx_store()
1603 vcpu->arch.io_gpr = rs; in kvmppc_handle_vmx_store()
1605 while (vcpu->arch.mmio_vmx_copy_nums) { in kvmppc_handle_vmx_store()
1606 switch (vcpu->arch.mmio_copy_type) { in kvmppc_handle_vmx_store()
1608 if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1613 if (kvmppc_get_vmx_word(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1617 if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1621 if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1628 emulated = kvmppc_handle_store(vcpu, val, bytes, in kvmppc_handle_vmx_store()
1633 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vmx_store()
1634 vcpu->arch.mmio_vmx_copy_nums--; in kvmppc_handle_vmx_store()
1635 vcpu->arch.mmio_vmx_offset++; in kvmppc_handle_vmx_store()
1641 static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu) in kvmppc_emulate_mmio_vmx_loadstore() argument
1643 struct kvm_run *run = vcpu->run; in kvmppc_emulate_mmio_vmx_loadstore()
1647 vcpu->arch.paddr_accessed += run->mmio.len; in kvmppc_emulate_mmio_vmx_loadstore()
1649 if (!vcpu->mmio_is_write) { in kvmppc_emulate_mmio_vmx_loadstore()
1650 emulated = kvmppc_handle_vmx_load(vcpu, in kvmppc_emulate_mmio_vmx_loadstore()
1651 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vmx_loadstore()
1653 emulated = kvmppc_handle_vmx_store(vcpu, in kvmppc_emulate_mmio_vmx_loadstore()
1654 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vmx_loadstore()
1676 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) in kvm_vcpu_ioctl_get_one_reg() argument
1686 r = kvmppc_get_one_reg(vcpu, reg->id, &val); in kvm_vcpu_ioctl_get_one_reg()
1696 val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0]; in kvm_vcpu_ioctl_get_one_reg()
1703 val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]); in kvm_vcpu_ioctl_get_one_reg()
1706 val = get_reg_val(reg->id, vcpu->arch.vrsave); in kvm_vcpu_ioctl_get_one_reg()
1724 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) in kvm_vcpu_ioctl_set_one_reg() argument
1737 r = kvmppc_set_one_reg(vcpu, reg->id, &val); in kvm_vcpu_ioctl_set_one_reg()
1747 vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval; in kvm_vcpu_ioctl_set_one_reg()
1754 vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val); in kvm_vcpu_ioctl_set_one_reg()
1761 vcpu->arch.vrsave = set_reg_val(reg->id, val); in kvm_vcpu_ioctl_set_one_reg()
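
The two ioctl handlers above are the kernel side of the ONE_REG interface: they defer to kvmppc_get_one_reg/kvmppc_set_one_reg and handle the VR, VSCR and VRSAVE cases locally. From user space the same registers are reached with struct kvm_one_reg; a minimal sketch, assuming an already-created vCPU fd and that the PPC register id used is provided by the kernel headers on the build host:

/* User-space sketch of KVM_GET_ONE_REG.  The id's size field tells the
 * kernel how many bytes to copy to 'addr'; a 64-bit buffer is large
 * enough here.  KVM_REG_PPC_VRSAVE is assumed to be exported by
 * <linux/kvm.h>/<asm/kvm.h> on a powerpc build. */
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int get_one_reg(int vcpu_fd, uint64_t id, uint64_t *out)
{
    struct kvm_one_reg reg = { .id = id, .addr = (uintptr_t)out };

    if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0) {
        perror("KVM_GET_ONE_REG");
        return -1;
    }
    return 0;
}

/* e.g.: uint64_t vrsave = 0; get_one_reg(vcpu_fd, KVM_REG_PPC_VRSAVE, &vrsave); */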
1773 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_ioctl_run() argument
1775 struct kvm_run *run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
1778 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_run()
1780 if (vcpu->mmio_needed) { in kvm_arch_vcpu_ioctl_run()
1781 vcpu->mmio_needed = 0; in kvm_arch_vcpu_ioctl_run()
1782 if (!vcpu->mmio_is_write) in kvm_arch_vcpu_ioctl_run()
1783 kvmppc_complete_mmio_load(vcpu); in kvm_arch_vcpu_ioctl_run()
1785 if (vcpu->arch.mmio_vsx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1786 vcpu->arch.mmio_vsx_copy_nums--; in kvm_arch_vcpu_ioctl_run()
1787 vcpu->arch.mmio_vsx_offset++; in kvm_arch_vcpu_ioctl_run()
1790 if (vcpu->arch.mmio_vsx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1791 r = kvmppc_emulate_mmio_vsx_loadstore(vcpu); in kvm_arch_vcpu_ioctl_run()
1793 vcpu->mmio_needed = 1; in kvm_arch_vcpu_ioctl_run()
1799 if (vcpu->arch.mmio_vmx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1800 vcpu->arch.mmio_vmx_copy_nums--; in kvm_arch_vcpu_ioctl_run()
1801 vcpu->arch.mmio_vmx_offset++; in kvm_arch_vcpu_ioctl_run()
1804 if (vcpu->arch.mmio_vmx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1805 r = kvmppc_emulate_mmio_vmx_loadstore(vcpu); in kvm_arch_vcpu_ioctl_run()
1807 vcpu->mmio_needed = 1; in kvm_arch_vcpu_ioctl_run()
1812 } else if (vcpu->arch.osi_needed) { in kvm_arch_vcpu_ioctl_run()
1817 kvmppc_set_gpr(vcpu, i, gprs[i]); in kvm_arch_vcpu_ioctl_run()
1818 vcpu->arch.osi_needed = 0; in kvm_arch_vcpu_ioctl_run()
1819 } else if (vcpu->arch.hcall_needed) { in kvm_arch_vcpu_ioctl_run()
1822 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret); in kvm_arch_vcpu_ioctl_run()
1824 kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]); in kvm_arch_vcpu_ioctl_run()
1825 vcpu->arch.hcall_needed = 0; in kvm_arch_vcpu_ioctl_run()
1827 } else if (vcpu->arch.epr_needed) { in kvm_arch_vcpu_ioctl_run()
1828 kvmppc_set_epr(vcpu, run->epr.epr); in kvm_arch_vcpu_ioctl_run()
1829 vcpu->arch.epr_needed = 0; in kvm_arch_vcpu_ioctl_run()
1833 kvm_sigset_activate(vcpu); in kvm_arch_vcpu_ioctl_run()
1838 r = kvmppc_vcpu_run(vcpu); in kvm_arch_vcpu_ioctl_run()
1840 kvm_sigset_deactivate(vcpu); in kvm_arch_vcpu_ioctl_run()
1845 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_run()
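
kvm_arch_vcpu_ioctl_run starts by finishing whatever caused the last exit: completing a pending MMIO load, resuming a partially emulated VSX/VMX sequence (decrement the copy counter, bump the offset, re-emulate, and set mmio_needed again if another round trip is required), or consuming OSI/hcall/EPR results, before activating the signal mask and entering kvmppc_vcpu_run. Seen from the other side of the ioctl this is the familiar VMM run loop; a sketch, with handle_mmio_read/handle_mmio_write as hypothetical device-model hooks:

/* User-space view of the MMIO round trip served by the fragments above:
 * on KVM_EXIT_MMIO the VMM satisfies the access via run->mmio and calls
 * KVM_RUN again; the kernel side then completes the pending load (and,
 * for vector accesses, issues the next element). */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* hypothetical device-model hooks supplied elsewhere */
void handle_mmio_read(uint64_t addr, void *data, unsigned int len);
void handle_mmio_write(uint64_t addr, const void *data, unsigned int len);

static int run_vcpu(int vcpu_fd, struct kvm_run *run)
{
    for (;;) {
        if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
            return -1;

        switch (run->exit_reason) {
        case KVM_EXIT_MMIO:
            if (run->mmio.is_write)
                handle_mmio_write(run->mmio.phys_addr,
                                  run->mmio.data, run->mmio.len);
            else
                handle_mmio_read(run->mmio.phys_addr,
                                 run->mmio.data, run->mmio.len);
            break;          /* loop: the next KVM_RUN completes the load */
        case KVM_EXIT_INTR:
            break;          /* signal pending, as in kvmppc_prepare_to_enter */
        default:
            return 0;       /* hand anything else to the caller */
        }
    }
}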
1849 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) in kvm_vcpu_ioctl_interrupt() argument
1852 kvmppc_core_dequeue_external(vcpu); in kvm_vcpu_ioctl_interrupt()
1856 kvmppc_core_queue_external(vcpu, irq); in kvm_vcpu_ioctl_interrupt()
1858 kvm_vcpu_kick(vcpu); in kvm_vcpu_ioctl_interrupt()
1863 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_enable_cap() argument
1874 vcpu->arch.osi_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1878 vcpu->arch.papr_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1883 vcpu->arch.epr_flags |= KVMPPC_EPR_USER; in kvm_vcpu_ioctl_enable_cap()
1885 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER; in kvm_vcpu_ioctl_enable_cap()
1890 vcpu->arch.watchdog_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1902 r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg); in kvm_vcpu_ioctl_enable_cap()
1919 r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
1939 r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
1941 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
1965 r = kvmppc_xive_native_connect_vcpu(dev, vcpu, in kvm_vcpu_ioctl_enable_cap()
1975 if (!is_kvmppc_hv_enabled(vcpu->kvm)) in kvm_vcpu_ioctl_enable_cap()
1978 vcpu->kvm->arch.fwnmi_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1987 r = kvmppc_sanity_check(vcpu); in kvm_vcpu_ioctl_enable_cap()
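
kvm_vcpu_ioctl_enable_cap is the vCPU-level KVM_ENABLE_CAP handler, covering capabilities such as KVM_CAP_PPC_OSI, KVM_CAP_PPC_PAPR and KVM_CAP_PPC_EPR as well as the interrupt-controller connections, and it finishes with kvmppc_sanity_check. Enabling one of them from user space is a single ioctl; a minimal sketch using KVM_CAP_PPC_PAPR, matching the papr_enabled case above:

/* Enabling a vCPU capability from user space; KVM_CAP_PPC_PAPR matches
 * the papr_enabled case in the listing.  Error handling is minimal. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_papr(int vcpu_fd)
{
    struct kvm_enable_cap cap;

    memset(&cap, 0, sizeof(cap));
    cap.cap = KVM_CAP_PPC_PAPR;

    return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}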
2005 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_mpstate() argument
2011 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_mpstate() argument
2020 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl() local
2027 return kvm_vcpu_ioctl_interrupt(vcpu, &irq); in kvm_arch_vcpu_async_ioctl()
2035 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl() local
2044 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl()
2047 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); in kvm_arch_vcpu_ioctl()
2048 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl()
2060 r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
2062 r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
2070 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl()
2073 r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); in kvm_arch_vcpu_ioctl()
2074 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl()
2086 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument