Lines Matching full:vcpu
56 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) in kvm_arch_dy_runnable() argument
58 return kvm_arch_vcpu_runnable(vcpu); in kvm_arch_dy_runnable()
61 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_in_kernel() argument
66 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
80 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) in kvmppc_prepare_to_enter() argument
96 kvmppc_account_exit(vcpu, SIGNAL_EXITS); in kvmppc_prepare_to_enter()
97 vcpu->run->exit_reason = KVM_EXIT_INTR; in kvmppc_prepare_to_enter()
102 vcpu->mode = IN_GUEST_MODE; in kvmppc_prepare_to_enter()
105 * Reading vcpu->requests must happen after setting vcpu->mode, in kvmppc_prepare_to_enter()
110 * to the page tables done while the VCPU is running. in kvmppc_prepare_to_enter()
115 if (kvm_request_pending(vcpu)) { in kvmppc_prepare_to_enter()
118 trace_kvm_check_requests(vcpu); in kvmppc_prepare_to_enter()
119 r = kvmppc_core_check_requests(vcpu); in kvmppc_prepare_to_enter()
126 if (kvmppc_core_prepare_to_enter(vcpu)) { in kvmppc_prepare_to_enter()
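The kvmppc_prepare_to_enter() hits above set vcpu->mode = IN_GUEST_MODE and only then check for pending requests, with the in-source comment that reading vcpu->requests must happen after setting vcpu->mode. The following is a userspace C11-atomics analogy of that store-then-fence-then-load handshake; it is an illustrative sketch only, and the names guest_mode, pending_request, vcpu_side and requester_side are made up here (the kernel uses vcpu->mode, smp_mb() and kvm_request_pending()).

    /* Analogy only: the ordering pattern behind the prepare-to-enter hits. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool guest_mode;       /* stands in for vcpu->mode     */
    static atomic_bool pending_request;  /* stands in for vcpu->requests */

    static void vcpu_side(void)
    {
            /* Publish "I am entering the guest" ... */
            atomic_store(&guest_mode, true);
            /* ... then make sure the request check cannot be reordered
             * before that store (the kernel uses smp_mb() here). */
            atomic_thread_fence(memory_order_seq_cst);
            if (atomic_load(&pending_request))
                    printf("request seen before entering the guest\n");
    }

    static void requester_side(void)
    {
            /* Post the request first ... */
            atomic_store(&pending_request, true);
            atomic_thread_fence(memory_order_seq_cst);
            /* ... then decide whether a kick (IPI) is needed. */
            if (atomic_load(&guest_mode))
                    printf("vcpu already in guest mode, kick it\n");
    }

    int main(void)
    {
            requester_side();
            vcpu_side();
            return 0;
    }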
143 static void kvmppc_swab_shared(struct kvm_vcpu *vcpu) in kvmppc_swab_shared() argument
145 struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared; in kvmppc_swab_shared()
163 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) in kvmppc_kvm_pv() argument
165 int nr = kvmppc_get_gpr(vcpu, 11); in kvmppc_kvm_pv()
167 unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3); in kvmppc_kvm_pv()
168 unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4); in kvmppc_kvm_pv()
169 unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5); in kvmppc_kvm_pv()
170 unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6); in kvmppc_kvm_pv()
173 if (!(kvmppc_get_msr(vcpu) & MSR_SF)) { in kvmppc_kvm_pv()
187 if (vcpu->arch.intr_msr & MSR_LE) in kvmppc_kvm_pv()
189 if (shared_big_endian != vcpu->arch.shared_big_endian) in kvmppc_kvm_pv()
190 kvmppc_swab_shared(vcpu); in kvmppc_kvm_pv()
191 vcpu->arch.shared_big_endian = shared_big_endian; in kvmppc_kvm_pv()
200 vcpu->arch.disable_kernel_nx = true; in kvmppc_kvm_pv()
201 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in kvmppc_kvm_pv()
204 vcpu->arch.magic_page_pa = param1 & ~0xfffULL; in kvmppc_kvm_pv()
205 vcpu->arch.magic_page_ea = param2 & ~0xfffULL; in kvmppc_kvm_pv()
212 if ((vcpu->arch.magic_page_pa & 0xf000) != in kvmppc_kvm_pv()
213 ((ulong)vcpu->arch.shared & 0xf000)) { in kvmppc_kvm_pv()
214 void *old_shared = vcpu->arch.shared; in kvmppc_kvm_pv()
215 ulong shared = (ulong)vcpu->arch.shared; in kvmppc_kvm_pv()
219 shared |= vcpu->arch.magic_page_pa & 0xf000; in kvmppc_kvm_pv()
222 vcpu->arch.shared = new_shared; in kvmppc_kvm_pv()
241 kvm_vcpu_halt(vcpu); in kvmppc_kvm_pv()
248 kvmppc_set_gpr(vcpu, 4, r2); in kvmppc_kvm_pv()
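The kvmppc_kvm_pv() hits show the paravirt hypercall dispatcher reading the call number from GPR 11 and up to four parameters from GPRs 3-6, and writing a secondary result back into GPR 4 before returning the primary result to its caller. Below is a compact, self-contained model of that register convention; toy_vcpu, toy_get_gpr, toy_set_gpr and the hypercall numbering are simplified stand-ins, not the kernel's types or ABI definitions.

    /* Toy model of the register convention visible in the kvmppc_kvm_pv() hits. */
    #include <stdio.h>

    struct toy_vcpu {
            unsigned long gpr[32];
    };

    static unsigned long toy_get_gpr(struct toy_vcpu *v, int n)            { return v->gpr[n]; }
    static void toy_set_gpr(struct toy_vcpu *v, int n, unsigned long val)  { v->gpr[n] = val;  }

    /* Returns the primary result; a secondary value goes to GPR 4. */
    static unsigned long toy_kvm_pv(struct toy_vcpu *v)
    {
            unsigned long nr = toy_get_gpr(v, 11);     /* hypercall number  */
            unsigned long param1 = toy_get_gpr(v, 3);  /* parameters 1..4   */
            unsigned long param2 = toy_get_gpr(v, 4);  /* live in GPRs 3..6 */
            unsigned long r = -1UL;                    /* "unhandled" default */
            unsigned long r2 = 0;

            switch (nr) {
            case 0: /* hypothetical call for the sketch */
                    r = 0;
                    r2 = param1 ^ param2;
                    break;
            }

            toy_set_gpr(v, 4, r2);
            return r;
    }

    int main(void)
    {
            struct toy_vcpu v = { .gpr = { [11] = 0, [3] = 1, [4] = 2 } };

            printf("r=%lu gpr4=%lu\n", toy_kvm_pv(&v), v.gpr[4]);
            return 0;
    }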
254 int kvmppc_sanity_check(struct kvm_vcpu *vcpu) in kvmppc_sanity_check() argument
259 if (!vcpu->arch.pvr) in kvmppc_sanity_check()
263 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled) in kvmppc_sanity_check()
267 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm)) in kvmppc_sanity_check()
278 vcpu->arch.sane = r; in kvmppc_sanity_check()
283 int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu) in kvmppc_emulate_mmio() argument
288 er = kvmppc_emulate_loadstore(vcpu); in kvmppc_emulate_mmio()
299 vcpu->run->exit_reason = KVM_EXIT_MMIO; in kvmppc_emulate_mmio()
310 kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); in kvmppc_emulate_mmio()
322 if (vcpu->mmio_is_write) in kvmppc_emulate_mmio()
325 kvmppc_core_queue_data_storage(vcpu, vcpu->arch.vaddr_accessed, dsisr); in kvmppc_emulate_mmio()
332 kvmppc_core_queue_program(vcpu, 0); in kvmppc_emulate_mmio()
347 int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, in kvmppc_st() argument
350 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; in kvmppc_st()
354 vcpu->stat.st++; in kvmppc_st()
356 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr) in kvmppc_st()
357 r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr, in kvmppc_st()
363 r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST, in kvmppc_st()
374 if (kvmppc_supports_magic_page(vcpu) && mp_pa && in kvmppc_st()
376 !(kvmppc_get_msr(vcpu) & MSR_PR)) { in kvmppc_st()
377 void *magic = vcpu->arch.shared; in kvmppc_st()
383 if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size)) in kvmppc_st()
390 int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, in kvmppc_ld() argument
393 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; in kvmppc_ld()
397 vcpu->stat.ld++; in kvmppc_ld()
399 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr) in kvmppc_ld()
400 rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr, in kvmppc_ld()
406 rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST, in kvmppc_ld()
420 if (kvmppc_supports_magic_page(vcpu) && mp_pa && in kvmppc_ld()
422 !(kvmppc_get_msr(vcpu) & MSR_PR)) { in kvmppc_ld()
423 void *magic = vcpu->arch.shared; in kvmppc_ld()
429 kvm_vcpu_srcu_read_lock(vcpu); in kvmppc_ld()
430 rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size); in kvmppc_ld()
431 kvm_vcpu_srcu_read_unlock(vcpu); in kvmppc_ld()
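The kvmppc_st()/kvmppc_ld() hits follow the same shape: translate the effective address (kvmppc_xlate), then service the access either from the vCPU's magic/shared page or from ordinary guest memory (kvm_write_guest / kvm_read_guest under SRCU). The standalone toy below mirrors only that decision; toy_xlate, toy_ld, guest_ram and magic_page are invented for the sketch.

    /* Toy shape of kvmppc_ld(): translate, then magic page or guest RAM. */
    #include <stdio.h>
    #include <string.h>

    #define TOY_PAGE_MASK (~0xfffUL)

    struct toy_pte { unsigned long raddr; };

    /* Hypothetical translation: identity map for the sketch. */
    static int toy_xlate(unsigned long eaddr, struct toy_pte *pte)
    {
            pte->raddr = eaddr;
            return 0;
    }

    static unsigned char guest_ram[0x4000];
    static unsigned char magic_page[0x1000];
    static unsigned long magic_page_pa = 0x3000;   /* where the guest mapped it */

    static int toy_ld(unsigned long eaddr, int size, void *ptr)
    {
            struct toy_pte pte;

            if (toy_xlate(eaddr, &pte))
                    return -1;

            if ((pte.raddr & TOY_PAGE_MASK) == magic_page_pa) {
                    /* Backed by the shared/magic page, not by guest RAM. */
                    memcpy(ptr, &magic_page[pte.raddr & ~TOY_PAGE_MASK], size);
                    return 0;
            }

            memcpy(ptr, &guest_ram[pte.raddr], size);
            return 0;
    }

    int main(void)
    {
            unsigned char v = 0;

            magic_page[0x10] = 0x2a;
            toy_ld(magic_page_pa + 0x10, 1, &v);
            printf("%x\n", v);
            return 0;
    }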
774 struct kvm_vcpu *vcpu; in kvmppc_decrementer_wakeup() local
776 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer); in kvmppc_decrementer_wakeup()
777 kvmppc_decrementer_func(vcpu); in kvmppc_decrementer_wakeup()
782 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_create() argument
786 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); in kvm_arch_vcpu_create()
787 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; in kvm_arch_vcpu_create()
790 mutex_init(&vcpu->arch.exit_timing_lock); in kvm_arch_vcpu_create()
792 err = kvmppc_subarch_vcpu_init(vcpu); in kvm_arch_vcpu_create()
796 err = kvmppc_core_vcpu_create(vcpu); in kvm_arch_vcpu_create()
800 rcuwait_init(&vcpu->arch.wait); in kvm_arch_vcpu_create()
801 vcpu->arch.waitp = &vcpu->arch.wait; in kvm_arch_vcpu_create()
805 kvmppc_subarch_vcpu_uninit(vcpu); in kvm_arch_vcpu_create()
809 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_postcreate() argument
813 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
815 /* Make sure we're not using the vcpu anymore */ in kvm_arch_vcpu_destroy()
816 hrtimer_cancel(&vcpu->arch.dec_timer); in kvm_arch_vcpu_destroy()
818 switch (vcpu->arch.irq_type) { in kvm_arch_vcpu_destroy()
820 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu); in kvm_arch_vcpu_destroy()
824 kvmppc_xive_cleanup_vcpu(vcpu); in kvm_arch_vcpu_destroy()
826 kvmppc_xics_free_icp(vcpu); in kvm_arch_vcpu_destroy()
829 kvmppc_xive_native_cleanup_vcpu(vcpu); in kvm_arch_vcpu_destroy()
833 kvmppc_core_vcpu_free(vcpu); in kvm_arch_vcpu_destroy()
835 kvmppc_subarch_vcpu_uninit(vcpu); in kvm_arch_vcpu_destroy()
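kvm_arch_vcpu_create() initializes the decrementer hrtimer and subarch state before calling the core create hook, and the error path unwinds only the steps that already succeeded (kvmppc_subarch_vcpu_uninit on core-create failure); kvm_arch_vcpu_destroy() tears things down in roughly the reverse order. The minimal sketch below shows only that goto-unwind pattern; subarch_init, core_create, subarch_uninit and toy_vcpu_create are hypothetical names standing in for the kernel hooks named in the hits.

    /* Toy goto-unwind pattern, as used by the vcpu create hits above. */
    #include <stdio.h>

    static int subarch_init(void)    { puts("subarch init");   return 0;  }
    static int core_create(void)     { puts("core create");    return -1; } /* simulate failure */
    static void subarch_uninit(void) { puts("subarch uninit"); }

    static int toy_vcpu_create(void)
    {
            int err;

            err = subarch_init();           /* cf. kvmppc_subarch_vcpu_init()   */
            if (err)
                    return err;

            err = core_create();            /* cf. kvmppc_core_vcpu_create()    */
            if (err)
                    goto out_uninit;

            return 0;

    out_uninit:
            subarch_uninit();               /* cf. kvmppc_subarch_vcpu_uninit() */
            return err;
    }

    int main(void)
    {
            return toy_vcpu_create() ? 1 : 0;
    }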
838 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) in kvm_cpu_has_pending_timer() argument
840 return kvmppc_core_pending_dec(vcpu); in kvm_cpu_has_pending_timer()
843 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_vcpu_load() argument
853 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); in kvm_arch_vcpu_load()
855 kvmppc_core_vcpu_load(vcpu, cpu); in kvm_arch_vcpu_load()
858 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_put() argument
860 kvmppc_core_vcpu_put(vcpu); in kvm_arch_vcpu_put()
862 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); in kvm_arch_vcpu_put()
934 static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu, in kvmppc_set_vsr_dword() argument
938 int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_set_vsr_dword()
939 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_dword()
945 val.vval = VCPU_VSX_VR(vcpu, index - 32); in kvmppc_set_vsr_dword()
947 VCPU_VSX_VR(vcpu, index - 32) = val.vval; in kvmppc_set_vsr_dword()
949 VCPU_VSX_FPR(vcpu, index, offset) = gpr; in kvmppc_set_vsr_dword()
953 static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu, in kvmppc_set_vsr_dword_dump() argument
957 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_dword_dump()
960 val.vval = VCPU_VSX_VR(vcpu, index - 32); in kvmppc_set_vsr_dword_dump()
963 VCPU_VSX_VR(vcpu, index - 32) = val.vval; in kvmppc_set_vsr_dword_dump()
965 VCPU_VSX_FPR(vcpu, index, 0) = gpr; in kvmppc_set_vsr_dword_dump()
966 VCPU_VSX_FPR(vcpu, index, 1) = gpr; in kvmppc_set_vsr_dword_dump()
970 static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu, in kvmppc_set_vsr_word_dump() argument
974 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_word_dump()
981 VCPU_VSX_VR(vcpu, index - 32) = val.vval; in kvmppc_set_vsr_word_dump()
985 VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0]; in kvmppc_set_vsr_word_dump()
986 VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0]; in kvmppc_set_vsr_word_dump()
990 static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu, in kvmppc_set_vsr_word() argument
994 int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_set_vsr_word()
995 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_word()
1002 val.vval = VCPU_VSX_VR(vcpu, index - 32); in kvmppc_set_vsr_word()
1004 VCPU_VSX_VR(vcpu, index - 32) = val.vval; in kvmppc_set_vsr_word()
1008 val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset); in kvmppc_set_vsr_word()
1010 VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0]; in kvmppc_set_vsr_word()
1016 static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_offset_generic() argument
1025 if (kvmppc_need_byteswap(vcpu)) in kvmppc_get_vmx_offset_generic()
1033 static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_dword_offset() argument
1036 return kvmppc_get_vmx_offset_generic(vcpu, index, 8); in kvmppc_get_vmx_dword_offset()
1039 static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_word_offset() argument
1042 return kvmppc_get_vmx_offset_generic(vcpu, index, 4); in kvmppc_get_vmx_word_offset()
1045 static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_hword_offset() argument
1048 return kvmppc_get_vmx_offset_generic(vcpu, index, 2); in kvmppc_get_vmx_hword_offset()
1051 static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu, in kvmppc_get_vmx_byte_offset() argument
1054 return kvmppc_get_vmx_offset_generic(vcpu, index, 1); in kvmppc_get_vmx_byte_offset()
1058 static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu, in kvmppc_set_vmx_dword() argument
1062 int offset = kvmppc_get_vmx_dword_offset(vcpu, in kvmppc_set_vmx_dword()
1063 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_dword()
1064 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_dword()
1069 val.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_set_vmx_dword()
1071 VCPU_VSX_VR(vcpu, index) = val.vval; in kvmppc_set_vmx_dword()
1074 static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu, in kvmppc_set_vmx_word() argument
1078 int offset = kvmppc_get_vmx_word_offset(vcpu, in kvmppc_set_vmx_word()
1079 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_word()
1080 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_word()
1085 val.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_set_vmx_word()
1087 VCPU_VSX_VR(vcpu, index) = val.vval; in kvmppc_set_vmx_word()
1090 static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu, in kvmppc_set_vmx_hword() argument
1094 int offset = kvmppc_get_vmx_hword_offset(vcpu, in kvmppc_set_vmx_hword()
1095 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_hword()
1096 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_hword()
1101 val.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_set_vmx_hword()
1103 VCPU_VSX_VR(vcpu, index) = val.vval; in kvmppc_set_vmx_hword()
1106 static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu, in kvmppc_set_vmx_byte() argument
1110 int offset = kvmppc_get_vmx_byte_offset(vcpu, in kvmppc_set_vmx_byte()
1111 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_byte()
1112 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_byte()
1117 val.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_set_vmx_byte()
1119 VCPU_VSX_VR(vcpu, index) = val.vval; in kvmppc_set_vmx_byte()
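The kvmppc_get_vmx_*_offset() helpers hit above map an element index into the 16-byte vector register, and the kvmppc_need_byteswap() branch suggests the index is mirrored when the guest's endianness differs from the host's. The self-contained sketch below demonstrates that index-mirroring arithmetic under the assumption that the mirrored index is elements - 1 - index; the kernel helper's exact bounds checks and naming may differ.

    /* Element-index mirroring for byteswapped vector element access (sketch). */
    #include <stdio.h>

    #define VEC_BYTES 16

    static int vmx_offset(int index, int element_size, int need_byteswap)
    {
            int elts = VEC_BYTES / element_size;

            if (index < 0 || index >= elts)
                    return -1;              /* out of range */

            return need_byteswap ? elts - 1 - index : index;
    }

    int main(void)
    {
            /* Word (4-byte) elements: 4 per vector. */
            for (int i = 0; i < 4; i++)
                    printf("word %d -> native %d, byteswapped %d\n",
                           i, vmx_offset(i, 4, 0), vmx_offset(i, 4, 1));
            return 0;
    }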
1153 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu) in kvmppc_complete_mmio_load() argument
1155 struct kvm_run *run = vcpu->run; in kvmppc_complete_mmio_load()
1161 if (!vcpu->arch.mmio_host_swabbed) { in kvmppc_complete_mmio_load()
1178 if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4)) in kvmppc_complete_mmio_load()
1181 if (vcpu->arch.mmio_sign_extend) { in kvmppc_complete_mmio_load()
1197 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { in kvmppc_complete_mmio_load()
1199 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); in kvmppc_complete_mmio_load()
1202 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1203 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP); in kvmppc_complete_mmio_load()
1205 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; in kvmppc_complete_mmio_load()
1209 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; in kvmppc_complete_mmio_load()
1212 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; in kvmppc_complete_mmio_load()
1213 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; in kvmppc_complete_mmio_load()
1218 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1219 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX); in kvmppc_complete_mmio_load()
1221 if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD) in kvmppc_complete_mmio_load()
1222 kvmppc_set_vsr_dword(vcpu, gpr); in kvmppc_complete_mmio_load()
1223 else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD) in kvmppc_complete_mmio_load()
1224 kvmppc_set_vsr_word(vcpu, gpr); in kvmppc_complete_mmio_load()
1225 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1227 kvmppc_set_vsr_dword_dump(vcpu, gpr); in kvmppc_complete_mmio_load()
1228 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1230 kvmppc_set_vsr_word_dump(vcpu, gpr); in kvmppc_complete_mmio_load()
1235 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1236 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC); in kvmppc_complete_mmio_load()
1238 if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD) in kvmppc_complete_mmio_load()
1239 kvmppc_set_vmx_dword(vcpu, gpr); in kvmppc_complete_mmio_load()
1240 else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD) in kvmppc_complete_mmio_load()
1241 kvmppc_set_vmx_word(vcpu, gpr); in kvmppc_complete_mmio_load()
1242 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1244 kvmppc_set_vmx_hword(vcpu, gpr); in kvmppc_complete_mmio_load()
1245 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1247 kvmppc_set_vmx_byte(vcpu, gpr); in kvmppc_complete_mmio_load()
1252 if (kvmppc_need_byteswap(vcpu)) in kvmppc_complete_mmio_load()
1254 kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr, in kvmppc_complete_mmio_load()
1263 static int __kvmppc_handle_load(struct kvm_vcpu *vcpu, in __kvmppc_handle_load() argument
1267 struct kvm_run *run = vcpu->run; in __kvmppc_handle_load()
1272 if (kvmppc_need_byteswap(vcpu)) { in __kvmppc_handle_load()
1281 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in __kvmppc_handle_load()
1285 vcpu->arch.io_gpr = rt; in __kvmppc_handle_load()
1286 vcpu->arch.mmio_host_swabbed = host_swabbed; in __kvmppc_handle_load()
1287 vcpu->mmio_needed = 1; in __kvmppc_handle_load()
1288 vcpu->mmio_is_write = 0; in __kvmppc_handle_load()
1289 vcpu->arch.mmio_sign_extend = sign_extend; in __kvmppc_handle_load()
1291 idx = srcu_read_lock(&vcpu->kvm->srcu); in __kvmppc_handle_load()
1293 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, in __kvmppc_handle_load()
1296 srcu_read_unlock(&vcpu->kvm->srcu, idx); in __kvmppc_handle_load()
1299 kvmppc_complete_mmio_load(vcpu); in __kvmppc_handle_load()
1300 vcpu->mmio_needed = 0; in __kvmppc_handle_load()
1307 int kvmppc_handle_load(struct kvm_vcpu *vcpu, in kvmppc_handle_load() argument
1311 return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0); in kvmppc_handle_load()
1316 int kvmppc_handle_loads(struct kvm_vcpu *vcpu, in kvmppc_handle_loads() argument
1320 return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1); in kvmppc_handle_loads()
1324 int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu, in kvmppc_handle_vsx_load() argument
1331 if (vcpu->arch.mmio_vsx_copy_nums > 4) in kvmppc_handle_vsx_load()
1334 while (vcpu->arch.mmio_vsx_copy_nums) { in kvmppc_handle_vsx_load()
1335 emulated = __kvmppc_handle_load(vcpu, rt, bytes, in kvmppc_handle_vsx_load()
1341 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vsx_load()
1343 vcpu->arch.mmio_vsx_copy_nums--; in kvmppc_handle_vsx_load()
1344 vcpu->arch.mmio_vsx_offset++; in kvmppc_handle_vsx_load()
1350 int kvmppc_handle_store(struct kvm_vcpu *vcpu, in kvmppc_handle_store() argument
1353 struct kvm_run *run = vcpu->run; in kvmppc_handle_store()
1359 if (kvmppc_need_byteswap(vcpu)) { in kvmppc_handle_store()
1368 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in kvmppc_handle_store()
1371 vcpu->mmio_needed = 1; in kvmppc_handle_store()
1372 vcpu->mmio_is_write = 1; in kvmppc_handle_store()
1374 if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4)) in kvmppc_handle_store()
1394 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_handle_store()
1396 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, in kvmppc_handle_store()
1399 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvmppc_handle_store()
1402 vcpu->mmio_needed = 0; in kvmppc_handle_store()
1411 static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val) in kvmppc_get_vsr_data() argument
1416 int copy_type = vcpu->arch.mmio_copy_type; in kvmppc_get_vsr_data()
1422 kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_get_vsr_data()
1430 *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset); in kvmppc_get_vsr_data()
1432 reg.vval = VCPU_VSX_VR(vcpu, rs - 32); in kvmppc_get_vsr_data()
1439 kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_get_vsr_data()
1449 reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset); in kvmppc_get_vsr_data()
1452 reg.vval = VCPU_VSX_VR(vcpu, rs - 32); in kvmppc_get_vsr_data()
1465 int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu, in kvmppc_handle_vsx_store() argument
1471 vcpu->arch.io_gpr = rs; in kvmppc_handle_vsx_store()
1474 if (vcpu->arch.mmio_vsx_copy_nums > 4) in kvmppc_handle_vsx_store()
1477 while (vcpu->arch.mmio_vsx_copy_nums) { in kvmppc_handle_vsx_store()
1478 if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1) in kvmppc_handle_vsx_store()
1481 emulated = kvmppc_handle_store(vcpu, in kvmppc_handle_vsx_store()
1487 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vsx_store()
1489 vcpu->arch.mmio_vsx_copy_nums--; in kvmppc_handle_vsx_store()
1490 vcpu->arch.mmio_vsx_offset++; in kvmppc_handle_vsx_store()
1496 static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu) in kvmppc_emulate_mmio_vsx_loadstore() argument
1498 struct kvm_run *run = vcpu->run; in kvmppc_emulate_mmio_vsx_loadstore()
1502 vcpu->arch.paddr_accessed += run->mmio.len; in kvmppc_emulate_mmio_vsx_loadstore()
1504 if (!vcpu->mmio_is_write) { in kvmppc_emulate_mmio_vsx_loadstore()
1505 emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr, in kvmppc_emulate_mmio_vsx_loadstore()
1506 run->mmio.len, 1, vcpu->arch.mmio_sign_extend); in kvmppc_emulate_mmio_vsx_loadstore()
1508 emulated = kvmppc_handle_vsx_store(vcpu, in kvmppc_emulate_mmio_vsx_loadstore()
1509 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vsx_loadstore()
1532 int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu, in kvmppc_handle_vmx_load() argument
1537 if (vcpu->arch.mmio_vmx_copy_nums > 2) in kvmppc_handle_vmx_load()
1540 while (vcpu->arch.mmio_vmx_copy_nums) { in kvmppc_handle_vmx_load()
1541 emulated = __kvmppc_handle_load(vcpu, rt, bytes, in kvmppc_handle_vmx_load()
1547 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vmx_load()
1548 vcpu->arch.mmio_vmx_copy_nums--; in kvmppc_handle_vmx_load()
1549 vcpu->arch.mmio_vmx_offset++; in kvmppc_handle_vmx_load()
1555 static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val) in kvmppc_get_vmx_dword() argument
1562 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_dword()
1567 reg.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_get_vmx_dword()
1573 static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val) in kvmppc_get_vmx_word() argument
1580 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_word()
1585 reg.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_get_vmx_word()
1591 static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val) in kvmppc_get_vmx_hword() argument
1598 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_hword()
1603 reg.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_get_vmx_hword()
1609 static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val) in kvmppc_get_vmx_byte() argument
1616 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_byte()
1621 reg.vval = VCPU_VSX_VR(vcpu, index); in kvmppc_get_vmx_byte()
1627 int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu, in kvmppc_handle_vmx_store() argument
1634 if (vcpu->arch.mmio_vmx_copy_nums > 2) in kvmppc_handle_vmx_store()
1637 vcpu->arch.io_gpr = rs; in kvmppc_handle_vmx_store()
1639 while (vcpu->arch.mmio_vmx_copy_nums) { in kvmppc_handle_vmx_store()
1640 switch (vcpu->arch.mmio_copy_type) { in kvmppc_handle_vmx_store()
1642 if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1647 if (kvmppc_get_vmx_word(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1651 if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1655 if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1662 emulated = kvmppc_handle_store(vcpu, val, bytes, in kvmppc_handle_vmx_store()
1667 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vmx_store()
1668 vcpu->arch.mmio_vmx_copy_nums--; in kvmppc_handle_vmx_store()
1669 vcpu->arch.mmio_vmx_offset++; in kvmppc_handle_vmx_store()
1675 static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu) in kvmppc_emulate_mmio_vmx_loadstore() argument
1677 struct kvm_run *run = vcpu->run; in kvmppc_emulate_mmio_vmx_loadstore()
1681 vcpu->arch.paddr_accessed += run->mmio.len; in kvmppc_emulate_mmio_vmx_loadstore()
1683 if (!vcpu->mmio_is_write) { in kvmppc_emulate_mmio_vmx_loadstore()
1684 emulated = kvmppc_handle_vmx_load(vcpu, in kvmppc_emulate_mmio_vmx_loadstore()
1685 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vmx_loadstore()
1687 emulated = kvmppc_handle_vmx_store(vcpu, in kvmppc_emulate_mmio_vmx_loadstore()
1688 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vmx_loadstore()
1710 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) in kvm_vcpu_ioctl_get_one_reg() argument
1720 r = kvmppc_get_one_reg(vcpu, reg->id, &val); in kvm_vcpu_ioctl_get_one_reg()
1730 val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0]; in kvm_vcpu_ioctl_get_one_reg()
1737 val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]); in kvm_vcpu_ioctl_get_one_reg()
1740 val = get_reg_val(reg->id, vcpu->arch.vrsave); in kvm_vcpu_ioctl_get_one_reg()
1758 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) in kvm_vcpu_ioctl_set_one_reg() argument
1771 r = kvmppc_set_one_reg(vcpu, reg->id, &val); in kvm_vcpu_ioctl_set_one_reg()
1781 vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval; in kvm_vcpu_ioctl_set_one_reg()
1788 vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val); in kvm_vcpu_ioctl_set_one_reg()
1795 vcpu->arch.vrsave = set_reg_val(reg->id, val); in kvm_vcpu_ioctl_set_one_reg()
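kvm_vcpu_ioctl_get_one_reg()/kvm_vcpu_ioctl_set_one_reg() back the generic KVM_GET_ONE_REG / KVM_SET_ONE_REG vCPU ioctls, and the hits show VRSAVE among the registers handled here. A hedged userspace sketch of reading VRSAVE through that interface follows; it assumes a powerpc host, an already created vCPU file descriptor vcpu_fd, and minimal error handling.

    /* Userspace view of the one_reg hits: KVM_GET_ONE_REG on a vCPU fd. */
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int read_vrsave(int vcpu_fd, uint32_t *out)
    {
            uint32_t vrsave = 0;
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_PPC_VRSAVE,
                    .addr = (uintptr_t)&vrsave,   /* kernel copies into this buffer */
            };

            if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0) {
                    perror("KVM_GET_ONE_REG");
                    return -1;
            }
            *out = vrsave;
            return 0;
    }

KVM_SET_ONE_REG works the same way in the other direction, with the register value read from the buffer that reg.addr points at.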
1807 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_ioctl_run() argument
1809 struct kvm_run *run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
1812 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_run()
1814 if (vcpu->mmio_needed) { in kvm_arch_vcpu_ioctl_run()
1815 vcpu->mmio_needed = 0; in kvm_arch_vcpu_ioctl_run()
1816 if (!vcpu->mmio_is_write) in kvm_arch_vcpu_ioctl_run()
1817 kvmppc_complete_mmio_load(vcpu); in kvm_arch_vcpu_ioctl_run()
1819 if (vcpu->arch.mmio_vsx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1820 vcpu->arch.mmio_vsx_copy_nums--; in kvm_arch_vcpu_ioctl_run()
1821 vcpu->arch.mmio_vsx_offset++; in kvm_arch_vcpu_ioctl_run()
1824 if (vcpu->arch.mmio_vsx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1825 r = kvmppc_emulate_mmio_vsx_loadstore(vcpu); in kvm_arch_vcpu_ioctl_run()
1827 vcpu->mmio_needed = 1; in kvm_arch_vcpu_ioctl_run()
1833 if (vcpu->arch.mmio_vmx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1834 vcpu->arch.mmio_vmx_copy_nums--; in kvm_arch_vcpu_ioctl_run()
1835 vcpu->arch.mmio_vmx_offset++; in kvm_arch_vcpu_ioctl_run()
1838 if (vcpu->arch.mmio_vmx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1839 r = kvmppc_emulate_mmio_vmx_loadstore(vcpu); in kvm_arch_vcpu_ioctl_run()
1841 vcpu->mmio_needed = 1; in kvm_arch_vcpu_ioctl_run()
1846 } else if (vcpu->arch.osi_needed) { in kvm_arch_vcpu_ioctl_run()
1851 kvmppc_set_gpr(vcpu, i, gprs[i]); in kvm_arch_vcpu_ioctl_run()
1852 vcpu->arch.osi_needed = 0; in kvm_arch_vcpu_ioctl_run()
1853 } else if (vcpu->arch.hcall_needed) { in kvm_arch_vcpu_ioctl_run()
1856 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret); in kvm_arch_vcpu_ioctl_run()
1858 kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]); in kvm_arch_vcpu_ioctl_run()
1859 vcpu->arch.hcall_needed = 0; in kvm_arch_vcpu_ioctl_run()
1861 } else if (vcpu->arch.epr_needed) { in kvm_arch_vcpu_ioctl_run()
1862 kvmppc_set_epr(vcpu, run->epr.epr); in kvm_arch_vcpu_ioctl_run()
1863 vcpu->arch.epr_needed = 0; in kvm_arch_vcpu_ioctl_run()
1867 kvm_sigset_activate(vcpu); in kvm_arch_vcpu_ioctl_run()
1872 r = kvmppc_vcpu_run(vcpu); in kvm_arch_vcpu_ioctl_run()
1874 kvm_sigset_deactivate(vcpu); in kvm_arch_vcpu_ioctl_run()
1887 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_run()
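The kvm_arch_vcpu_ioctl_run() hits show how an MMIO access the kernel could not emulate is finished on the next KVM_RUN: the data userspace placed in the shared kvm_run area is folded back into the guest register via kvmppc_complete_mmio_load(), and multi-element VSX/VMX accesses are resumed. The userspace half of that handshake is the standard KVM_EXIT_MMIO loop; the sketch below assumes an existing vcpu_fd, a mmap'ed kvm_run structure named run, and a device-model function mmio_access() invented for illustration.

    /* Userspace side of the MMIO completion path (sketch). */
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Hypothetical device model: read or write `len` bytes at `addr`. */
    extern void mmio_access(uint64_t addr, void *data, uint32_t len, int is_write);

    static int run_vcpu(int vcpu_fd, struct kvm_run *run)
    {
            for (;;) {
                    if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
                            perror("KVM_RUN");
                            return -1;
                    }

                    switch (run->exit_reason) {
                    case KVM_EXIT_MMIO:
                            /* For a load, fill run->mmio.data; the kernel
                             * copies it into the guest register on re-entry. */
                            mmio_access(run->mmio.phys_addr, run->mmio.data,
                                        run->mmio.len, run->mmio.is_write);
                            break;
                    default:
                            return 0;   /* hand other exits to the caller */
                    }
            }
    }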
1891 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) in kvm_vcpu_ioctl_interrupt() argument
1894 kvmppc_core_dequeue_external(vcpu); in kvm_vcpu_ioctl_interrupt()
1898 kvmppc_core_queue_external(vcpu, irq); in kvm_vcpu_ioctl_interrupt()
1900 kvm_vcpu_kick(vcpu); in kvm_vcpu_ioctl_interrupt()
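kvm_vcpu_ioctl_interrupt() queues or dequeues an external interrupt and then kicks the vCPU; it sits behind the KVM_INTERRUPT vCPU ioctl. A small userspace sketch follows, assuming an existing vcpu_fd; on powerpc the irq field selects the external interrupt line state (KVM_INTERRUPT_SET / KVM_INTERRUPT_UNSET) rather than an interrupt number.

    /* Userspace counterpart of kvm_vcpu_ioctl_interrupt() (sketch). */
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int raise_external_irq(int vcpu_fd)
    {
            struct kvm_interrupt irq = { .irq = KVM_INTERRUPT_SET };

            return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
    }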
1905 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_enable_cap() argument
1916 vcpu->arch.osi_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1920 vcpu->arch.papr_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1925 vcpu->arch.epr_flags |= KVMPPC_EPR_USER; in kvm_vcpu_ioctl_enable_cap()
1927 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER; in kvm_vcpu_ioctl_enable_cap()
1932 vcpu->arch.watchdog_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1944 r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg); in kvm_vcpu_ioctl_enable_cap()
1961 r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
1981 r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
1983 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
2007 r = kvmppc_xive_native_connect_vcpu(dev, vcpu, in kvm_vcpu_ioctl_enable_cap()
2017 if (!is_kvmppc_hv_enabled(vcpu->kvm)) in kvm_vcpu_ioctl_enable_cap()
2020 vcpu->kvm->arch.fwnmi_enabled = true; in kvm_vcpu_ioctl_enable_cap()
2029 r = kvmppc_sanity_check(vcpu); in kvm_vcpu_ioctl_enable_cap()
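kvm_vcpu_ioctl_enable_cap() is reached through the per-vCPU KVM_ENABLE_CAP ioctl; the hits show capabilities such as OSI, PAPR and the watchdog being switched on, followed by a kvmppc_sanity_check(). Below is a hedged userspace sketch of enabling KVM_CAP_PPC_PAPR; it again assumes an existing vcpu_fd, and whether the capability is available depends on the host KVM flavour.

    /* Userspace counterpart of kvm_vcpu_ioctl_enable_cap() (sketch). */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int enable_papr(int vcpu_fd)
    {
            struct kvm_enable_cap cap;

            memset(&cap, 0, sizeof(cap));
            cap.cap = KVM_CAP_PPC_PAPR;    /* args[] unused for this cap */

            return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
    }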
2047 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_mpstate() argument
2053 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_mpstate() argument
2062 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl() local
2069 return kvm_vcpu_ioctl_interrupt(vcpu, &irq); in kvm_arch_vcpu_async_ioctl()
2077 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl() local
2088 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl()
2089 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); in kvm_arch_vcpu_ioctl()
2090 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl()
2102 r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
2104 r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
2114 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl()
2115 r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); in kvm_arch_vcpu_ioctl()
2116 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl()
2128 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
2540 void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry) in kvm_arch_create_vcpu_debugfs() argument
2542 if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs) in kvm_arch_create_vcpu_debugfs()
2543 vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry); in kvm_arch_create_vcpu_debugfs()