Lines matching full:arch (cross-reference hits from arch/powerpc/kvm/powerpc.c; the number opening each hit is the source line in that file)

53 	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);  in kvm_arch_vcpu_runnable()
145 struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared; in kvmppc_swab_shared()
187 if (vcpu->arch.intr_msr & MSR_LE) in kvmppc_kvm_pv()
189 if (shared_big_endian != vcpu->arch.shared_big_endian) in kvmppc_kvm_pv()
191 vcpu->arch.shared_big_endian = shared_big_endian; in kvmppc_kvm_pv()
200 vcpu->arch.disable_kernel_nx = true; in kvmppc_kvm_pv()
204 vcpu->arch.magic_page_pa = param1 & ~0xfffULL; in kvmppc_kvm_pv()
205 vcpu->arch.magic_page_ea = param2 & ~0xfffULL; in kvmppc_kvm_pv()
212 if ((vcpu->arch.magic_page_pa & 0xf000) != in kvmppc_kvm_pv()
213 ((ulong)vcpu->arch.shared & 0xf000)) { in kvmppc_kvm_pv()
214 void *old_shared = vcpu->arch.shared; in kvmppc_kvm_pv()
215 ulong shared = (ulong)vcpu->arch.shared; in kvmppc_kvm_pv()
219 shared |= vcpu->arch.magic_page_pa & 0xf000; in kvmppc_kvm_pv()
222 vcpu->arch.shared = new_shared; in kvmppc_kvm_pv()
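
The kvmppc_kvm_pv() hits above are the magic-page hypercall path: the guest hands over a physical/effective address pair, and the `& 0xf000` comparison decides whether the host-side shared struct already sits in the same 4 KiB slot of its page as the guest-chosen physical address, reallocating and copying it otherwise. A minimal user-space sketch of just that slot check, with an illustrative struct layout and address rather than the kernel's:

#include <stdio.h>

/* Illustrative stand-in; the real layout lives in asm/kvm_para.h. */
struct kvm_vcpu_arch_shared { unsigned long sprg0; };

/*
 * With 64 KiB host pages and a 4 KiB guest magic page, bits 12-15 of
 * the address select the 4 KiB slot within the host page, so the
 * shared struct must occupy the same slot the guest asked for.
 */
static int slot_matches(unsigned long magic_page_pa, void *shared)
{
    return (magic_page_pa & 0xf000) == ((unsigned long)shared & 0xf000);
}

int main(void)
{
    struct kvm_vcpu_arch_shared shared;
    unsigned long pa = 0x0ffff000UL & ~0xfffUL;  /* like param1 & ~0xfffULL */

    printf("shared struct slot ok: %d\n", slot_matches(pa, &shared));
    return 0;
}
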
259 if (!vcpu->arch.pvr) in kvmppc_sanity_check()
263 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled) in kvmppc_sanity_check()
267 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm)) in kvmppc_sanity_check()
278 vcpu->arch.sane = r; in kvmppc_sanity_check()
325 kvmppc_core_queue_data_storage(vcpu, vcpu->arch.vaddr_accessed, dsisr); in kvmppc_emulate_mmio()
350 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; in kvmppc_st()
356 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr) in kvmppc_st()
357 r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr, in kvmppc_st()
377 void *magic = vcpu->arch.shared; in kvmppc_st()
393 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; in kvmppc_ld()
399 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr) in kvmppc_ld()
400 rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr, in kvmppc_ld()
423 void *magic = vcpu->arch.shared; in kvmppc_ld()
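
kvmppc_st() and kvmppc_ld() share a shape: try the backend's store_to_eaddr/load_from_eaddr hook first, and otherwise special-case the magic page, copying directly to or from vcpu->arch.shared instead of going through guest memory when the translated address lands in it. A hedged sketch of the magic-page read path; the helper name is hypothetical, the page size is assumed 4 KiB, and the KVM_PAM value is the one used on Book3S-64:

#include <string.h>
#include <stdio.h>

#define PAGE_SIZE   4096UL
#define PAGE_MASK   (~(PAGE_SIZE - 1))
#define KVM_PAM     0x0fffffffffffffffUL  /* Book3S-64 physical address mask */

/* Hypothetical helper mirroring the magic-page branch of kvmppc_ld():
 * 'magic' stands in for vcpu->arch.shared. Returns 1 when handled. */
static int load_from_magic_page(unsigned long gpa, unsigned long magic_page_pa,
                                const char *magic, void *ptr, size_t size)
{
    unsigned long mp_pa = magic_page_pa & KVM_PAM & PAGE_MASK;

    if ((gpa & PAGE_MASK) != mp_pa)
        return 0;  /* not the magic page: caller takes the normal path */

    memcpy(ptr, magic + (gpa & (PAGE_SIZE - 1)), size);
    return 1;
}

int main(void)
{
    char magic[PAGE_SIZE] = "hello";
    char buf[8] = { 0 };

    if (load_from_magic_page(0x1000, 0x1000, magic, buf, 6))
        printf("read from magic page: %s\n", buf);
    return 0;
}
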
483 kvm->arch.kvm_ops = kvm_ops; in kvm_arch_init_vm()
513 module_put(kvm->arch.kvm_ops->owner); in kvm_arch_destroy_vm()
603 if (kvm->arch.emul_smt_mode > 1) in kvm_vm_ioctl_check_extension()
604 r = kvm->arch.emul_smt_mode; in kvm_vm_ioctl_check_extension()
606 r = kvm->arch.smt_mode; in kvm_vm_ioctl_check_extension()
776 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer); in kvmppc_decrementer_wakeup()
786 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); in kvm_arch_vcpu_create()
787 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; in kvm_arch_vcpu_create()
790 mutex_init(&vcpu->arch.exit_timing_lock); in kvm_arch_vcpu_create()
800 rcuwait_init(&vcpu->arch.wait); in kvm_arch_vcpu_create()
801 vcpu->arch.waitp = &vcpu->arch.wait; in kvm_arch_vcpu_create()
816 hrtimer_cancel(&vcpu->arch.dec_timer); in kvm_arch_vcpu_destroy()
818 switch (vcpu->arch.irq_type) { in kvm_arch_vcpu_destroy()
820 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu); in kvm_arch_vcpu_destroy()
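
The decrementer hits show the hrtimer-embedding lifecycle: kvm_arch_vcpu_create() arms a timer embedded in the vcpu, kvmppc_decrementer_wakeup() recovers the enclosing vcpu from the bare timer pointer with container_of(), and kvm_arch_vcpu_destroy() cancels the timer. A stand-alone sketch of that container_of() recovery, with simplified types in place of the kernel's hrtimer API:

#include <stdio.h>
#include <stddef.h>

/* Same idea as the kernel macro, simplified. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct dec_timer { void (*function)(struct dec_timer *); };

struct vcpu {
    int id;
    struct dec_timer dec_timer;  /* embedded, like vcpu->arch.dec_timer */
};

/* Mirrors kvmppc_decrementer_wakeup(): given only the timer pointer,
 * walk back to the vcpu that embeds it. */
static void wakeup(struct dec_timer *t)
{
    struct vcpu *v = container_of(t, struct vcpu, dec_timer);

    printf("waking vcpu %d\n", v->id);
}

int main(void)
{
    struct vcpu v = { .id = 3, .dec_timer = { .function = wakeup } };

    v.dec_timer.function(&v.dec_timer);  /* the timer core would do this */
    return 0;
}
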
853 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); in kvm_arch_vcpu_load()
862 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); in kvm_arch_vcpu_put()
885 if (kvm->arch.kvm_ops->irq_bypass_add_producer) in kvm_arch_irq_bypass_add_producer()
886 return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod); in kvm_arch_irq_bypass_add_producer()
898 if (kvm->arch.kvm_ops->irq_bypass_del_producer) in kvm_arch_irq_bypass_del_producer()
899 kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod); in kvm_arch_irq_bypass_del_producer()
938 int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_set_vsr_dword()
939 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_dword()
957 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_dword_dump()
974 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_word_dump()
994 int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_set_vsr_word()
995 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_word()
1063 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_dword()
1064 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_dword()
1079 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_word()
1080 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_word()
1095 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_hword()
1096 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_hword()
1111 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_byte()
1112 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_byte()
1161 if (!vcpu->arch.mmio_host_swabbed) { in kvmppc_complete_mmio_load()
1178 if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4)) in kvmppc_complete_mmio_load()
1181 if (vcpu->arch.mmio_sign_extend) { in kvmppc_complete_mmio_load()
1197 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { in kvmppc_complete_mmio_load()
1199 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); in kvmppc_complete_mmio_load()
1202 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1203 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP); in kvmppc_complete_mmio_load()
1205 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; in kvmppc_complete_mmio_load()
1209 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; in kvmppc_complete_mmio_load()
1212 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; in kvmppc_complete_mmio_load()
1213 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; in kvmppc_complete_mmio_load()
1218 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1219 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX); in kvmppc_complete_mmio_load()
1221 if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD) in kvmppc_complete_mmio_load()
1223 else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD) in kvmppc_complete_mmio_load()
1225 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1228 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1235 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1236 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC); in kvmppc_complete_mmio_load()
1238 if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD) in kvmppc_complete_mmio_load()
1240 else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD) in kvmppc_complete_mmio_load()
1242 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1245 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1254 kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr, in kvmppc_complete_mmio_load()
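
kvmppc_complete_mmio_load() routes the value returned by user space into the right register file (GPR, FPR, QPR, VSR or VR, selected by the class bits in io_gpr), after optionally byte-swapping it (mmio_host_swabbed), widening a single-precision float (mmio_sp64_extend) and sign-extending it. A minimal sketch of just the sign-extension step, assuming gpr holds the raw MMIO result of len bytes:

#include <stdint.h>
#include <stdio.h>

/* Widen an MMIO result of 'len' bytes to a signed 64-bit register image,
 * as the mmio_sign_extend branch does. */
static uint64_t mmio_sign_extend(uint64_t gpr, unsigned int len)
{
    switch (len) {
    case 4: return (uint64_t)(int64_t)(int32_t)gpr;
    case 2: return (uint64_t)(int64_t)(int16_t)gpr;
    case 1: return (uint64_t)(int64_t)(int8_t)gpr;
    default: return gpr;  /* 8-byte loads need no extension */
    }
}

int main(void)
{
    /* 0x8000 loaded as a signed halfword becomes 0xffffffffffff8000. */
    printf("%llx\n", (unsigned long long)mmio_sign_extend(0x8000, 2));
    return 0;
}
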
1281 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in __kvmppc_handle_load()
1285 vcpu->arch.io_gpr = rt; in __kvmppc_handle_load()
1286 vcpu->arch.mmio_host_swabbed = host_swabbed; in __kvmppc_handle_load()
1289 vcpu->arch.mmio_sign_extend = sign_extend; in __kvmppc_handle_load()
1331 if (vcpu->arch.mmio_vsx_copy_nums > 4) in kvmppc_handle_vsx_load()
1334 while (vcpu->arch.mmio_vsx_copy_nums) { in kvmppc_handle_vsx_load()
1341 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vsx_load()
1343 vcpu->arch.mmio_vsx_copy_nums--; in kvmppc_handle_vsx_load()
1344 vcpu->arch.mmio_vsx_offset++; in kvmppc_handle_vsx_load()
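
kvmppc_handle_vsx_load() caps a transfer at 4 elements and then runs a resumable loop: each pass emulates one element, advances paddr_accessed, decrements mmio_vsx_copy_nums and bumps mmio_vsx_offset, so the state saved in vcpu->arch lets the loop pick up where it left off after an exit to user space (kvmppc_handle_vsx_store is symmetric). A sketch of that resumable-loop bookkeeping, with illustrative types:

#include <stdio.h>

/* State that survives an exit to user space, as in vcpu->arch. */
struct mmio_state {
    unsigned long paddr_accessed;
    int copy_nums;  /* elements still to transfer */
    int offset;     /* next element slot within the VSX register */
};

/* One element per pass: emulate a 'len'-byte access at paddr_accessed,
 * then advance all three cursors so the transfer can resume. */
static void vsx_load_step(struct mmio_state *s, unsigned int len)
{
    /* ... emulate one 'len'-byte load at s->paddr_accessed ... */
    s->paddr_accessed += len;
    s->copy_nums--;
    s->offset++;
}

int main(void)
{
    struct mmio_state s = { .paddr_accessed = 0x1000, .copy_nums = 2 };

    while (s.copy_nums)
        vsx_load_step(&s, 8);
    printf("final addr %#lx, offset %d\n", s.paddr_accessed, s.offset);
    return 0;
}
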
1368 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in kvmppc_handle_store()
1374 if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4)) in kvmppc_handle_store()
1416 int copy_type = vcpu->arch.mmio_copy_type; in kvmppc_get_vsr_data()
1422 kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_get_vsr_data()
1439 kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_get_vsr_data()
1471 vcpu->arch.io_gpr = rs; in kvmppc_handle_vsx_store()
1474 if (vcpu->arch.mmio_vsx_copy_nums > 4) in kvmppc_handle_vsx_store()
1477 while (vcpu->arch.mmio_vsx_copy_nums) { in kvmppc_handle_vsx_store()
1487 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vsx_store()
1489 vcpu->arch.mmio_vsx_copy_nums--; in kvmppc_handle_vsx_store()
1490 vcpu->arch.mmio_vsx_offset++; in kvmppc_handle_vsx_store()
1502 vcpu->arch.paddr_accessed += run->mmio.len; in kvmppc_emulate_mmio_vsx_loadstore()
1505 emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr, in kvmppc_emulate_mmio_vsx_loadstore()
1506 run->mmio.len, 1, vcpu->arch.mmio_sign_extend); in kvmppc_emulate_mmio_vsx_loadstore()
1509 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vsx_loadstore()
1537 if (vcpu->arch.mmio_vmx_copy_nums > 2) in kvmppc_handle_vmx_load()
1540 while (vcpu->arch.mmio_vmx_copy_nums) { in kvmppc_handle_vmx_load()
1547 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vmx_load()
1548 vcpu->arch.mmio_vmx_copy_nums--; in kvmppc_handle_vmx_load()
1549 vcpu->arch.mmio_vmx_offset++; in kvmppc_handle_vmx_load()
1562 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_dword()
1580 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_word()
1598 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_hword()
1616 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_byte()
1634 if (vcpu->arch.mmio_vmx_copy_nums > 2) in kvmppc_handle_vmx_store()
1637 vcpu->arch.io_gpr = rs; in kvmppc_handle_vmx_store()
1639 while (vcpu->arch.mmio_vmx_copy_nums) { in kvmppc_handle_vmx_store()
1640 switch (vcpu->arch.mmio_copy_type) { in kvmppc_handle_vmx_store()
1667 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vmx_store()
1668 vcpu->arch.mmio_vmx_copy_nums--; in kvmppc_handle_vmx_store()
1669 vcpu->arch.mmio_vmx_offset++; in kvmppc_handle_vmx_store()
1681 vcpu->arch.paddr_accessed += run->mmio.len; in kvmppc_emulate_mmio_vmx_loadstore()
1685 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vmx_loadstore()
1688 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vmx_loadstore()
1730 val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0]; in kvm_vcpu_ioctl_get_one_reg()
1737 val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]); in kvm_vcpu_ioctl_get_one_reg()
1740 val = get_reg_val(reg->id, vcpu->arch.vrsave); in kvm_vcpu_ioctl_get_one_reg()
1781 vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval; in kvm_vcpu_ioctl_set_one_reg()
1788 vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val); in kvm_vcpu_ioctl_set_one_reg()
1795 vcpu->arch.vrsave = set_reg_val(reg->id, val); in kvm_vcpu_ioctl_set_one_reg()
1819 if (vcpu->arch.mmio_vsx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1820 vcpu->arch.mmio_vsx_copy_nums--; in kvm_arch_vcpu_ioctl_run()
1821 vcpu->arch.mmio_vsx_offset++; in kvm_arch_vcpu_ioctl_run()
1824 if (vcpu->arch.mmio_vsx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1833 if (vcpu->arch.mmio_vmx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1834 vcpu->arch.mmio_vmx_copy_nums--; in kvm_arch_vcpu_ioctl_run()
1835 vcpu->arch.mmio_vmx_offset++; in kvm_arch_vcpu_ioctl_run()
1838 if (vcpu->arch.mmio_vmx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1846 } else if (vcpu->arch.osi_needed) { in kvm_arch_vcpu_ioctl_run()
1852 vcpu->arch.osi_needed = 0; in kvm_arch_vcpu_ioctl_run()
1853 } else if (vcpu->arch.hcall_needed) { in kvm_arch_vcpu_ioctl_run()
1859 vcpu->arch.hcall_needed = 0; in kvm_arch_vcpu_ioctl_run()
1861 } else if (vcpu->arch.epr_needed) { in kvm_arch_vcpu_ioctl_run()
1863 vcpu->arch.epr_needed = 0; in kvm_arch_vcpu_ioctl_run()
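
On re-entry, kvm_arch_vcpu_ioctl_run() first finishes any in-flight MMIO (including resuming multi-element VSX/VMX transfers via the saved copy_nums/offset counters above), then retires at most one pending user-space completion and clears its flag. A sketch of that one-shot completion dispatch; the flag names match the hits, the register writes are elided:

#include <stdio.h>

/* Mirrors vcpu->arch.*_needed: at most one is set when the previous
 * KVM_RUN exited to user space for that reason. */
struct vcpu_flags {
    int osi_needed;
    int hcall_needed;
    int epr_needed;
};

/* Copy the user-space result back into guest state, then clear the
 * flag so a second KVM_RUN does not replay the completion. */
static void complete_userspace_io(struct vcpu_flags *f)
{
    if (f->osi_needed) {
        /* ... write the OSI return gprs ... */
        f->osi_needed = 0;
    } else if (f->hcall_needed) {
        /* ... write the hypercall return value ... */
        f->hcall_needed = 0;
    } else if (f->epr_needed) {
        /* ... set the external proxy register ... */
        f->epr_needed = 0;
    }
}

int main(void)
{
    struct vcpu_flags f = { .hcall_needed = 1 };

    complete_userspace_io(&f);
    printf("hcall completed: %d\n", !f.hcall_needed);
    return 0;
}
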
1916 vcpu->arch.osi_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1920 vcpu->arch.papr_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1925 vcpu->arch.epr_flags |= KVMPPC_EPR_USER; in kvm_vcpu_ioctl_enable_cap()
1927 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER; in kvm_vcpu_ioctl_enable_cap()
1932 vcpu->arch.watchdog_enabled = true; in kvm_vcpu_ioctl_enable_cap()
2020 vcpu->kvm->arch.fwnmi_enabled = true; in kvm_vcpu_ioctl_enable_cap()
2037 if (kvm->arch.mpic) in kvm_arch_intc_initialized()
2041 if (kvm->arch.xics || kvm->arch.xive) in kvm_arch_intc_initialized()
2201 set_bit(hcall / 4, kvm->arch.enabled_hcalls); in kvm_vm_ioctl_enable_cap()
2203 clear_bit(hcall / 4, kvm->arch.enabled_hcalls); in kvm_vm_ioctl_enable_cap()
2212 if (kvm->arch.kvm_ops->set_smt_mode) in kvm_vm_ioctl_enable_cap()
2213 r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags); in kvm_vm_ioctl_enable_cap()
2220 !kvm->arch.kvm_ops->enable_nested) in kvm_vm_ioctl_enable_cap()
2222 r = kvm->arch.kvm_ops->enable_nested(kvm); in kvm_vm_ioctl_enable_cap()
2228 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm) in kvm_vm_ioctl_enable_cap()
2230 r = kvm->arch.kvm_ops->enable_svm(kvm); in kvm_vm_ioctl_enable_cap()
2234 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1) in kvm_vm_ioctl_enable_cap()
2236 r = kvm->arch.kvm_ops->enable_dawr1(kvm); in kvm_vm_ioctl_enable_cap()
2432 r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info); in kvm_arch_vm_ioctl()
2448 if (!kvm->arch.kvm_ops->configure_mmu) in kvm_arch_vm_ioctl()
2453 r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg); in kvm_arch_vm_ioctl()
2461 if (!kvm->arch.kvm_ops->get_rmmu_info) in kvm_arch_vm_ioctl()
2463 r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info); in kvm_arch_vm_ioctl()
2480 if (!kvm->arch.kvm_ops->svm_off) in kvm_arch_vm_ioctl()
2483 r = kvm->arch.kvm_ops->svm_off(kvm); in kvm_arch_vm_ioctl()
2488 r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg); in kvm_arch_vm_ioctl()
2542 if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs) in kvm_arch_create_vcpu_debugfs()
2543 vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry); in kvm_arch_create_vcpu_debugfs()
2548 if (kvm->arch.kvm_ops->create_vm_debugfs) in kvm_arch_create_vm_debugfs()
2549 kvm->arch.kvm_ops->create_vm_debugfs(kvm); in kvm_arch_create_vm_debugfs()
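
One pattern runs through many hits above (irq_bypass, enable_nested/svm/dawr1, configure_mmu, svm_off, the debugfs hooks): common code never calls a backend method without first testing the function pointer in kvm->arch.kvm_ops, so the HV and PR backends can leave optional hooks NULL and the feature degrades to -EINVAL or a no-op. A cut-down sketch of that optional-hook dispatch, with struct and hook names abbreviated from kvmppc_ops:

#include <stdio.h>
#include <errno.h>

struct kvm;  /* opaque here */

/* Every hook is optional, so callers test the pointer before calling. */
struct kvm_ops {
    int (*enable_nested)(struct kvm *kvm);
    void (*create_vm_debugfs)(struct kvm *kvm);
};

static int enable_nested(struct kvm *kvm, const struct kvm_ops *ops)
{
    if (!ops->enable_nested)
        return -EINVAL;  /* this backend lacks the feature */
    return ops->enable_nested(kvm);
}

static int hv_enable_nested(struct kvm *kvm) { (void)kvm; return 0; }

int main(void)
{
    struct kvm_ops hv = { .enable_nested = hv_enable_nested };
    struct kvm_ops pr = { 0 };  /* PR-style backend: hook left NULL */

    printf("hv: %d, pr: %d\n",
           enable_nested(0, &hv), enable_nested(0, &pr));
    return 0;
}
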