1 // SPDX-License-Identifier: GPL-2.0-only
3 * Kernel-based Virtual Machine driver for Linux
16 * Ben-Ami Yassour <benami@il.ibm.com>
45 #include <linux/user-return-notifier.h>
59 #include <linux/entry-kvm.h>
98 ((struct kvm_vcpu *)(ctxt)->vcpu)
101 * - enable syscall by default because it's emulated by KVM
102 * - enable LME and LMA by default on 64-bit KVM
136 *(((struct kvm_x86_ops *)0)->func));
139 #include <asm/kvm-x86-ops.h>
156 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
161 * lapic timer advance (tscdeadline mode only) in nanoseconds. '-1' enables
163 * advancement entirely. Any other value is used as-is and disables adaptive
166 static int __read_mostly lapic_timer_advance_ns = -1;
177 * Flags to manipulate forced emulation behavior (any non-zero value will
184 int __read_mostly pi_inject_timer = -1;
339 size - useroffset, NULL); in kvm_alloc_emulator_cache()
348 vcpu->arch.apf.gfns[i] = ~0; in kvm_async_pf_hash_reset()
364 if (msrs->registered) { in kvm_on_user_return()
365 msrs->registered = false; in kvm_on_user_return()
370 values = &msrs->values[slot]; in kvm_on_user_return()
371 if (values->host != values->curr) { in kvm_on_user_return()
372 wrmsrl(kvm_uret_msrs_list[slot], values->host); in kvm_on_user_return()
373 values->curr = values->host; in kvm_on_user_return()
398 return -1; in kvm_add_user_return_msr()
413 return -1; in kvm_find_user_return_msr()
426 msrs->values[i].host = value; in kvm_user_return_msr_cpu_online()
427 msrs->values[i].curr = value; in kvm_user_return_msr_cpu_online()
437 value = (value & mask) | (msrs->values[slot].host & ~mask); in kvm_set_user_return_msr()
438 if (value == msrs->values[slot].curr) in kvm_set_user_return_msr()
444 msrs->values[slot].curr = value; in kvm_set_user_return_msr()
445 if (!msrs->registered) { in kvm_set_user_return_msr()
446 msrs->urn.on_user_return = kvm_on_user_return; in kvm_set_user_return_msr()
447 user_return_notifier_register(&msrs->urn); in kvm_set_user_return_msr()
448 msrs->registered = true; in kvm_set_user_return_msr()
459 if (msrs->registered) in drop_user_return_notifiers()
460 kvm_on_user_return(&msrs->urn); in drop_user_return_notifiers()
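The kvm_uret_msr fragments above implement a deferred-restore scheme: guest values are written into shared MSRs with lazy notifier registration, and the host values are put back from the user-return notifier only when the CPU really returns to userspace. A minimal userspace analog of that pattern (hypothetical names and a single slot; not kernel code):

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical single-slot stand-in for the per-CPU kvm_user_return_msrs. */
struct uret_msr { uint64_t host, curr; };

static struct uret_msr slot = { .host = 0xd0, .curr = 0xd0 };
static bool registered;

/* Analog of kvm_set_user_return_msr(): write only on change, register lazily. */
static void set_user_return_msr(uint64_t value)
{
	if (value == slot.curr)
		return;			/* skip the expensive WRMSR */
	slot.curr = value;		/* stands in for wrmsrl(msr, value) */
	if (!registered)
		registered = true;	/* user_return_notifier_register() */
}

/* Analog of kvm_on_user_return(): unregister, then restore the host value. */
static void on_user_return(void)
{
	registered = false;		/* user_return_notifier_unregister() */
	if (slot.curr != slot.host)
		slot.curr = slot.host;	/* wrmsrl(msr, slot.host) */
}

int main(void)
{
	set_user_return_msr(0x47);	/* vCPU loads a guest MSR value */
	on_user_return();		/* CPU returns to userspace */
	printf("restored: %#" PRIx64 "\n", slot.curr);
	return 0;
}

The equality check before the write is the point of the scheme: WRMSR is slow, and most of the time the guest and host values already agree.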
465 return vcpu->arch.apic_base; in kvm_get_apic_base()
478 enum lapic_mode new_mode = kvm_apic_mode(msr_info->data); in kvm_set_apic_base()
482 if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID) in kvm_set_apic_base()
484 if (!msr_info->host_initiated) { in kvm_set_apic_base()
491 kvm_lapic_set_base(vcpu, msr_info->data); in kvm_set_apic_base()
492 kvm_recalculate_apic_map(vcpu->kvm); in kvm_set_apic_base()
548 * #DBs can be trap-like or fault-like, the caller must check other CPU in exception_type()
567 if (!ex->has_payload) in kvm_deliver_exception_payload()
570 switch (ex->vector) { in kvm_deliver_exception_payload()
573 * "Certain debug exceptions may clear bit 0-3. The in kvm_deliver_exception_payload()
577 vcpu->arch.dr6 &= ~DR_TRAP_BITS; in kvm_deliver_exception_payload()
586 * Active low bits should be cleared if 1-setting in payload. in kvm_deliver_exception_payload()
587 * Active high bits should be set if 1-setting in payload. in kvm_deliver_exception_payload()
594 vcpu->arch.dr6 |= DR6_ACTIVE_LOW; in kvm_deliver_exception_payload()
595 vcpu->arch.dr6 |= ex->payload; in kvm_deliver_exception_payload()
596 vcpu->arch.dr6 ^= ex->payload & DR6_ACTIVE_LOW; in kvm_deliver_exception_payload()
604 vcpu->arch.dr6 &= ~BIT(12); in kvm_deliver_exception_payload()
607 vcpu->arch.cr2 = ex->payload; in kvm_deliver_exception_payload()
611 ex->has_payload = false; in kvm_deliver_exception_payload()
612 ex->payload = 0; in kvm_deliver_exception_payload()
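The DR6 lines above (594-596) merge an exception payload into the guest's DR6 while accounting for the fact that some DR6 bits are active-low: set all active-low bits first, OR in the payload, then XOR away the active-low bits the payload reports. A standalone demo of that bit trick, assuming the DR6_ACTIVE_LOW value (0xffff0ff0) used by recent kernels:

#include <inttypes.h>
#include <stdio.h>

#define DR6_ACTIVE_LOW 0xffff0ff0ULL	/* assumed value, from recent kernels */

/* Mirror of the three statements at lines 594-596. */
static uint64_t merge_dr6_payload(uint64_t dr6, uint64_t payload)
{
	dr6 |= DR6_ACTIVE_LOW;			/* start with all active-low bits set */
	dr6 |= payload;				/* set reported active-high bits */
	dr6 ^= payload & DR6_ACTIVE_LOW;	/* clear reported active-low bits */
	return dr6;
}

int main(void)
{
	/* payload reports B0 (bit 0) and BS (bit 14), both active-high */
	printf("DR6 = %#" PRIx64 "\n",
	       merge_dr6_payload(0, (1ULL << 0) | (1ULL << 14)));	/* 0xffff4ff1 */
	return 0;
}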
620 struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit; in kvm_queue_exception_vmexit()
622 ex->vector = vector; in kvm_queue_exception_vmexit()
623 ex->injected = false; in kvm_queue_exception_vmexit()
624 ex->pending = true; in kvm_queue_exception_vmexit()
625 ex->has_error_code = has_error_code; in kvm_queue_exception_vmexit()
626 ex->error_code = error_code; in kvm_queue_exception_vmexit()
627 ex->has_payload = has_payload; in kvm_queue_exception_vmexit()
628 ex->payload = payload; in kvm_queue_exception_vmexit()
634 kvm_x86_ops.nested_ops->leave_nested(vcpu); in kvm_leave_nested()
648 * morph it to a VM-Exit if L1 wants to intercept the exception. A in kvm_multiple_exception()
650 * when it was originally queued, and re-checking is incorrect if _L1_ in kvm_multiple_exception()
654 kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, nr, error_code)) { in kvm_multiple_exception()
660 if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) { in kvm_multiple_exception()
664 * On VM-Entry, an exception can be pending if and only in kvm_multiple_exception()
671 vcpu->arch.exception.injected = true; in kvm_multiple_exception()
681 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
682 vcpu->arch.exception.injected = false; in kvm_multiple_exception()
684 vcpu->arch.exception.has_error_code = has_error; in kvm_multiple_exception()
685 vcpu->arch.exception.vector = nr; in kvm_multiple_exception()
686 vcpu->arch.exception.error_code = error_code; in kvm_multiple_exception()
687 vcpu->arch.exception.has_payload = has_payload; in kvm_multiple_exception()
688 vcpu->arch.exception.payload = payload; in kvm_multiple_exception()
691 &vcpu->arch.exception); in kvm_multiple_exception()
696 prev_nr = vcpu->arch.exception.vector; in kvm_multiple_exception()
698 /* triple fault -> shutdown */ in kvm_multiple_exception()
710 vcpu->arch.exception.injected = false; in kvm_multiple_exception()
711 vcpu->arch.exception.pending = false; in kvm_multiple_exception()
716 that instruction re-execution will regenerate lost in kvm_multiple_exception()
772 ++vcpu->stat.pf_guest; in kvm_inject_page_fault()
775 * Async #PF in L2 is always forwarded to L1 as a VM-Exit regardless of in kvm_inject_page_fault()
778 if (is_guest_mode(vcpu) && fault->async_page_fault) in kvm_inject_page_fault()
780 true, fault->error_code, in kvm_inject_page_fault()
781 true, fault->address); in kvm_inject_page_fault()
783 kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code, in kvm_inject_page_fault()
784 fault->address); in kvm_inject_page_fault()
792 WARN_ON_ONCE(fault->vector != PF_VECTOR); in kvm_inject_emulated_page_fault()
794 fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu : in kvm_inject_emulated_page_fault()
795 vcpu->arch.walk_mmu; in kvm_inject_emulated_page_fault()
801 if ((fault->error_code & PFERR_PRESENT_MASK) && in kvm_inject_emulated_page_fault()
802 !(fault->error_code & PFERR_RSVD_MASK)) in kvm_inject_emulated_page_fault()
803 kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address, in kvm_inject_emulated_page_fault()
804 fault_mmu->root.hpa); in kvm_inject_emulated_page_fault()
806 fault_mmu->inject_page_fault(vcpu, fault); in kvm_inject_emulated_page_fault()
812 atomic_inc(&vcpu->arch.nmi_queued); in kvm_inject_nmi()
854 return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2); in pdptr_rsvd_bits()
862 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in load_pdptrs()
867 u64 pdpte[ARRAY_SIZE(mmu->pdptrs)]; in load_pdptrs()
895 if (!tdp_enabled && memcmp(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs))) in load_pdptrs()
896 kvm_mmu_free_roots(vcpu->kvm, mmu, KVM_MMU_ROOT_CURRENT); in load_pdptrs()
898 memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); in load_pdptrs()
901 vcpu->arch.pdptrs_from_userspace = false; in load_pdptrs()
925 kvm_arch_has_noncoherent_dma(vcpu->kvm) && in kvm_post_set_cr0()
926 !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) in kvm_post_set_cr0()
927 kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL); in kvm_post_set_cr0()
951 if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) && in kvm_set_cr0()
962 if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) && in kvm_set_cr0()
987 if (vcpu->arch.guest_state_protected) in kvm_load_guest_xsave_state()
992 if (vcpu->arch.xcr0 != host_xcr0) in kvm_load_guest_xsave_state()
993 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); in kvm_load_guest_xsave_state()
995 if (vcpu->arch.xsaves_enabled && in kvm_load_guest_xsave_state()
996 vcpu->arch.ia32_xss != host_xss) in kvm_load_guest_xsave_state()
997 wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss); in kvm_load_guest_xsave_state()
1002 vcpu->arch.pkru != vcpu->arch.host_pkru && in kvm_load_guest_xsave_state()
1003 ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) || in kvm_load_guest_xsave_state()
1005 write_pkru(vcpu->arch.pkru); in kvm_load_guest_xsave_state()
1012 if (vcpu->arch.guest_state_protected) in kvm_load_host_xsave_state()
1017 ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) || in kvm_load_host_xsave_state()
1019 vcpu->arch.pkru = rdpkru(); in kvm_load_host_xsave_state()
1020 if (vcpu->arch.pkru != vcpu->arch.host_pkru) in kvm_load_host_xsave_state()
1021 write_pkru(vcpu->arch.host_pkru); in kvm_load_host_xsave_state()
1027 if (vcpu->arch.xcr0 != host_xcr0) in kvm_load_host_xsave_state()
1030 if (vcpu->arch.xsaves_enabled && in kvm_load_host_xsave_state()
1031 vcpu->arch.ia32_xss != host_xss) in kvm_load_host_xsave_state()
1041 return vcpu->arch.guest_supported_xcr0 & XFEATURE_MASK_USER_DYNAMIC; in kvm_guest_supported_xfd()
1048 u64 old_xcr0 = vcpu->arch.xcr0; in __kvm_set_xcr()
1064 valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP; in __kvm_set_xcr()
1083 vcpu->arch.xcr0 = xcr0; in __kvm_set_xcr()
1108 if (cr4 & vcpu->arch.cr4_guest_rsvd_bits) in __kvm_is_valid_cr4()
1127 * If CR4.PCIDE is changed 0 -> 1, there is no need to flush the TLB in kvm_post_set_cr4()
1141 * - CR4.PCIDE is changed from 1 to 0 in kvm_post_set_cr4()
1142 * - CR4.PGE is toggled in kvm_post_set_cr4()
1153 * - CR4.SMEP is changed from 0 to 1 in kvm_post_set_cr4()
1154 * - CR4.PAE is toggled in kvm_post_set_cr4()
1199 struct kvm_mmu *mmu = vcpu->arch.mmu; in kvm_invalidate_pcid()
1234 if (kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd) == pcid) in kvm_invalidate_pcid()
1237 kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free); in kvm_invalidate_pcid()
1272 vcpu->arch.cr3 = cr3; in kvm_set_cr3()
1281 * and it's impossible to use a non-zero PCID when PCID is disabled, in kvm_set_cr3()
1298 vcpu->arch.cr8 = cr8; in kvm_set_cr8()
1308 return vcpu->arch.cr8; in kvm_get_cr8()
1316 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { in kvm_update_dr0123()
1318 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_update_dr0123()
1326 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) in kvm_update_dr7()
1327 dr7 = vcpu->arch.guest_debug_dr7; in kvm_update_dr7()
1329 dr7 = vcpu->arch.dr7; in kvm_update_dr7()
1331 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
1333 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
1351 size_t size = ARRAY_SIZE(vcpu->arch.db); in kvm_set_dr()
1355 vcpu->arch.db[array_index_nospec(dr, size)] = val; in kvm_set_dr()
1356 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) in kvm_set_dr()
1357 vcpu->arch.eff_db[dr] = val; in kvm_set_dr()
1363 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); in kvm_set_dr()
1369 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; in kvm_set_dr()
1380 size_t size = ARRAY_SIZE(vcpu->arch.db); in kvm_get_dr()
1384 *val = vcpu->arch.db[array_index_nospec(dr, size)]; in kvm_get_dr()
1388 *val = vcpu->arch.dr6; in kvm_get_dr()
1392 *val = vcpu->arch.dr7; in kvm_get_dr()
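Both kvm_set_dr() and kvm_get_dr() index vcpu->arch.db through array_index_nospec() so a mispredicted bounds check cannot be used to speculatively read out of bounds. A sketch of the underlying mask construction, modeled on the kernel's generic array_index_mask_nospec() (simplified; relies on the signed right shift being arithmetic, as it is on all mainstream compilers):

#include <inttypes.h>
#include <stdio.h>

/* Branchless bounds mask: all-ones when index < size, zero otherwise. */
static uint64_t index_mask_nospec(uint64_t index, uint64_t size)
{
	return ~(int64_t)(index | (size - 1 - index)) >> 63;
}

int main(void)
{
	uint64_t db[4] = { 0x11, 0x22, 0x33, 0x44 };
	uint64_t dr = 7;	/* out-of-range, possibly attacker-influenced */
	uint64_t idx = dr & index_mask_nospec(dr, 4);

	/* idx collapses to 0 for out-of-range dr, even under misspeculation */
	printf("db[%" PRIu64 "] = %#" PRIx64 "\n", idx, db[idx]);
	return 0;
}

The real code still performs an architectural bounds check first; the mask only prevents the speculative out-of-bounds load.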
1422 * kvm-specific. Those are put in emulated_msrs_all; filtering of emulated_msrs
1543 * List of msr numbers which are used to expose MSR-based features that
1578 * 10 - MISC_PACKAGE_CTRLS
1579 * 11 - ENERGY_FILTERING_CTL
1580 * 12 - DOITM
1581 * 18 - FB_CLEAR_CTRL
1582 * 21 - XAPIC_DISABLE_STATUS
1583 * 23 - OVERCLOCKING_STATUS
1653 switch (msr->index) { in kvm_get_msr_feature()
1655 msr->data = kvm_get_arch_capabilities(); in kvm_get_msr_feature()
1658 rdmsrl_safe(msr->index, &msr->data); in kvm_get_msr_feature()
1718 u64 old_efer = vcpu->arch.efer; in set_efer()
1719 u64 efer = msr_info->data; in set_efer()
1725 if (!msr_info->host_initiated) { in set_efer()
1730 (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) in set_efer()
1735 efer |= vcpu->arch.efer & EFER_LMA; in set_efer()
1759 struct kvm *kvm = vcpu->kvm; in kvm_msr_allowed()
1768 idx = srcu_read_lock(&kvm->srcu); in kvm_msr_allowed()
1770 msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu); in kvm_msr_allowed()
1776 allowed = msr_filter->default_allow; in kvm_msr_allowed()
1777 ranges = msr_filter->ranges; in kvm_msr_allowed()
1779 for (i = 0; i < msr_filter->count; i++) { in kvm_msr_allowed()
1780 u32 start = ranges[i].base; in kvm_msr_allowed()
1786 allowed = !!test_bit(index - start, bitmap); in kvm_msr_allowed()
1792 srcu_read_unlock(&kvm->srcu, idx); in kvm_msr_allowed()
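kvm_msr_allowed() resolves an MSR index under SRCU: the verdict starts at msr_filter->default_allow and the first range containing the index overrides it via a per-MSR bitmap. The same lookup, sketched with simplified types, no locking, and without the read/write direction flags the real code also matches:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct msr_range {
	uint32_t base;		/* first MSR index covered */
	uint32_t nmsrs;		/* number of MSRs covered */
	const uint8_t *bitmap;	/* one bit per MSR: 1 = allowed */
};

static bool test_bit(uint32_t nr, const uint8_t *bitmap)
{
	return bitmap[nr / 8] & (1u << (nr % 8));
}

/* Simplified analog of kvm_msr_allowed(): default verdict, first match wins. */
static bool msr_allowed(uint32_t index, bool default_allow,
			const struct msr_range *ranges, int count)
{
	bool allowed = default_allow;

	for (int i = 0; i < count; i++) {
		if (index >= ranges[i].base &&
		    index < ranges[i].base + ranges[i].nmsrs) {
			allowed = test_bit(index - ranges[i].base,
					   ranges[i].bitmap);
			break;
		}
	}
	return allowed;
}

int main(void)
{
	static const uint8_t bits[1] = { 0x01 };	/* only base+0 allowed */
	struct msr_range r = { .base = 0x174, .nmsrs = 3, .bitmap = bits };

	printf("0x174: %d, 0x175: %d\n",
	       msr_allowed(0x174, true, &r, 1),
	       msr_allowed(0x175, true, &r, 1));
	return 0;
}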
1801 * Returns 0 on success, non-0 otherwise.
1822 * non-canonical address is written on Intel but not on in __kvm_set_msr()
1823 * AMD (which ignores the top 32-bits, because it does in __kvm_set_msr()
1824 * not implement 64-bit SYSENTER). in __kvm_set_msr()
1826 * 64-bit code should hence be able to write a non-canonical in __kvm_set_msr()
1828 * vmentry does not fail on Intel after writing a non-canonical in __kvm_set_msr()
1830 * invokes 64-bit SYSENTER. in __kvm_set_msr()
1849 * the bits in all other cases. This ensures cross-vendor in __kvm_set_msr()
1881 * Returns 0 on success, non-0 otherwise.
1954 if (!vcpu->run->msr.error) { in complete_userspace_rdmsr()
1955 kvm_rax_write(vcpu, (u32)vcpu->run->msr.data); in complete_userspace_rdmsr()
1956 kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32); in complete_userspace_rdmsr()
1962 return complete_emulated_insn_gp(vcpu, vcpu->run->msr.error); in complete_emulated_msr_access()
1973 return static_call(kvm_x86_complete_emulated_msr)(vcpu, vcpu->run->msr.error); in complete_fast_msr_access()
2002 if (!(vcpu->kvm->arch.user_space_msr_mask & msr_reason)) in kvm_msr_user_space()
2005 vcpu->run->exit_reason = exit_reason; in kvm_msr_user_space()
2006 vcpu->run->msr.error = 0; in kvm_msr_user_space()
2007 memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad)); in kvm_msr_user_space()
2008 vcpu->run->msr.reason = msr_reason; in kvm_msr_user_space()
2009 vcpu->run->msr.index = index; in kvm_msr_user_space()
2010 vcpu->run->msr.data = data; in kvm_msr_user_space()
2011 vcpu->arch.complete_userspace_io = completion; in kvm_msr_user_space()
2027 kvm_rax_write(vcpu, data & -1u); in kvm_emulate_rdmsr()
2028 kvm_rdx_write(vcpu, (data >> 32) & -1u); in kvm_emulate_rdmsr()
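kvm_msr_user_space() (lines 2002-2011) bounces unhandled MSR accesses to userspace: the vCPU exits with KVM_EXIT_X86_RDMSR or KVM_EXIT_X86_WRMSR and the details in kvm_run->msr, and the VMM fills in msr.data/msr.error before the next KVM_RUN. A hedged sketch of the VMM side (assumes KVM_CAP_X86_USER_SPACE_MSR has been enabled and the kvm_run area is mapped at `run`; vCPU setup and error handling elided):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Handle one MSR exit on a vCPU.  Setting run->msr.error = 1 makes KVM
 * inject #GP into the guest instead of completing the access.
 */
static void handle_msr_exit(struct kvm_run *run)
{
	switch (run->exit_reason) {
	case KVM_EXIT_X86_RDMSR:
		if (run->msr.index == 0x4b564d00) {	/* example index only */
			run->msr.data = 0;
			run->msr.error = 0;
		} else {
			run->msr.error = 1;		/* -> #GP in the guest */
		}
		break;
	case KVM_EXIT_X86_WRMSR:
		/* run->msr.data holds the value the guest tried to write */
		run->msr.error = 0;			/* swallow the write */
		break;
	}
	/* caller re-enters the guest with ioctl(vcpu_fd, KVM_RUN, 0),
	 * which invokes the complete_userspace_io callback queued above */
}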
2089 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS) && in kvm_emulate_monitor_mwait()
2111 return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) || in kvm_vcpu_exit_request()
2117 * i.e. the sending of IPI, sending IPI early in the VM-Exit flow reduces
2124 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic)) in handle_fastpath_set_x2apic_icr_irqoff()
2131 return kvm_x2apic_icr_write(vcpu->arch.apic, data); in handle_fastpath_set_x2apic_icr_irqoff()
2217 write_seqcount_begin(&vdata->seq); in update_pvclock_gtod()
2220 vdata->clock.vclock_mode = tk->tkr_mono.clock->vdso_clock_mode; in update_pvclock_gtod()
2221 vdata->clock.cycle_last = tk->tkr_mono.cycle_last; in update_pvclock_gtod()
2222 vdata->clock.mask = tk->tkr_mono.mask; in update_pvclock_gtod()
2223 vdata->clock.mult = tk->tkr_mono.mult; in update_pvclock_gtod()
2224 vdata->clock.shift = tk->tkr_mono.shift; in update_pvclock_gtod()
2225 vdata->clock.base_cycles = tk->tkr_mono.xtime_nsec; in update_pvclock_gtod()
2226 vdata->clock.offset = tk->tkr_mono.base; in update_pvclock_gtod()
2228 vdata->raw_clock.vclock_mode = tk->tkr_raw.clock->vdso_clock_mode; in update_pvclock_gtod()
2229 vdata->raw_clock.cycle_last = tk->tkr_raw.cycle_last; in update_pvclock_gtod()
2230 vdata->raw_clock.mask = tk->tkr_raw.mask; in update_pvclock_gtod()
2231 vdata->raw_clock.mult = tk->tkr_raw.mult; in update_pvclock_gtod()
2232 vdata->raw_clock.shift = tk->tkr_raw.shift; in update_pvclock_gtod()
2233 vdata->raw_clock.base_cycles = tk->tkr_raw.xtime_nsec; in update_pvclock_gtod()
2234 vdata->raw_clock.offset = tk->tkr_raw.base; in update_pvclock_gtod()
2236 vdata->wall_time_sec = tk->xtime_sec; in update_pvclock_gtod()
2238 vdata->offs_boot = tk->offs_boot; in update_pvclock_gtod()
2240 write_seqcount_end(&vdata->seq); in update_pvclock_gtod()
2284 wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm); in kvm_write_wall_clock()
2305 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_write_system_time()
2307 if (vcpu->vcpu_id == 0 && !host_initiated) { in kvm_write_system_time()
2308 if (ka->boot_vcpu_runs_old_kvmclock != old_msr) in kvm_write_system_time()
2311 ka->boot_vcpu_runs_old_kvmclock = old_msr; in kvm_write_system_time()
2314 vcpu->arch.time = system_time; in kvm_write_system_time()
2319 kvm_gpc_activate(vcpu->kvm, &vcpu->arch.pv_time, vcpu, in kvm_write_system_time()
2323 kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.pv_time); in kvm_write_system_time()
2347 shift--; in kvm_get_time_scale()
2392 vcpu->arch.tsc_catchup = 1; in set_tsc_khz()
2393 vcpu->arch.tsc_always_catchup = 1; in set_tsc_khz()
2397 return -1; in set_tsc_khz()
2401 /* TSC scaling required - calculate ratio */ in set_tsc_khz()
2406 pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n", in set_tsc_khz()
2408 return -1; in set_tsc_khz()
2424 return -1; in kvm_set_tsc_khz()
2429 &vcpu->arch.virtual_tsc_shift, in kvm_set_tsc_khz()
2430 &vcpu->arch.virtual_tsc_mult); in kvm_set_tsc_khz()
2431 vcpu->arch.virtual_tsc_khz = user_tsc_khz; in kvm_set_tsc_khz()
2439 thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm); in kvm_set_tsc_khz()
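The thresh_lo/thresh_hi computation applies tsc_tolerance_ppm (250 ppm by default, per the comment at line 156) on either side of the host frequency; outside that window KVM resorts to TSC scaling, or to catchup mode when hardware scaling is unavailable (lines 2392-2393). A standalone version of the parts-per-million scaling, mirroring adjust_tsc_khz():

#include <stdint.h>
#include <stdio.h>

/* Mirror of adjust_tsc_khz(): scale a kHz value by (1e6 + ppm) / 1e6. */
static uint32_t adjust_tsc_khz(uint32_t khz, int32_t ppm)
{
	return (uint32_t)(((uint64_t)khz * (1000000 + ppm)) / 1000000);
}

int main(void)
{
	uint32_t host_khz = 2000000;	/* 2 GHz host */
	int32_t tol = 250;		/* tsc_tolerance_ppm default */

	/* prints lo=1999500 hi=2000500 */
	printf("lo=%u hi=%u\n",
	       adjust_tsc_khz(host_khz, -tol), adjust_tsc_khz(host_khz, tol));
	return 0;
}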
2450 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, in compute_guest_tsc()
2451 vcpu->arch.virtual_tsc_mult, in compute_guest_tsc()
2452 vcpu->arch.virtual_tsc_shift); in compute_guest_tsc()
2453 tsc += vcpu->arch.this_tsc_write; in compute_guest_tsc()
2468 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_track_tsc_matching()
2471 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == in kvm_track_tsc_matching()
2472 atomic_read(&vcpu->kvm->online_vcpus)); in kvm_track_tsc_matching()
2482 if (ka->use_master_clock || in kvm_track_tsc_matching()
2483 (gtod_is_based_on_tsc(gtod->clock.vclock_mode) && vcpus_matched)) in kvm_track_tsc_matching()
2486 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, in kvm_track_tsc_matching()
2487 atomic_read(&vcpu->kvm->online_vcpus), in kvm_track_tsc_matching()
2488 ka->use_master_clock, gtod->clock.vclock_mode); in kvm_track_tsc_matching()
2495 * The most significant 64-N bits (mult) of ratio represent the
2498 * point number (mult + frac * 2^(-N)).
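The comment above (lines 2495-2498) describes the scaling ratio as a fixed-point number with N fractional bits; N is 48 on VMX and 32 on SVM, and kvm_scale_tsc() boils down to a 64x64->128-bit multiply followed by a right shift by N (mul_u64_u64_shr() in the kernel). The same arithmetic, standalone, using the compiler's unsigned __int128:

#include <inttypes.h>
#include <stdio.h>

/* Scale a TSC value by a fixed-point ratio with `frac_bits` fractional bits. */
static uint64_t scale_tsc(uint64_t tsc, uint64_t ratio, unsigned frac_bits)
{
	return (uint64_t)(((unsigned __int128)tsc * ratio) >> frac_bits);
}

int main(void)
{
	unsigned frac_bits = 48;	/* VMX uses 48, SVM uses 32 */
	/* ratio for a 2600 MHz guest on a 2000 MHz host: 1.3 in 16.48 format */
	uint64_t ratio = (uint64_t)(((unsigned __int128)2600000 << frac_bits) / 2000000);

	/* prints 1299999: the ratio truncates just below 1.3 */
	printf("scaled: %" PRIu64 "\n", scale_tsc(1000000, ratio, frac_bits));
	return 0;
}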
2522 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio); in kvm_compute_l1_tsc_offset()
2524 return target_tsc - tsc; in kvm_compute_l1_tsc_offset()
2529 return vcpu->arch.l1_tsc_offset + in kvm_read_l1_tsc()
2530 kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio); in kvm_read_l1_tsc()
2561 trace_kvm_write_tsc_offset(vcpu->vcpu_id, in kvm_vcpu_write_tsc_offset()
2562 vcpu->arch.l1_tsc_offset, in kvm_vcpu_write_tsc_offset()
2565 vcpu->arch.l1_tsc_offset = l1_offset; in kvm_vcpu_write_tsc_offset()
2573 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset( in kvm_vcpu_write_tsc_offset()
2578 vcpu->arch.tsc_offset = l1_offset; in kvm_vcpu_write_tsc_offset()
2580 static_call(kvm_x86_write_tsc_offset)(vcpu, vcpu->arch.tsc_offset); in kvm_vcpu_write_tsc_offset()
2585 vcpu->arch.l1_tsc_scaling_ratio = l1_multiplier; in kvm_vcpu_write_tsc_multiplier()
2589 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier( in kvm_vcpu_write_tsc_multiplier()
2593 vcpu->arch.tsc_scaling_ratio = l1_multiplier; in kvm_vcpu_write_tsc_multiplier()
2597 vcpu, vcpu->arch.tsc_scaling_ratio); in kvm_vcpu_write_tsc_multiplier()
2604 * TSC is marked unstable when we're running on Hyper-V, in kvm_check_tsc_unstable()
2621 struct kvm *kvm = vcpu->kvm; in __kvm_synchronize_tsc()
2623 lockdep_assert_held(&kvm->arch.tsc_write_lock); in __kvm_synchronize_tsc()
2629 kvm->arch.last_tsc_nsec = ns; in __kvm_synchronize_tsc()
2630 kvm->arch.last_tsc_write = tsc; in __kvm_synchronize_tsc()
2631 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; in __kvm_synchronize_tsc()
2632 kvm->arch.last_tsc_offset = offset; in __kvm_synchronize_tsc()
2634 vcpu->arch.last_guest_tsc = tsc; in __kvm_synchronize_tsc()
2646 * These values are tracked in kvm->arch.cur_xxx variables. in __kvm_synchronize_tsc()
2648 kvm->arch.cur_tsc_generation++; in __kvm_synchronize_tsc()
2649 kvm->arch.cur_tsc_nsec = ns; in __kvm_synchronize_tsc()
2650 kvm->arch.cur_tsc_write = tsc; in __kvm_synchronize_tsc()
2651 kvm->arch.cur_tsc_offset = offset; in __kvm_synchronize_tsc()
2652 kvm->arch.nr_vcpus_matched_tsc = 0; in __kvm_synchronize_tsc()
2653 } else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) { in __kvm_synchronize_tsc()
2654 kvm->arch.nr_vcpus_matched_tsc++; in __kvm_synchronize_tsc()
2658 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; in __kvm_synchronize_tsc()
2659 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; in __kvm_synchronize_tsc()
2660 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; in __kvm_synchronize_tsc()
2667 struct kvm *kvm = vcpu->kvm; in kvm_synchronize_tsc()
2673 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); in kvm_synchronize_tsc()
2676 elapsed = ns - kvm->arch.last_tsc_nsec; in kvm_synchronize_tsc()
2678 if (vcpu->arch.virtual_tsc_khz) { in kvm_synchronize_tsc()
2681 * detection of vcpu initialization -- need to sync in kvm_synchronize_tsc()
2687 u64 tsc_exp = kvm->arch.last_tsc_write + in kvm_synchronize_tsc()
2689 u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL; in kvm_synchronize_tsc()
2707 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { in kvm_synchronize_tsc()
2709 offset = kvm->arch.cur_tsc_offset; in kvm_synchronize_tsc()
2719 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); in kvm_synchronize_tsc()
2725 u64 tsc_offset = vcpu->arch.l1_tsc_offset; in adjust_tsc_offset_guest()
2731 if (vcpu->arch.l1_tsc_scaling_ratio != kvm_caps.default_tsc_scaling_ratio) in adjust_tsc_offset_host()
2734 vcpu->arch.l1_tsc_scaling_ratio); in adjust_tsc_offset_host()
2766 switch (clock->vclock_mode) { in vgettsc()
2773 v = (tsc_pg_val - clock->cycle_last) & in vgettsc()
2774 clock->mask; in vgettsc()
2783 v = (*tsc_timestamp - clock->cycle_last) & in vgettsc()
2784 clock->mask; in vgettsc()
2793 return v * clock->mult; in vgettsc()
2804 seq = read_seqcount_begin(&gtod->seq); in do_monotonic_raw()
2805 ns = gtod->raw_clock.base_cycles; in do_monotonic_raw()
2806 ns += vgettsc(&gtod->raw_clock, tsc_timestamp, &mode); in do_monotonic_raw()
2807 ns >>= gtod->raw_clock.shift; in do_monotonic_raw()
2808 ns += ktime_to_ns(ktime_add(gtod->raw_clock.offset, gtod->offs_boot)); in do_monotonic_raw()
2809 } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); in do_monotonic_raw()
2823 seq = read_seqcount_begin(&gtod->seq); in do_realtime()
2824 ts->tv_sec = gtod->wall_time_sec; in do_realtime()
2825 ns = gtod->clock.base_cycles; in do_realtime()
2826 ns += vgettsc(&gtod->clock, tsc_timestamp, &mode); in do_realtime()
2827 ns >>= gtod->clock.shift; in do_realtime()
2828 } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); in do_realtime()
2830 ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); in do_realtime()
2831 ts->tv_nsec = ns; in do_realtime()
2875 * 4. ret0 = timespec0 + (rdtsc - tsc0) |
2876 * 5. | ret1 = timespec1 + (rdtsc - tsc1)
2877 * | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
2881 * - ret0 < ret1
2882 * - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
2884 * - 0 < N - M => M < N
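The inequality chain above reduces the cross-vCPU monotonicity guarantee to M < N: the master clock pins one (kernel_ns, tsc) pair so every vCPU interpolates from the same base. Plugging concrete numbers into the comment's expressions makes the reduction easy to check:

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

/* ret0 = timespec0 + (rdtsc - tsc0)
 * ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
 * ret0 < ret1  <=>  M < N, i.e. the TSC advanced by less than the
 * elapsed time between the two clock updates.
 */
int main(void)
{
	uint64_t timespec0 = 1000, tsc0 = 5000, rdtsc = 9000;
	uint64_t N = 250;	/* time between update 0 and update 1 */
	uint64_t M = 200;	/* TSC ticks between the two updates (M < N) */

	uint64_t ret0 = timespec0 + (rdtsc - tsc0);
	uint64_t ret1 = timespec0 + N + (rdtsc - (tsc0 + M));

	assert(ret0 < ret1);	/* 5000 < 5050 */
	printf("ret0=%" PRIu64 " ret1=%" PRIu64 "\n", ret0, ret1);
	return 0;
}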
2903 struct kvm_arch *ka = &kvm->arch; in pvclock_update_vm_gtod_copy()
2907 lockdep_assert_held(&kvm->arch.tsc_write_lock); in pvclock_update_vm_gtod_copy()
2908 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == in pvclock_update_vm_gtod_copy()
2909 atomic_read(&kvm->online_vcpus)); in pvclock_update_vm_gtod_copy()
2916 &ka->master_kernel_ns, in pvclock_update_vm_gtod_copy()
2917 &ka->master_cycle_now); in pvclock_update_vm_gtod_copy()
2919 ka->use_master_clock = host_tsc_clocksource && vcpus_matched in pvclock_update_vm_gtod_copy()
2920 && !ka->backwards_tsc_observed in pvclock_update_vm_gtod_copy()
2921 && !ka->boot_vcpu_runs_old_kvmclock; in pvclock_update_vm_gtod_copy()
2923 if (ka->use_master_clock) in pvclock_update_vm_gtod_copy()
2927 trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode, in pvclock_update_vm_gtod_copy()
2939 raw_spin_lock_irq(&kvm->arch.tsc_write_lock); in __kvm_start_pvclock_update()
2940 write_seqcount_begin(&kvm->arch.pvclock_sc); in __kvm_start_pvclock_update()
2953 struct kvm_arch *ka = &kvm->arch; in kvm_end_pvclock_update()
2957 write_seqcount_end(&ka->pvclock_sc); in kvm_end_pvclock_update()
2958 raw_spin_unlock_irq(&ka->tsc_write_lock); in kvm_end_pvclock_update()
2975 /* Called within read_seqcount_begin/retry for kvm->pvclock_sc. */
2978 struct kvm_arch *ka = &kvm->arch; in __get_kvmclock()
2984 data->flags = 0; in __get_kvmclock()
2985 if (ka->use_master_clock && __this_cpu_read(cpu_tsc_khz)) { in __get_kvmclock()
2989 if (kvm_get_walltime_and_clockread(&ts, &data->host_tsc)) { in __get_kvmclock()
2990 data->realtime = ts.tv_nsec + NSEC_PER_SEC * ts.tv_sec; in __get_kvmclock()
2991 data->flags |= KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC; in __get_kvmclock()
2994 data->host_tsc = rdtsc(); in __get_kvmclock()
2996 data->flags |= KVM_CLOCK_TSC_STABLE; in __get_kvmclock()
2997 hv_clock.tsc_timestamp = ka->master_cycle_now; in __get_kvmclock()
2998 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; in __get_kvmclock()
3002 data->clock = __pvclock_read_cycles(&hv_clock, data->host_tsc); in __get_kvmclock()
3004 data->clock = get_kvmclock_base_ns() + ka->kvmclock_offset; in __get_kvmclock()
3012 struct kvm_arch *ka = &kvm->arch; in get_kvmclock()
3016 seq = read_seqcount_begin(&ka->pvclock_sc); in get_kvmclock()
3018 } while (read_seqcount_retry(&ka->pvclock_sc, seq)); in get_kvmclock()
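__get_kvmclock() packs master_cycle_now/master_kernel_ns into a pvclock structure and evaluates it with __pvclock_read_cycles(); the guest-visible formula is system_time plus the TSC delta, pre-shifted by tsc_shift and then scaled by the 32.32 fixed-point tsc_to_system_mul. That conversion, written out standalone:

#include <inttypes.h>
#include <stdio.h>

/* Same math as the guest-side pvclock read: pre-shift the tick delta,
 * then scale it by a 32.32 fixed-point multiplier to get nanoseconds.
 */
static uint64_t pvclock_cycles_to_ns(uint64_t tsc, uint64_t tsc_timestamp,
				     int8_t tsc_shift, uint32_t tsc_to_system_mul,
				     uint64_t system_time)
{
	uint64_t delta = tsc - tsc_timestamp;

	if (tsc_shift >= 0)
		delta <<= tsc_shift;
	else
		delta >>= -tsc_shift;

	return system_time +
	       (uint64_t)(((unsigned __int128)delta * tsc_to_system_mul) >> 32);
}

int main(void)
{
	/* 2 GHz TSC: mul = 2^31 with shift 0 gives 0.5 ns per tick */
	printf("%" PRIu64 " ns\n",		/* prints 2100 ns */
	       pvclock_cycles_to_ns(4000, 0, 0, 0x80000000u, 100));
	return 0;
}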
3033 struct kvm_vcpu_arch *vcpu = &v->arch; in kvm_setup_guest_pvclock()
3037 read_lock_irqsave(&gpc->lock, flags); in kvm_setup_guest_pvclock()
3038 while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa, in kvm_setup_guest_pvclock()
3040 read_unlock_irqrestore(&gpc->lock, flags); in kvm_setup_guest_pvclock()
3042 if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa, in kvm_setup_guest_pvclock()
3046 read_lock_irqsave(&gpc->lock, flags); in kvm_setup_guest_pvclock()
3049 guest_hv_clock = (void *)(gpc->khva + offset); in kvm_setup_guest_pvclock()
3058 guest_hv_clock->version = vcpu->hv_clock.version = (guest_hv_clock->version + 1) | 1; in kvm_setup_guest_pvclock()
3062 vcpu->hv_clock.flags |= (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED); in kvm_setup_guest_pvclock()
3064 if (vcpu->pvclock_set_guest_stopped_request) { in kvm_setup_guest_pvclock()
3065 vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED; in kvm_setup_guest_pvclock()
3066 vcpu->pvclock_set_guest_stopped_request = false; in kvm_setup_guest_pvclock()
3069 memcpy(guest_hv_clock, &vcpu->hv_clock, sizeof(*guest_hv_clock)); in kvm_setup_guest_pvclock()
3072 guest_hv_clock->version = ++vcpu->hv_clock.version; in kvm_setup_guest_pvclock()
3074 mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT); in kvm_setup_guest_pvclock()
3075 read_unlock_irqrestore(&gpc->lock, flags); in kvm_setup_guest_pvclock()
3077 trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock); in kvm_setup_guest_pvclock()
3084 struct kvm_vcpu_arch *vcpu = &v->arch; in kvm_guest_time_update()
3085 struct kvm_arch *ka = &v->kvm->arch; in kvm_guest_time_update()
3099 seq = read_seqcount_begin(&ka->pvclock_sc); in kvm_guest_time_update()
3100 use_master_clock = ka->use_master_clock; in kvm_guest_time_update()
3102 host_tsc = ka->master_cycle_now; in kvm_guest_time_update()
3103 kernel_ns = ka->master_kernel_ns; in kvm_guest_time_update()
3105 } while (read_seqcount_retry(&ka->pvclock_sc, seq)); in kvm_guest_time_update()
3126 * 2) Broken TSC compensation resets the base at each VCPU in kvm_guest_time_update()
3132 if (vcpu->tsc_catchup) { in kvm_guest_time_update()
3135 adjust_tsc_offset_guest(v, tsc - tsc_timestamp); in kvm_guest_time_update()
3146 v->arch.l1_tsc_scaling_ratio); in kvm_guest_time_update()
3148 if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) { in kvm_guest_time_update()
3150 &vcpu->hv_clock.tsc_shift, in kvm_guest_time_update()
3151 &vcpu->hv_clock.tsc_to_system_mul); in kvm_guest_time_update()
3152 vcpu->hw_tsc_khz = tgt_tsc_khz; in kvm_guest_time_update()
3155 vcpu->hv_clock.tsc_timestamp = tsc_timestamp; in kvm_guest_time_update()
3156 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; in kvm_guest_time_update()
3157 vcpu->last_guest_tsc = tsc_timestamp; in kvm_guest_time_update()
3164 vcpu->hv_clock.flags = pvclock_flags; in kvm_guest_time_update()
3166 if (vcpu->pv_time.active) in kvm_guest_time_update()
3167 kvm_setup_guest_pvclock(v, &vcpu->pv_time, 0); in kvm_guest_time_update()
3168 if (vcpu->xen.vcpu_info_cache.active) in kvm_guest_time_update()
3169 kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_info_cache, in kvm_guest_time_update()
3171 if (vcpu->xen.vcpu_time_info_cache.active) in kvm_guest_time_update()
3172 kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_time_info_cache, 0); in kvm_guest_time_update()
3173 kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock); in kvm_guest_time_update()
3179 * vcpu->cpu migration, should not allow system_timestamp from
3185 * We need to rate-limit these requests though, as they can
3188 * by the delay we use to rate-limit the updates.
3210 struct kvm *kvm = v->kvm; in kvm_gen_kvmclock_update()
3213 schedule_delayed_work(&kvm->arch.kvmclock_update_work, in kvm_gen_kvmclock_update()
3229 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0); in kvmclock_sync_fn()
3230 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, in kvmclock_sync_fn()
3251 return !!(vcpu->arch.msr_hwcr & BIT_ULL(18)); in can_set_mci_status()
3258 u64 mcg_cap = vcpu->arch.mcg_cap; in set_msr_mce()
3260 u32 msr = msr_info->index; in set_msr_mce()
3261 u64 data = msr_info->data; in set_msr_mce()
3266 vcpu->arch.mcg_status = data; in set_msr_mce()
3270 (data || !msr_info->host_initiated)) in set_msr_mce()
3274 vcpu->arch.mcg_ctl = data; in set_msr_mce()
3276 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1: in set_msr_mce()
3277 last_msr = MSR_IA32_MCx_CTL2(bank_num) - 1; in set_msr_mce()
3281 if (!(mcg_cap & MCG_CMCI_P) && (data || !msr_info->host_initiated)) in set_msr_mce()
3286 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL2, in set_msr_mce()
3287 last_msr + 1 - MSR_IA32_MC0_CTL2); in set_msr_mce()
3288 vcpu->arch.mci_ctl2_banks[offset] = data; in set_msr_mce()
3290 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: in set_msr_mce()
3291 last_msr = MSR_IA32_MCx_CTL(bank_num) - 1; in set_msr_mce()
3303 * single-bit ECC data errors. in set_msr_mce()
3311 * AMD-based CPUs allow non-zero values, but if and only if in set_msr_mce()
3314 if (!msr_info->host_initiated && is_mci_status_msr(msr) && in set_msr_mce()
3318 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL, in set_msr_mce()
3319 last_msr + 1 - MSR_IA32_MC0_CTL); in set_msr_mce()
3320 vcpu->arch.mce_banks[offset] = data; in set_msr_mce()
3332 return (vcpu->arch.apf.msr_en_val & mask) == mask; in kvm_pv_async_pf_enabled()
3354 vcpu->arch.apf.msr_en_val = data; in kvm_pv_enable_async_pf()
3362 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, in kvm_pv_enable_async_pf()
3366 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); in kvm_pv_enable_async_pf()
3367 vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; in kvm_pv_enable_async_pf()
3376 /* Bits 8-63 are reserved */ in kvm_pv_enable_async_pf_int()
3383 vcpu->arch.apf.msr_int_val = data; in kvm_pv_enable_async_pf_int()
3385 vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK; in kvm_pv_enable_async_pf_int()
3392 kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.pv_time); in kvmclock_reset()
3393 vcpu->arch.time = 0; in kvmclock_reset()
3398 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb_all()
3404 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb_guest()
3423 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb_current()
3431 * prior to nested VM-Enter/VM-Exit.
3445 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; in record_steal_time()
3448 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS; in record_steal_time()
3452 if (kvm_xen_msr_enabled(vcpu->kvm)) { in record_steal_time()
3457 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in record_steal_time()
3460 if (WARN_ON_ONCE(current->mm != vcpu->kvm->mm)) in record_steal_time()
3463 slots = kvm_memslots(vcpu->kvm); in record_steal_time()
3465 if (unlikely(slots->generation != ghc->generation || in record_steal_time()
3466 gpa != ghc->gpa || in record_steal_time()
3467 kvm_is_error_hva(ghc->hva) || !ghc->memslot)) { in record_steal_time()
3469 BUILD_BUG_ON((sizeof(*st) - 1) & KVM_STEAL_VALID_BITS); in record_steal_time()
3471 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st)) || in record_steal_time()
3472 kvm_is_error_hva(ghc->hva) || !ghc->memslot) in record_steal_time()
3476 st = (struct kvm_steal_time __user *)ghc->hva; in record_steal_time()
3483 int err = -EFAULT; in record_steal_time()
3494 "+m" (st->preempted)); in record_steal_time()
3500 vcpu->arch.st.preempted = 0; in record_steal_time()
3502 trace_kvm_pv_tlb_flush(vcpu->vcpu_id, in record_steal_time()
3513 unsafe_put_user(0, &st->preempted, out); in record_steal_time()
3514 vcpu->arch.st.preempted = 0; in record_steal_time()
3517 unsafe_get_user(version, &st->version, out); in record_steal_time()
3522 unsafe_put_user(version, &st->version, out); in record_steal_time()
3526 unsafe_get_user(steal, &st->steal, out); in record_steal_time()
3527 steal += current->sched_info.run_delay - in record_steal_time()
3528 vcpu->arch.st.last_steal; in record_steal_time()
3529 vcpu->arch.st.last_steal = current->sched_info.run_delay; in record_steal_time()
3530 unsafe_put_user(steal, &st->steal, out); in record_steal_time()
3533 unsafe_put_user(version, &st->version, out); in record_steal_time()
3538 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); in record_steal_time()
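record_steal_time() publishes st->steal with the classic even/odd version protocol visible at lines 3517-3533: bump the version to an odd value, update the fields, bump it back to even, with smp_wmb() barriers between the steps. The guest-side read loop that pairs with it, sketched under those assumptions:

#include <inttypes.h>
#include <stdio.h>

struct kvm_steal_time_view {		/* simplified view of the shared page */
	volatile uint32_t version;
	volatile uint64_t steal;
};

/* Retry until an even, stable version is observed: an odd version means
 * the host is mid-update, a changed version means we raced with one.
 */
static uint64_t read_steal(const struct kvm_steal_time_view *st)
{
	uint32_t v;
	uint64_t steal;

	do {
		v = st->version;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);  /* pairs with host smp_wmb() */
		steal = st->steal;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
	} while ((v & 1) || v != st->version);

	return steal;
}

int main(void)
{
	struct kvm_steal_time_view st = { .version = 2, .steal = 12345 };
	printf("steal = %" PRIu64 " ns\n", read_steal(&st));
	return 0;
}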
3544 u32 msr = msr_info->index; in kvm_set_msr_common()
3545 u64 data = msr_info->data; in kvm_set_msr_common()
3547 if (msr && msr == vcpu->kvm->arch.xen_hvm_config.msr) in kvm_set_msr_common()
3561 if (msr_info->host_initiated) in kvm_set_msr_common()
3562 vcpu->arch.microcode_version = data; in kvm_set_msr_common()
3565 if (!msr_info->host_initiated) in kvm_set_msr_common()
3567 vcpu->arch.arch_capabilities = data; in kvm_set_msr_common()
3572 if (!msr_info->host_initiated) in kvm_set_msr_common()
3579 vcpu->arch.perf_capabilities = data; in kvm_set_msr_common()
3592 vcpu->arch.msr_hwcr = data; in kvm_set_msr_common()
3606 case 0x200 ... MSR_IA32_MC0_CTL2 - 1: in kvm_set_msr_common()
3618 if (!msr_info->host_initiated) { in kvm_set_msr_common()
3619 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; in kvm_set_msr_common()
3626 vcpu->arch.ia32_tsc_adjust_msr = data; in kvm_set_msr_common()
3630 u64 old_val = vcpu->arch.ia32_misc_enable_msr; in kvm_set_msr_common()
3632 if (!msr_info->host_initiated) { in kvm_set_msr_common()
3642 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) && in kvm_set_msr_common()
3646 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
3649 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
3654 if (!msr_info->host_initiated) in kvm_set_msr_common()
3656 vcpu->arch.smbase = data; in kvm_set_msr_common()
3659 vcpu->arch.msr_ia32_power_ctl = data; in kvm_set_msr_common()
3662 if (msr_info->host_initiated) { in kvm_set_msr_common()
3665 u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset; in kvm_set_msr_common()
3667 vcpu->arch.ia32_tsc_adjust_msr += adj; in kvm_set_msr_common()
3671 if (!msr_info->host_initiated && in kvm_set_msr_common()
3681 vcpu->arch.ia32_xss = data; in kvm_set_msr_common()
3685 if (!msr_info->host_initiated) in kvm_set_msr_common()
3687 vcpu->arch.smi_count = data; in kvm_set_msr_common()
3693 vcpu->kvm->arch.wall_clock = data; in kvm_set_msr_common()
3694 kvm_write_wall_clock(vcpu->kvm, data, 0); in kvm_set_msr_common()
3700 vcpu->kvm->arch.wall_clock = data; in kvm_set_msr_common()
3701 kvm_write_wall_clock(vcpu->kvm, data, 0); in kvm_set_msr_common()
3707 kvm_write_system_time(vcpu, data, false, msr_info->host_initiated); in kvm_set_msr_common()
3713 kvm_write_system_time(vcpu, data, true, msr_info->host_initiated); in kvm_set_msr_common()
3733 vcpu->arch.apf.pageready_pending = false; in kvm_set_msr_common()
3747 vcpu->arch.st.msr_val = data; in kvm_set_msr_common()
3768 if (data & (-1ULL << 1)) in kvm_set_msr_common()
3771 vcpu->arch.msr_kvm_poll_control = data; in kvm_set_msr_common()
3776 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: in kvm_set_msr_common()
3777 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1: in kvm_set_msr_common()
3797 * all pre-dating SVM, but a recommended workaround from in kvm_set_msr_common()
3813 msr_info->host_initiated); in kvm_set_msr_common()
3815 /* Drop writes to this legacy MSR -- see rdmsr in kvm_set_msr_common()
3825 vcpu->arch.osvw.length = data; in kvm_set_msr_common()
3830 vcpu->arch.osvw.status = data; in kvm_set_msr_common()
3833 if (!msr_info->host_initiated || in kvm_set_msr_common()
3837 vcpu->arch.msr_platform_info = data; in kvm_set_msr_common()
3844 vcpu->arch.msr_misc_features_enables = data; in kvm_set_msr_common()
3848 if (!msr_info->host_initiated && in kvm_set_msr_common()
3855 fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data); in kvm_set_msr_common()
3858 if (!msr_info->host_initiated && in kvm_set_msr_common()
3865 vcpu->arch.guest_fpu.xfd_err = data; in kvm_set_msr_common()
3876 * as to-be-saved, even if an MSR isn't fully supported. in kvm_set_msr_common()
3878 return !msr_info->host_initiated || data; in kvm_set_msr_common()
3891 u64 mcg_cap = vcpu->arch.mcg_cap; in get_msr_mce()
3901 data = vcpu->arch.mcg_cap; in get_msr_mce()
3906 data = vcpu->arch.mcg_ctl; in get_msr_mce()
3909 data = vcpu->arch.mcg_status; in get_msr_mce()
3911 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1: in get_msr_mce()
3912 last_msr = MSR_IA32_MCx_CTL2(bank_num) - 1; in get_msr_mce()
3918 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL2, in get_msr_mce()
3919 last_msr + 1 - MSR_IA32_MC0_CTL2); in get_msr_mce()
3920 data = vcpu->arch.mci_ctl2_banks[offset]; in get_msr_mce()
3922 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: in get_msr_mce()
3923 last_msr = MSR_IA32_MCx_CTL(bank_num) - 1; in get_msr_mce()
3927 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL, in get_msr_mce()
3928 last_msr + 1 - MSR_IA32_MC0_CTL); in get_msr_mce()
3929 data = vcpu->arch.mce_banks[offset]; in get_msr_mce()
3940 switch (msr_info->index) { in kvm_get_msr_common()
3962 * so for existing CPU-specific MSRs. in kvm_get_msr_common()
3969 msr_info->data = 0; in kvm_get_msr_common()
3975 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) in kvm_get_msr_common()
3979 * to-be-saved, even if an MSR isn't fully supported. in kvm_get_msr_common()
3981 if (!msr_info->host_initiated) in kvm_get_msr_common()
3983 msr_info->data = 0; in kvm_get_msr_common()
3989 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) in kvm_get_msr_common()
3991 msr_info->data = 0; in kvm_get_msr_common()
3994 msr_info->data = vcpu->arch.microcode_version; in kvm_get_msr_common()
3997 if (!msr_info->host_initiated && in kvm_get_msr_common()
4000 msr_info->data = vcpu->arch.arch_capabilities; in kvm_get_msr_common()
4003 if (!msr_info->host_initiated && in kvm_get_msr_common()
4006 msr_info->data = vcpu->arch.perf_capabilities; in kvm_get_msr_common()
4009 msr_info->data = vcpu->arch.msr_ia32_power_ctl; in kvm_get_msr_common()
4018 * return L1's TSC value to ensure backwards-compatible in kvm_get_msr_common()
4023 if (msr_info->host_initiated) { in kvm_get_msr_common()
4024 offset = vcpu->arch.l1_tsc_offset; in kvm_get_msr_common()
4025 ratio = vcpu->arch.l1_tsc_scaling_ratio; in kvm_get_msr_common()
4027 offset = vcpu->arch.tsc_offset; in kvm_get_msr_common()
4028 ratio = vcpu->arch.tsc_scaling_ratio; in kvm_get_msr_common()
4031 msr_info->data = kvm_scale_tsc(rdtsc(), ratio) + offset; in kvm_get_msr_common()
4035 case 0x200 ... MSR_IA32_MC0_CTL2 - 1: in kvm_get_msr_common()
4037 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
4039 msr_info->data = 3; in kvm_get_msr_common()
4053 msr_info->data = 1 << 24; in kvm_get_msr_common()
4056 msr_info->data = kvm_get_apic_base(vcpu); in kvm_get_msr_common()
4059 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
4061 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu); in kvm_get_msr_common()
4064 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr; in kvm_get_msr_common()
4067 msr_info->data = vcpu->arch.ia32_misc_enable_msr; in kvm_get_msr_common()
4070 if (!msr_info->host_initiated) in kvm_get_msr_common()
4072 msr_info->data = vcpu->arch.smbase; in kvm_get_msr_common()
4075 msr_info->data = vcpu->arch.smi_count; in kvm_get_msr_common()
4079 msr_info->data = 1000ULL; in kvm_get_msr_common()
4081 msr_info->data |= (((uint64_t)4ULL) << 40); in kvm_get_msr_common()
4084 msr_info->data = vcpu->arch.efer; in kvm_get_msr_common()
4090 msr_info->data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
4096 msr_info->data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
4102 msr_info->data = vcpu->arch.time; in kvm_get_msr_common()
4108 msr_info->data = vcpu->arch.time; in kvm_get_msr_common()
4114 msr_info->data = vcpu->arch.apf.msr_en_val; in kvm_get_msr_common()
4120 msr_info->data = vcpu->arch.apf.msr_int_val; in kvm_get_msr_common()
4126 msr_info->data = 0; in kvm_get_msr_common()
4132 msr_info->data = vcpu->arch.st.msr_val; in kvm_get_msr_common()
4138 msr_info->data = vcpu->arch.pv_eoi.msr_val; in kvm_get_msr_common()
4144 msr_info->data = vcpu->arch.msr_kvm_poll_control; in kvm_get_msr_common()
4151 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: in kvm_get_msr_common()
4152 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1: in kvm_get_msr_common()
4153 return get_msr_mce(vcpu, msr_info->index, &msr_info->data, in kvm_get_msr_common()
4154 msr_info->host_initiated); in kvm_get_msr_common()
4156 if (!msr_info->host_initiated && in kvm_get_msr_common()
4159 msr_info->data = vcpu->arch.ia32_xss; in kvm_get_msr_common()
4163 * Provide expected ramp-up count for K7. All other in kvm_get_msr_common()
4171 msr_info->data = 0x20000000; in kvm_get_msr_common()
4183 msr_info->index, &msr_info->data, in kvm_get_msr_common()
4184 msr_info->host_initiated); in kvm_get_msr_common()
4196 msr_info->data = 0xbe702111; in kvm_get_msr_common()
4201 msr_info->data = vcpu->arch.osvw.length; in kvm_get_msr_common()
4206 msr_info->data = vcpu->arch.osvw.status; in kvm_get_msr_common()
4209 if (!msr_info->host_initiated && in kvm_get_msr_common()
4210 !vcpu->kvm->arch.guest_can_read_msr_platform_info) in kvm_get_msr_common()
4212 msr_info->data = vcpu->arch.msr_platform_info; in kvm_get_msr_common()
4215 msr_info->data = vcpu->arch.msr_misc_features_enables; in kvm_get_msr_common()
4218 msr_info->data = vcpu->arch.msr_hwcr; in kvm_get_msr_common()
4222 if (!msr_info->host_initiated && in kvm_get_msr_common()
4226 msr_info->data = vcpu->arch.guest_fpu.fpstate->xfd; in kvm_get_msr_common()
4229 if (!msr_info->host_initiated && in kvm_get_msr_common()
4233 msr_info->data = vcpu->arch.guest_fpu.xfd_err; in kvm_get_msr_common()
4237 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) in kvm_get_msr_common()
4257 for (i = 0; i < msrs->nmsrs; ++i) in __msr_io()
4279 r = -EFAULT; in msr_io()
4283 r = -E2BIG; in msr_io()
4288 entries = memdup_user(user_msrs->entries, size); in msr_io()
4298 r = -EFAULT; in msr_io()
4299 if (writeback && copy_to_user(user_msrs->entries, entries, size)) in msr_io()
4323 r = -EFAULT; in kvm_ioctl_get_supported_hv_cpuid()
4327 r = kvm_get_hv_cpuid(vcpu, &cpuid, cpuid_arg->entries); in kvm_ioctl_get_supported_hv_cpuid()
4331 r = -EFAULT; in kvm_ioctl_get_supported_hv_cpuid()
4480 r = kvm_x86_ops.nested_ops->get_state ? in kvm_vm_ioctl_check_extension()
4481 kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0; in kvm_vm_ioctl_check_extension()
4487 r = kvm_x86_ops.nested_ops->enable_evmcs != NULL; in kvm_vm_ioctl_check_extension()
4527 void __user *uaddr = (void __user *)(unsigned long)attr->addr; in kvm_get_attr_addr()
4529 if ((u64)(unsigned long)uaddr != attr->addr) in kvm_get_attr_addr()
4530 return ERR_PTR_USR(-EFAULT); in kvm_get_attr_addr()
4538 if (attr->group) in kvm_x86_dev_get_attr()
4539 return -ENXIO; in kvm_x86_dev_get_attr()
4544 switch (attr->attr) { in kvm_x86_dev_get_attr()
4547 return -EFAULT; in kvm_x86_dev_get_attr()
4550 return -ENXIO; in kvm_x86_dev_get_attr()
4557 if (attr->group) in kvm_x86_dev_has_attr()
4558 return -ENXIO; in kvm_x86_dev_has_attr()
4560 switch (attr->attr) { in kvm_x86_dev_has_attr()
4564 return -ENXIO; in kvm_x86_dev_has_attr()
4580 r = -EFAULT; in kvm_arch_dev_ioctl()
4587 r = -E2BIG; in kvm_arch_dev_ioctl()
4590 r = -EFAULT; in kvm_arch_dev_ioctl()
4591 if (copy_to_user(user_msr_list->indices, &msrs_to_save, in kvm_arch_dev_ioctl()
4594 if (copy_to_user(user_msr_list->indices + num_msrs_to_save, in kvm_arch_dev_ioctl()
4606 r = -EFAULT; in kvm_arch_dev_ioctl()
4610 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, in kvm_arch_dev_ioctl()
4615 r = -EFAULT; in kvm_arch_dev_ioctl()
4622 r = -EFAULT; in kvm_arch_dev_ioctl()
4633 r = -EFAULT; in kvm_arch_dev_ioctl()
4640 r = -E2BIG; in kvm_arch_dev_ioctl()
4643 r = -EFAULT; in kvm_arch_dev_ioctl()
4644 if (copy_to_user(user_msr_list->indices, &msr_based_features, in kvm_arch_dev_ioctl()
4658 r = -EFAULT; in kvm_arch_dev_ioctl()
4666 r = -EFAULT; in kvm_arch_dev_ioctl()
4673 r = -EINVAL; in kvm_arch_dev_ioctl()
4687 return kvm_arch_has_noncoherent_dma(vcpu->kvm); in need_emulate_wbinvd()
4695 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_load()
4696 else if (vcpu->cpu != -1 && vcpu->cpu != cpu) in kvm_arch_vcpu_load()
4697 smp_call_function_single(vcpu->cpu, in kvm_arch_vcpu_load()
4704 vcpu->arch.host_pkru = read_pkru(); in kvm_arch_vcpu_load()
4707 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { in kvm_arch_vcpu_load()
4708 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); in kvm_arch_vcpu_load()
4709 vcpu->arch.tsc_offset_adjustment = 0; in kvm_arch_vcpu_load()
4713 if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) { in kvm_arch_vcpu_load()
4714 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : in kvm_arch_vcpu_load()
4715 rdtsc() - vcpu->arch.last_host_tsc; in kvm_arch_vcpu_load()
4721 vcpu->arch.last_guest_tsc); in kvm_arch_vcpu_load()
4723 vcpu->arch.tsc_catchup = 1; in kvm_arch_vcpu_load()
4731 * kvmclock on vcpu->cpu migration in kvm_arch_vcpu_load()
4733 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) in kvm_arch_vcpu_load()
4735 if (vcpu->cpu != cpu) in kvm_arch_vcpu_load()
4737 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
4745 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; in kvm_steal_time_set_preempted()
4749 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS; in kvm_steal_time_set_preempted()
4752 * The vCPU can be marked preempted if and only if the VM-Exit was on in kvm_steal_time_set_preempted()
4756 * preempted if and only if the VM-Exit was due to a host interrupt. in kvm_steal_time_set_preempted()
4758 if (!vcpu->arch.at_instruction_boundary) { in kvm_steal_time_set_preempted()
4759 vcpu->stat.preemption_other++; in kvm_steal_time_set_preempted()
4763 vcpu->stat.preemption_reported++; in kvm_steal_time_set_preempted()
4764 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in kvm_steal_time_set_preempted()
4767 if (vcpu->arch.st.preempted) in kvm_steal_time_set_preempted()
4771 if (unlikely(current->mm != vcpu->kvm->mm)) in kvm_steal_time_set_preempted()
4774 slots = kvm_memslots(vcpu->kvm); in kvm_steal_time_set_preempted()
4776 if (unlikely(slots->generation != ghc->generation || in kvm_steal_time_set_preempted()
4777 gpa != ghc->gpa || in kvm_steal_time_set_preempted()
4778 kvm_is_error_hva(ghc->hva) || !ghc->memslot)) in kvm_steal_time_set_preempted()
4781 st = (struct kvm_steal_time __user *)ghc->hva; in kvm_steal_time_set_preempted()
4782 BUILD_BUG_ON(sizeof(st->preempted) != sizeof(preempted)); in kvm_steal_time_set_preempted()
4784 if (!copy_to_user_nofault(&st->preempted, &preempted, sizeof(preempted))) in kvm_steal_time_set_preempted()
4785 vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; in kvm_steal_time_set_preempted()
4787 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); in kvm_steal_time_set_preempted()
4794 if (vcpu->preempted) { in kvm_arch_vcpu_put()
4795 if (!vcpu->arch.guest_state_protected) in kvm_arch_vcpu_put()
4796 vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu); in kvm_arch_vcpu_put()
4802 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_put()
4803 if (kvm_xen_msr_enabled(vcpu->kvm)) in kvm_arch_vcpu_put()
4807 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_put()
4811 vcpu->arch.last_host_tsc = rdtsc(); in kvm_arch_vcpu_put()
4858 * instruction boundary and with no events half-injected. in kvm_vcpu_ready_for_interrupt_injection()
4869 if (irq->irq >= KVM_NR_INTERRUPTS) in kvm_vcpu_ioctl_interrupt()
4870 return -EINVAL; in kvm_vcpu_ioctl_interrupt()
4872 if (!irqchip_in_kernel(vcpu->kvm)) { in kvm_vcpu_ioctl_interrupt()
4873 kvm_queue_interrupt(vcpu, irq->irq, false); in kvm_vcpu_ioctl_interrupt()
4879 * With in-kernel LAPIC, we only use this to inject EXTINT, so in kvm_vcpu_ioctl_interrupt()
4880 * fail for in-kernel 8259. in kvm_vcpu_ioctl_interrupt()
4882 if (pic_in_kernel(vcpu->kvm)) in kvm_vcpu_ioctl_interrupt()
4883 return -ENXIO; in kvm_vcpu_ioctl_interrupt()
4885 if (vcpu->arch.pending_external_vector != -1) in kvm_vcpu_ioctl_interrupt()
4886 return -EEXIST; in kvm_vcpu_ioctl_interrupt()
4888 vcpu->arch.pending_external_vector = irq->irq; in kvm_vcpu_ioctl_interrupt()
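kvm_vcpu_ioctl_interrupt() backs the KVM_INTERRUPT ioctl: with no in-kernel irqchip the vector is queued directly, and with an in-kernel LAPIC but userspace PIC it lands in pending_external_vector (one at a time, hence -EEXIST). The userspace side, sketched (vcpu_fd assumed open; with a fully in-kernel irqchip use KVM_IRQ_LINE instead):

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* Inject interrupt vector `vector` into a vCPU via KVM_INTERRUPT.
 * Only valid when the interrupt controller is (at least partly)
 * emulated in userspace, matching the checks in the fragment above.
 */
static int inject_extint(int vcpu_fd, unsigned int vector)
{
	struct kvm_interrupt irq = { .irq = vector };

	if (ioctl(vcpu_fd, KVM_INTERRUPT, &irq) < 0) {
		perror("KVM_INTERRUPT");	/* e.g. EEXIST: one already pending */
		return -1;
	}
	return 0;
}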
4910 if (tac->flags) in vcpu_ioctl_tpr_access_reporting()
4911 return -EINVAL; in vcpu_ioctl_tpr_access_reporting()
4912 vcpu->arch.tpr_access_reporting = !!tac->enabled; in vcpu_ioctl_tpr_access_reporting()
4922 r = -EINVAL; in kvm_vcpu_ioctl_x86_setup_mce()
4928 vcpu->arch.mcg_cap = mcg_cap; in kvm_vcpu_ioctl_x86_setup_mce()
4931 vcpu->arch.mcg_ctl = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
4934 vcpu->arch.mce_banks[bank*4] = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
4936 vcpu->arch.mci_ctl2_banks[bank] = 0; in kvm_vcpu_ioctl_x86_setup_mce()
4949 * - none of the bits for Machine Check Exceptions are set
4950 * - both the VAL (valid) and UC (uncorrectable) bits are set
4951 * MCI_STATUS_PCC - Processor Context Corrupted
4952 * MCI_STATUS_S - Signaled as a Machine Check Exception
4953 * MCI_STATUS_AR - Software recoverable Action Required
4957 return !mce->mcg_status && in is_ucna()
4958 !(mce->status & (MCI_STATUS_PCC | MCI_STATUS_S | MCI_STATUS_AR)) && in is_ucna()
4959 (mce->status & MCI_STATUS_VAL) && in is_ucna()
4960 (mce->status & MCI_STATUS_UC); in is_ucna()
4965 u64 mcg_cap = vcpu->arch.mcg_cap; in kvm_vcpu_x86_set_ucna()
4967 banks[1] = mce->status; in kvm_vcpu_x86_set_ucna()
4968 banks[2] = mce->addr; in kvm_vcpu_x86_set_ucna()
4969 banks[3] = mce->misc; in kvm_vcpu_x86_set_ucna()
4970 vcpu->arch.mcg_status = mce->mcg_status; in kvm_vcpu_x86_set_ucna()
4973 !(vcpu->arch.mci_ctl2_banks[mce->bank] & MCI_CTL2_CMCI_EN)) in kvm_vcpu_x86_set_ucna()
4977 kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTCMCI); in kvm_vcpu_x86_set_ucna()
4985 u64 mcg_cap = vcpu->arch.mcg_cap; in kvm_vcpu_ioctl_x86_set_mce()
4987 u64 *banks = vcpu->arch.mce_banks; in kvm_vcpu_ioctl_x86_set_mce()
4989 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL)) in kvm_vcpu_ioctl_x86_set_mce()
4990 return -EINVAL; in kvm_vcpu_ioctl_x86_set_mce()
4992 banks += array_index_nospec(4 * mce->bank, 4 * bank_num); in kvm_vcpu_ioctl_x86_set_mce()
5001 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) && in kvm_vcpu_ioctl_x86_set_mce()
5002 vcpu->arch.mcg_ctl != ~(u64)0) in kvm_vcpu_ioctl_x86_set_mce()
5008 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0) in kvm_vcpu_ioctl_x86_set_mce()
5010 if (mce->status & MCI_STATUS_UC) { in kvm_vcpu_ioctl_x86_set_mce()
5011 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || in kvm_vcpu_ioctl_x86_set_mce()
5017 mce->status |= MCI_STATUS_OVER; in kvm_vcpu_ioctl_x86_set_mce()
5018 banks[2] = mce->addr; in kvm_vcpu_ioctl_x86_set_mce()
5019 banks[3] = mce->misc; in kvm_vcpu_ioctl_x86_set_mce()
5020 vcpu->arch.mcg_status = mce->mcg_status; in kvm_vcpu_ioctl_x86_set_mce()
5021 banks[1] = mce->status; in kvm_vcpu_ioctl_x86_set_mce()
5026 mce->status |= MCI_STATUS_OVER; in kvm_vcpu_ioctl_x86_set_mce()
5027 banks[2] = mce->addr; in kvm_vcpu_ioctl_x86_set_mce()
5028 banks[3] = mce->misc; in kvm_vcpu_ioctl_x86_set_mce()
5029 banks[1] = mce->status; in kvm_vcpu_ioctl_x86_set_mce()
5048 * non-exiting _injected_ exception, and a pending exiting exception. in kvm_vcpu_ioctl_x86_get_vcpu_events()
5049 * In that case, ignore the VM-Exiting exception as it's an extension in kvm_vcpu_ioctl_x86_get_vcpu_events()
5052 if (vcpu->arch.exception_vmexit.pending && in kvm_vcpu_ioctl_x86_get_vcpu_events()
5053 !vcpu->arch.exception.pending && in kvm_vcpu_ioctl_x86_get_vcpu_events()
5054 !vcpu->arch.exception.injected) in kvm_vcpu_ioctl_x86_get_vcpu_events()
5055 ex = &vcpu->arch.exception_vmexit; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5057 ex = &vcpu->arch.exception; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5062 * intercepts #PF, ditto for DR6 and #DBs. If the per-VM capability, in kvm_vcpu_ioctl_x86_get_vcpu_events()
5067 if (!vcpu->kvm->arch.exception_payload_enabled && in kvm_vcpu_ioctl_x86_get_vcpu_events()
5068 ex->pending && ex->has_payload) in kvm_vcpu_ioctl_x86_get_vcpu_events()
5077 if (kvm_exception_is_soft(ex->vector)) { in kvm_vcpu_ioctl_x86_get_vcpu_events()
5078 events->exception.injected = 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5079 events->exception.pending = 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5081 events->exception.injected = ex->injected; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5082 events->exception.pending = ex->pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5088 if (!vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_get_vcpu_events()
5089 events->exception.injected |= ex->pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5091 events->exception.nr = ex->vector; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5092 events->exception.has_error_code = ex->has_error_code; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5093 events->exception.error_code = ex->error_code; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5094 events->exception_has_payload = ex->has_payload; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5095 events->exception_payload = ex->payload; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5097 events->interrupt.injected = in kvm_vcpu_ioctl_x86_get_vcpu_events()
5098 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5099 events->interrupt.nr = vcpu->arch.interrupt.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5100 events->interrupt.soft = 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5101 events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5103 events->nmi.injected = vcpu->arch.nmi_injected; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5104 events->nmi.pending = vcpu->arch.nmi_pending != 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5105 events->nmi.masked = static_call(kvm_x86_get_nmi_mask)(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5106 events->nmi.pad = 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5108 events->sipi_vector = 0; /* never valid when reporting to user space */ in kvm_vcpu_ioctl_x86_get_vcpu_events()
5110 events->smi.smm = is_smm(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5111 events->smi.pending = vcpu->arch.smi_pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5112 events->smi.smm_inside_nmi = in kvm_vcpu_ioctl_x86_get_vcpu_events()
5113 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5114 events->smi.latched_init = kvm_lapic_latched_init(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5116 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING in kvm_vcpu_ioctl_x86_get_vcpu_events()
5119 if (vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_get_vcpu_events()
5120 events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5121 if (vcpu->kvm->arch.triple_fault_event) { in kvm_vcpu_ioctl_x86_get_vcpu_events()
5122 events->triple_fault.pending = kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5123 events->flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5126 memset(&events->reserved, 0, sizeof(events->reserved)); in kvm_vcpu_ioctl_x86_get_vcpu_events()
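/*
 * Illustrative userspace counterpart (a sketch, not part of this file):
 * the state assembled above is read with the KVM_GET_VCPU_EVENTS ioctl
 * on a vCPU fd, e.g. when saving vCPU state for live migration:
 *
 *	struct kvm_vcpu_events events;
 *
 *	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
 *		err(1, "KVM_GET_VCPU_EVENTS");
 *
 * The KVM_VCPUEVENT_VALID_* bits in events.flags say which fields are
 * meaningful, mirroring the flags set just above.
 */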
5134 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING in kvm_vcpu_ioctl_x86_set_vcpu_events()
5140 return -EINVAL; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5142 if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
5143 if (!vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5144 return -EINVAL; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5145 if (events->exception.pending) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5146 events->exception.injected = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5148 events->exception_has_payload = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5150 events->exception.pending = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5151 events->exception_has_payload = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5154 if ((events->exception.injected || events->exception.pending) && in kvm_vcpu_ioctl_x86_set_vcpu_events()
5155 (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR)) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5156 return -EINVAL; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5159 if (events->flags & KVM_VCPUEVENT_VALID_SMM && in kvm_vcpu_ioctl_x86_set_vcpu_events()
5160 (events->smi.smm || events->smi.pending) && in kvm_vcpu_ioctl_x86_set_vcpu_events()
5161 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5162 return -EINVAL; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5168 * morph the exception to a VM-Exit if appropriate. Do this only for in kvm_vcpu_ioctl_x86_set_vcpu_events()
5169 * pending exceptions, already-injected exceptions are not subject to in kvm_vcpu_ioctl_x86_set_vcpu_events()
5172 * pending exception, which in turn may cause a spurious VM-Exit. in kvm_vcpu_ioctl_x86_set_vcpu_events()
5174 vcpu->arch.exception_from_userspace = events->exception.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5176 vcpu->arch.exception_vmexit.pending = false; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5178 vcpu->arch.exception.injected = events->exception.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5179 vcpu->arch.exception.pending = events->exception.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5180 vcpu->arch.exception.vector = events->exception.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5181 vcpu->arch.exception.has_error_code = events->exception.has_error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5182 vcpu->arch.exception.error_code = events->exception.error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5183 vcpu->arch.exception.has_payload = events->exception_has_payload; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5184 vcpu->arch.exception.payload = events->exception_payload; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5186 vcpu->arch.interrupt.injected = events->interrupt.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5187 vcpu->arch.interrupt.nr = events->interrupt.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5188 vcpu->arch.interrupt.soft = events->interrupt.soft; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5189 if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5191 events->interrupt.shadow); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5193 vcpu->arch.nmi_injected = events->nmi.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5194 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5195 vcpu->arch.nmi_pending = events->nmi.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5196 static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5198 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && in kvm_vcpu_ioctl_x86_set_vcpu_events()
5200 vcpu->arch.apic->sipi_vector = events->sipi_vector; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5202 if (events->flags & KVM_VCPUEVENT_VALID_SMM) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
5203 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
5205 kvm_smm_changed(vcpu, events->smi.smm); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5208 vcpu->arch.smi_pending = events->smi.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5210 if (events->smi.smm) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
5211 if (events->smi.smm_inside_nmi) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5212 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5214 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5218 if (events->smi.latched_init) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5219 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5221 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5225 if (events->flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
5226 if (!vcpu->kvm->arch.triple_fault_event) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5227 return -EINVAL; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5228 if (events->triple_fault.pending) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5244 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_get_debugregs()
5246 dbgregs->dr6 = val; in kvm_vcpu_ioctl_x86_get_debugregs()
5247 dbgregs->dr7 = vcpu->arch.dr7; in kvm_vcpu_ioctl_x86_get_debugregs()
5248 dbgregs->flags = 0; in kvm_vcpu_ioctl_x86_get_debugregs()
5249 memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); in kvm_vcpu_ioctl_x86_get_debugregs()
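/*
 * Sketch of the matching userspace flow (illustrative only): the debug
 * registers are migrated with the KVM_GET_DEBUGREGS/KVM_SET_DEBUGREGS
 * pair on a vCPU fd:
 *
 *	struct kvm_debugregs dbg;
 *
 *	ioctl(src_vcpu_fd, KVM_GET_DEBUGREGS, &dbg);
 *	ioctl(dst_vcpu_fd, KVM_SET_DEBUGREGS, &dbg);
 *
 * dbg.flags must be zero and dbg.dr6/dbg.dr7 must hold valid values,
 * per the checks in kvm_vcpu_ioctl_x86_set_debugregs() below.
 */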
5255 if (dbgregs->flags) in kvm_vcpu_ioctl_x86_set_debugregs()
5256 return -EINVAL; in kvm_vcpu_ioctl_x86_set_debugregs()
5258 if (!kvm_dr6_valid(dbgregs->dr6)) in kvm_vcpu_ioctl_x86_set_debugregs()
5259 return -EINVAL; in kvm_vcpu_ioctl_x86_set_debugregs()
5260 if (!kvm_dr7_valid(dbgregs->dr7)) in kvm_vcpu_ioctl_x86_set_debugregs()
5261 return -EINVAL; in kvm_vcpu_ioctl_x86_set_debugregs()
5263 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_set_debugregs()
5265 vcpu->arch.dr6 = dbgregs->dr6; in kvm_vcpu_ioctl_x86_set_debugregs()
5266 vcpu->arch.dr7 = dbgregs->dr7; in kvm_vcpu_ioctl_x86_set_debugregs()
5275 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) in kvm_vcpu_ioctl_x86_get_xsave()
5278 fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, in kvm_vcpu_ioctl_x86_get_xsave()
5279 guest_xsave->region, in kvm_vcpu_ioctl_x86_get_xsave()
5280 sizeof(guest_xsave->region), in kvm_vcpu_ioctl_x86_get_xsave()
5281 vcpu->arch.pkru); in kvm_vcpu_ioctl_x86_get_xsave()
5287 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) in kvm_vcpu_ioctl_x86_get_xsave2()
5290 fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, in kvm_vcpu_ioctl_x86_get_xsave2()
5291 state, size, vcpu->arch.pkru); in kvm_vcpu_ioctl_x86_get_xsave2()
5297 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) in kvm_vcpu_ioctl_x86_set_xsave()
5300 return fpu_copy_uabi_to_guest_fpstate(&vcpu->arch.guest_fpu, in kvm_vcpu_ioctl_x86_set_xsave()
5301 guest_xsave->region, in kvm_vcpu_ioctl_x86_set_xsave()
5303 &vcpu->arch.pkru); in kvm_vcpu_ioctl_x86_set_xsave()
5310 guest_xcrs->nr_xcrs = 0; in kvm_vcpu_ioctl_x86_get_xcrs()
5314 guest_xcrs->nr_xcrs = 1; in kvm_vcpu_ioctl_x86_get_xcrs()
5315 guest_xcrs->flags = 0; in kvm_vcpu_ioctl_x86_get_xcrs()
5316 guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK; in kvm_vcpu_ioctl_x86_get_xcrs()
5317 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; in kvm_vcpu_ioctl_x86_get_xcrs()
5326 return -EINVAL; in kvm_vcpu_ioctl_x86_set_xcrs()
5328 if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags) in kvm_vcpu_ioctl_x86_set_xcrs()
5329 return -EINVAL; in kvm_vcpu_ioctl_x86_set_xcrs()
5331 for (i = 0; i < guest_xcrs->nr_xcrs; i++) in kvm_vcpu_ioctl_x86_set_xcrs()
5333 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) { in kvm_vcpu_ioctl_x86_set_xcrs()
5335 guest_xcrs->xcrs[i].value); in kvm_vcpu_ioctl_x86_set_xcrs()
5339 r = -EINVAL; in kvm_vcpu_ioctl_x86_set_xcrs()
5351 if (!vcpu->arch.pv_time.active) in kvm_set_guest_paused()
5352 return -EINVAL; in kvm_set_guest_paused()
5353 vcpu->arch.pvclock_set_guest_stopped_request = true; in kvm_set_guest_paused()
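/*
 * Illustrative use (not part of this file): a VMM that pauses a guest,
 * e.g. for checkpointing, issues KVM_KVMCLOCK_CTRL on each vCPU fd so
 * the guest can be told it was stopped and skip watchdog panics over
 * the lost time:
 *
 *	ioctl(vcpu_fd, KVM_KVMCLOCK_CTRL, 0);
 *
 * As above, the ioctl fails with -EINVAL unless pvclock is active.
 */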
5363 switch (attr->attr) { in kvm_arch_tsc_has_attr()
5368 r = -ENXIO; in kvm_arch_tsc_has_attr()
5383 switch (attr->attr) { in kvm_arch_tsc_get_attr()
5385 r = -EFAULT; in kvm_arch_tsc_get_attr()
5386 if (put_user(vcpu->arch.l1_tsc_offset, uaddr)) in kvm_arch_tsc_get_attr()
5391 r = -ENXIO; in kvm_arch_tsc_get_attr()
5401 struct kvm *kvm = vcpu->kvm; in kvm_arch_tsc_set_attr()
5407 switch (attr->attr) { in kvm_arch_tsc_set_attr()
5413 r = -EFAULT; in kvm_arch_tsc_set_attr()
5417 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); in kvm_arch_tsc_set_attr()
5419 matched = (vcpu->arch.virtual_tsc_khz && in kvm_arch_tsc_set_attr()
5420 kvm->arch.last_tsc_khz == vcpu->arch.virtual_tsc_khz && in kvm_arch_tsc_set_attr()
5421 kvm->arch.last_tsc_offset == offset); in kvm_arch_tsc_set_attr()
5423 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset; in kvm_arch_tsc_set_attr()
5427 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); in kvm_arch_tsc_set_attr()
5433 r = -ENXIO; in kvm_arch_tsc_set_attr()
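/*
 * Userspace reaches the TSC attributes above through the generic device
 * attribute API. A minimal sketch, assuming KVM_CAP_VCPU_ATTRIBUTES is
 * available:
 *
 *	__u64 offset = 0;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_VCPU_TSC_CTRL,
 *		.attr  = KVM_VCPU_TSC_OFFSET,
 *		.addr  = (__u64)(unsigned long)&offset,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 */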
5447 return -EFAULT; in kvm_vcpu_ioctl_device_attr()
5450 return -ENXIO; in kvm_vcpu_ioctl_device_attr()
5474 if (cap->flags) in kvm_vcpu_ioctl_enable_cap()
5475 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
5477 switch (cap->cap) { in kvm_vcpu_ioctl_enable_cap()
5479 if (cap->args[0]) in kvm_vcpu_ioctl_enable_cap()
5480 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
5484 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_vcpu_ioctl_enable_cap()
5485 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
5486 return kvm_hv_activate_synic(vcpu, cap->cap == in kvm_vcpu_ioctl_enable_cap()
5489 if (!kvm_x86_ops.nested_ops->enable_evmcs) in kvm_vcpu_ioctl_enable_cap()
5490 return -ENOTTY; in kvm_vcpu_ioctl_enable_cap()
5491 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version); in kvm_vcpu_ioctl_enable_cap()
5493 user_ptr = (void __user *)(uintptr_t)cap->args[0]; in kvm_vcpu_ioctl_enable_cap()
5496 r = -EFAULT; in kvm_vcpu_ioctl_enable_cap()
5501 return -ENOTTY; in kvm_vcpu_ioctl_enable_cap()
5506 return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]); in kvm_vcpu_ioctl_enable_cap()
5509 vcpu->arch.pv_cpuid.enforce = cap->args[0]; in kvm_vcpu_ioctl_enable_cap()
5510 if (vcpu->arch.pv_cpuid.enforce) in kvm_vcpu_ioctl_enable_cap()
5515 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
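/*
 * Example caller (illustrative): per-vCPU capabilities are turned on
 * via KVM_ENABLE_CAP on the vCPU fd, e.g. to have KVM enforce the PV
 * CPUID features it reported to the guest:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_ENFORCE_PV_FEATURE_CPUID,
 *		.args[0] = 1,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */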
5522 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
5538 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5544 r = -ENOMEM; in kvm_arch_vcpu_ioctl()
5550 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5557 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5572 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5590 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5593 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
5600 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5604 cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
5611 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5615 cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
5618 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5625 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5627 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5631 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5633 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5639 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5645 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5655 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5658 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5661 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5663 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5669 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5678 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5689 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5698 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5710 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5720 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5729 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5730 if (vcpu->arch.guest_fpu.uabi_size > sizeof(struct kvm_xsave)) in kvm_arch_vcpu_ioctl()
5734 r = -ENOMEM; in kvm_arch_vcpu_ioctl()
5740 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5747 int size = vcpu->arch.guest_fpu.uabi_size; in kvm_arch_vcpu_ioctl()
5760 int size = vcpu->arch.guest_fpu.uabi_size; in kvm_arch_vcpu_ioctl()
5763 r = -ENOMEM; in kvm_arch_vcpu_ioctl()
5769 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5779 r = -ENOMEM; in kvm_arch_vcpu_ioctl()
5785 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5805 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5821 r = vcpu->arch.virtual_tsc_khz; in kvm_arch_vcpu_ioctl()
5831 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5841 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5842 if (!kvm_x86_ops.nested_ops->get_state) in kvm_arch_vcpu_ioctl()
5845 BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size)); in kvm_arch_vcpu_ioctl()
5846 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5847 if (get_user(user_data_size, &user_kvm_nested_state->size)) in kvm_arch_vcpu_ioctl()
5850 r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state, in kvm_arch_vcpu_ioctl()
5856 if (put_user(r, &user_kvm_nested_state->size)) in kvm_arch_vcpu_ioctl()
5857 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5859 r = -E2BIG; in kvm_arch_vcpu_ioctl()
5871 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5872 if (!kvm_x86_ops.nested_ops->set_state) in kvm_arch_vcpu_ioctl()
5875 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5879 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5894 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5895 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state); in kvm_arch_vcpu_ioctl()
5896 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5906 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5911 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5917 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5926 r = -ENOMEM; in kvm_arch_vcpu_ioctl()
5930 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5952 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5970 if (addr > (unsigned int)(-3 * PAGE_SIZE)) in kvm_vm_ioctl_set_tss_addr()
5971 return -EINVAL; in kvm_vm_ioctl_set_tss_addr()
5986 return -EINVAL; in kvm_vm_ioctl_set_nr_mmu_pages()
5988 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_set_nr_mmu_pages()
5991 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; in kvm_vm_ioctl_set_nr_mmu_pages()
5993 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_set_nr_mmu_pages()
5999 return kvm->arch.n_max_mmu_pages; in kvm_vm_ioctl_get_nr_mmu_pages()
6004 struct kvm_pic *pic = kvm->arch.vpic; in kvm_vm_ioctl_get_irqchip()
6008 switch (chip->chip_id) { in kvm_vm_ioctl_get_irqchip()
6010 memcpy(&chip->chip.pic, &pic->pics[0], in kvm_vm_ioctl_get_irqchip()
6014 memcpy(&chip->chip.pic, &pic->pics[1], in kvm_vm_ioctl_get_irqchip()
6018 kvm_get_ioapic(kvm, &chip->chip.ioapic); in kvm_vm_ioctl_get_irqchip()
6021 r = -EINVAL; in kvm_vm_ioctl_get_irqchip()
6029 struct kvm_pic *pic = kvm->arch.vpic; in kvm_vm_ioctl_set_irqchip()
6033 switch (chip->chip_id) { in kvm_vm_ioctl_set_irqchip()
6035 spin_lock(&pic->lock); in kvm_vm_ioctl_set_irqchip()
6036 memcpy(&pic->pics[0], &chip->chip.pic, in kvm_vm_ioctl_set_irqchip()
6038 spin_unlock(&pic->lock); in kvm_vm_ioctl_set_irqchip()
6041 spin_lock(&pic->lock); in kvm_vm_ioctl_set_irqchip()
6042 memcpy(&pic->pics[1], &chip->chip.pic, in kvm_vm_ioctl_set_irqchip()
6044 spin_unlock(&pic->lock); in kvm_vm_ioctl_set_irqchip()
6047 kvm_set_ioapic(kvm, &chip->chip.ioapic); in kvm_vm_ioctl_set_irqchip()
6050 r = -EINVAL; in kvm_vm_ioctl_set_irqchip()
6053 kvm_pic_update_irq(pic); in kvm_vm_ioctl_set_irqchip()
6059 struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state; in kvm_vm_ioctl_get_pit()
6061 BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels)); in kvm_vm_ioctl_get_pit()
6063 mutex_lock(&kps->lock); in kvm_vm_ioctl_get_pit()
6064 memcpy(ps, &kps->channels, sizeof(*ps)); in kvm_vm_ioctl_get_pit()
6065 mutex_unlock(&kps->lock); in kvm_vm_ioctl_get_pit()
6072 struct kvm_pit *pit = kvm->arch.vpit; in kvm_vm_ioctl_set_pit()
6074 mutex_lock(&pit->pit_state.lock); in kvm_vm_ioctl_set_pit()
6075 memcpy(&pit->pit_state.channels, ps, sizeof(*ps)); in kvm_vm_ioctl_set_pit()
6077 kvm_pit_load_count(pit, i, ps->channels[i].count, 0); in kvm_vm_ioctl_set_pit()
6078 mutex_unlock(&pit->pit_state.lock); in kvm_vm_ioctl_set_pit()
6084 mutex_lock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit2()
6085 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, in kvm_vm_ioctl_get_pit2()
6086 sizeof(ps->channels)); in kvm_vm_ioctl_get_pit2()
6087 ps->flags = kvm->arch.vpit->pit_state.flags; in kvm_vm_ioctl_get_pit2()
6088 mutex_unlock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit2()
6089 memset(&ps->reserved, 0, sizeof(ps->reserved)); in kvm_vm_ioctl_get_pit2()
6098 struct kvm_pit *pit = kvm->arch.vpit; in kvm_vm_ioctl_set_pit2()
6100 mutex_lock(&pit->pit_state.lock); in kvm_vm_ioctl_set_pit2()
6101 prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; in kvm_vm_ioctl_set_pit2()
6102 cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY; in kvm_vm_ioctl_set_pit2()
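/*
 * A flip of KVM_PIT_FLAGS_HPET_LEGACY is passed down to
 * kvm_pit_load_count() below so channel 0 can be reloaded without
 * arming its timer interrupt: in HPET legacy replacement mode the
 * HPET, not the PIT, drives IRQ0.
 */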
6105 memcpy(&pit->pit_state.channels, &ps->channels, in kvm_vm_ioctl_set_pit2()
6106 sizeof(pit->pit_state.channels)); in kvm_vm_ioctl_set_pit2()
6107 pit->pit_state.flags = ps->flags; in kvm_vm_ioctl_set_pit2()
6109 kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count, in kvm_vm_ioctl_set_pit2()
6111 mutex_unlock(&pit->pit_state.lock); in kvm_vm_ioctl_set_pit2()
6118 struct kvm_pit *pit = kvm->arch.vpit; in kvm_vm_ioctl_reinject()
6120 /* pit->pit_state.lock was overloaded to prevent userspace from getting in kvm_vm_ioctl_reinject()
6124 mutex_lock(&pit->pit_state.lock); in kvm_vm_ioctl_reinject()
6125 kvm_pit_set_reinject(pit, control->pit_reinject); in kvm_vm_ioctl_reinject()
6126 mutex_unlock(&pit->pit_state.lock); in kvm_vm_ioctl_reinject()
6137 * on all VM-Exits, thus we only need to kick running vCPUs to force a in kvm_arch_sync_dirty_log()
6138 * VM-Exit. in kvm_arch_sync_dirty_log()
6151 return -ENXIO; in kvm_vm_ioctl_irq_line()
6153 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, in kvm_vm_ioctl_irq_line()
6154 irq_event->irq, irq_event->level, in kvm_vm_ioctl_irq_line()
6164 if (cap->flags) in kvm_vm_ioctl_enable_cap()
6165 return -EINVAL; in kvm_vm_ioctl_enable_cap()
6167 switch (cap->cap) { in kvm_vm_ioctl_enable_cap()
6169 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6170 if (cap->args[0] & ~KVM_X86_VALID_QUIRKS) in kvm_vm_ioctl_enable_cap()
6174 kvm->arch.disabled_quirks = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6178 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
6179 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6180 if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS) in kvm_vm_ioctl_enable_cap()
6182 r = -EEXIST; in kvm_vm_ioctl_enable_cap()
6185 if (kvm->created_vcpus) in kvm_vm_ioctl_enable_cap()
6192 kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT; in kvm_vm_ioctl_enable_cap()
6193 kvm->arch.nr_reserved_ioapic_pins = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6197 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
6201 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6202 if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS) in kvm_vm_ioctl_enable_cap()
6205 if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS) in kvm_vm_ioctl_enable_cap()
6206 kvm->arch.x2apic_format = true; in kvm_vm_ioctl_enable_cap()
6207 if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK) in kvm_vm_ioctl_enable_cap()
6208 kvm->arch.x2apic_broadcast_quirk_disabled = true; in kvm_vm_ioctl_enable_cap()
6213 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6214 if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS) in kvm_vm_ioctl_enable_cap()
6217 if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) && in kvm_vm_ioctl_enable_cap()
6219 kvm->arch.mwait_in_guest = true; in kvm_vm_ioctl_enable_cap()
6220 if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT) in kvm_vm_ioctl_enable_cap()
6221 kvm->arch.hlt_in_guest = true; in kvm_vm_ioctl_enable_cap()
6222 if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE) in kvm_vm_ioctl_enable_cap()
6223 kvm->arch.pause_in_guest = true; in kvm_vm_ioctl_enable_cap()
6224 if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE) in kvm_vm_ioctl_enable_cap()
6225 kvm->arch.cstate_in_guest = true; in kvm_vm_ioctl_enable_cap()
6229 kvm->arch.guest_can_read_msr_platform_info = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6233 kvm->arch.exception_payload_enabled = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6237 kvm->arch.triple_fault_event = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6241 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6242 if (cap->args[0] & ~(KVM_MSR_EXIT_REASON_INVAL | in kvm_vm_ioctl_enable_cap()
6246 kvm->arch.user_space_msr_mask = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6250 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6251 if (cap->args[0] & ~KVM_BUS_LOCK_DETECTION_VALID_MODE) in kvm_vm_ioctl_enable_cap()
6254 if ((cap->args[0] & KVM_BUS_LOCK_DETECTION_OFF) && in kvm_vm_ioctl_enable_cap()
6255 (cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT)) in kvm_vm_ioctl_enable_cap()
6259 cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT) in kvm_vm_ioctl_enable_cap()
6260 kvm->arch.bus_lock_detection_enabled = true; in kvm_vm_ioctl_enable_cap()
6267 r = sgx_set_attribute(&allowed_attributes, cap->args[0]); in kvm_vm_ioctl_enable_cap()
6274 kvm->arch.sgx_provisioning_allowed = true; in kvm_vm_ioctl_enable_cap()
6276 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6281 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6285 r = static_call(kvm_x86_vm_copy_enc_context_from)(kvm, cap->args[0]); in kvm_vm_ioctl_enable_cap()
6288 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6292 r = static_call(kvm_x86_vm_move_enc_context_from)(kvm, cap->args[0]); in kvm_vm_ioctl_enable_cap()
6295 if (cap->args[0] & ~KVM_EXIT_HYPERCALL_VALID_MASK) { in kvm_vm_ioctl_enable_cap()
6296 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6299 kvm->arch.hypercall_exit_enabled = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6303 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6304 if (cap->args[0] & ~1) in kvm_vm_ioctl_enable_cap()
6306 kvm->arch.exit_on_emulation_error = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6310 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6311 if (!enable_pmu || (cap->args[0] & ~KVM_CAP_PMU_VALID_MASK)) in kvm_vm_ioctl_enable_cap()
6314 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
6315 if (!kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
6316 kvm->arch.enable_pmu = !(cap->args[0] & KVM_PMU_CAP_DISABLE); in kvm_vm_ioctl_enable_cap()
6319 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
6322 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6323 if (cap->args[0] > KVM_MAX_VCPU_IDS) in kvm_vm_ioctl_enable_cap()
6326 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
6327 if (kvm->arch.max_vcpu_ids == cap->args[0]) { in kvm_vm_ioctl_enable_cap()
6329 } else if (!kvm->arch.max_vcpu_ids) { in kvm_vm_ioctl_enable_cap()
6330 kvm->arch.max_vcpu_ids = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6333 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
6336 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6337 if ((u32)cap->args[0] & ~KVM_X86_NOTIFY_VMEXIT_VALID_BITS) in kvm_vm_ioctl_enable_cap()
6341 if (!((u32)cap->args[0] & KVM_X86_NOTIFY_VMEXIT_ENABLED)) in kvm_vm_ioctl_enable_cap()
6343 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
6344 if (!kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
6345 kvm->arch.notify_window = cap->args[0] >> 32; in kvm_vm_ioctl_enable_cap()
6346 kvm->arch.notify_vmexit_flags = (u32)cap->args[0]; in kvm_vm_ioctl_enable_cap()
6349 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
6352 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6366 r = -EPERM; in kvm_vm_ioctl_enable_cap()
6370 if (cap->args[0]) in kvm_vm_ioctl_enable_cap()
6373 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
6374 if (!kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
6375 kvm->arch.disable_nx_huge_pages = true; in kvm_vm_ioctl_enable_cap()
6378 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
6381 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
6395 msr_filter->default_allow = default_allow; in kvm_alloc_msr_filter()
6406 for (i = 0; i < msr_filter->count; i++) in kvm_free_msr_filter()
6407 kfree(msr_filter->ranges[i].bitmap); in kvm_free_msr_filter()
6418 if (!user_range->nmsrs) in kvm_add_msr_filter()
6421 if (user_range->flags & ~(KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE)) in kvm_add_msr_filter()
6422 return -EINVAL; in kvm_add_msr_filter()
6424 if (!user_range->flags) in kvm_add_msr_filter()
6425 return -EINVAL; in kvm_add_msr_filter()
6427 bitmap_size = BITS_TO_LONGS(user_range->nmsrs) * sizeof(long); in kvm_add_msr_filter()
6429 return -EINVAL; in kvm_add_msr_filter()
6431 bitmap = memdup_user((__user u8*)user_range->bitmap, bitmap_size); in kvm_add_msr_filter()
6435 msr_filter->ranges[msr_filter->count] = (struct msr_bitmap_range) { in kvm_add_msr_filter()
6436 .flags = user_range->flags, in kvm_add_msr_filter()
6437 .base = user_range->base, in kvm_add_msr_filter()
6438 .nmsrs = user_range->nmsrs, in kvm_add_msr_filter()
6442 msr_filter->count++; in kvm_add_msr_filter()
6455 if (filter->flags & ~KVM_MSR_FILTER_DEFAULT_DENY) in kvm_vm_ioctl_set_msr_filter()
6456 return -EINVAL; in kvm_vm_ioctl_set_msr_filter()
6458 for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) in kvm_vm_ioctl_set_msr_filter()
6459 empty &= !filter->ranges[i].nmsrs; in kvm_vm_ioctl_set_msr_filter()
6461 default_allow = !(filter->flags & KVM_MSR_FILTER_DEFAULT_DENY); in kvm_vm_ioctl_set_msr_filter()
6463 return -EINVAL; in kvm_vm_ioctl_set_msr_filter()
6467 return -ENOMEM; in kvm_vm_ioctl_set_msr_filter()
6469 for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) { in kvm_vm_ioctl_set_msr_filter()
6470 r = kvm_add_msr_filter(new_filter, &filter->ranges[i]); in kvm_vm_ioctl_set_msr_filter()
6477 mutex_lock(&kvm->lock); in kvm_vm_ioctl_set_msr_filter()
6479 /* The per-VM filter is protected by kvm->lock... */ in kvm_vm_ioctl_set_msr_filter()
6480 old_filter = srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1); in kvm_vm_ioctl_set_msr_filter()
6482 rcu_assign_pointer(kvm->arch.msr_filter, new_filter); in kvm_vm_ioctl_set_msr_filter()
6483 synchronize_srcu(&kvm->srcu); in kvm_vm_ioctl_set_msr_filter()
6488 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_set_msr_filter()
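/*
 * Sketch of the caller's side (illustrative, not a complete policy):
 * deny guest access to a single MSR while leaving everything else
 * allowed. A zero bit in a range's bitmap denies the access types named
 * in that range's flags:
 *
 *	__u8 bitmap = 0;	bit 0 covers the MSR at .base; 0 = deny
 *	struct kvm_msr_filter filter = {
 *		.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
 *		.ranges[0] = {
 *			.flags  = KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE,
 *			.base   = 0xc0000103,	MSR_TSC_AUX, arbitrary pick
 *			.nmsrs  = 1,
 *			.bitmap = &bitmap,
 *		},
 *	};
 *
 *	ioctl(vm_fd, KVM_X86_SET_MSR_FILTER, &filter);
 */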
6498 __u32 base;
6513 struct kvm *kvm = filp->private_data; in kvm_arch_vm_compat_ioctl()
6514 long r = -ENOTTY; in kvm_arch_vm_compat_ioctl()
6525 return -EFAULT; in kvm_arch_vm_compat_ioctl()
6533 .flags = cr->flags, in kvm_arch_vm_compat_ioctl()
6534 .nmsrs = cr->nmsrs, in kvm_arch_vm_compat_ioctl()
6535 .base = cr->base, in kvm_arch_vm_compat_ioctl()
6536 .bitmap = (__u8 *)(ulong)cr->bitmap, in kvm_arch_vm_compat_ioctl()
6556 mutex_lock(&kvm->lock); in kvm_arch_suspend_notifier()
6558 if (!vcpu->arch.pv_time.active) in kvm_arch_suspend_notifier()
6564 vcpu->vcpu_id, ret); in kvm_arch_suspend_notifier()
6568 mutex_unlock(&kvm->lock); in kvm_arch_suspend_notifier()
6591 return -EFAULT; in kvm_vm_ioctl_get_clock()
6598 struct kvm_arch *ka = &kvm->arch; in kvm_vm_ioctl_set_clock()
6603 return -EFAULT; in kvm_vm_ioctl_set_clock()
6610 return -EINVAL; in kvm_vm_ioctl_set_clock()
6630 data.clock += now_real_ns - data.realtime; in kvm_vm_ioctl_set_clock()
6633 if (ka->use_master_clock) in kvm_vm_ioctl_set_clock()
6634 now_raw_ns = ka->master_kernel_ns; in kvm_vm_ioctl_set_clock()
6637 ka->kvmclock_offset = data.clock - now_raw_ns; in kvm_vm_ioctl_set_clock()
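/*
 * The realtime adjustment above is what makes clock migration work. A
 * sketch of the expected userspace sequence (illustrative only):
 *
 *	struct kvm_clock_data data;
 *
 *	ioctl(src_vm_fd, KVM_GET_CLOCK, &data);
 *	... transfer, downtime ...
 *	ioctl(dst_vm_fd, KVM_SET_CLOCK, &data);
 *
 * If KVM_CLOCK_REALTIME is set in data.flags, the destination advances
 * the guest clock by the host wall-clock time that elapsed in between.
 */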
6645 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
6647 int r = -ENOTTY; in kvm_arch_vm_ioctl()
6649 * This union makes it completely explicit to gcc-3.x in kvm_arch_vm_ioctl()
6666 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
6667 r = -EINVAL; in kvm_arch_vm_ioctl()
6668 if (kvm->created_vcpus) in kvm_arch_vm_ioctl()
6670 r = -EFAULT; in kvm_arch_vm_ioctl()
6675 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
6685 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
6687 r = -EEXIST; in kvm_arch_vm_ioctl()
6691 r = -EINVAL; in kvm_arch_vm_ioctl()
6692 if (kvm->created_vcpus) in kvm_arch_vm_ioctl()
6711 /* Write kvm->irq_routing before enabling irqchip_in_kernel. */ in kvm_arch_vm_ioctl()
6713 kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL; in kvm_arch_vm_ioctl()
6716 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
6723 r = -EFAULT; in kvm_arch_vm_ioctl()
6728 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
6729 r = -EEXIST; in kvm_arch_vm_ioctl()
6730 if (kvm->arch.vpit) in kvm_arch_vm_ioctl()
6732 r = -ENOMEM; in kvm_arch_vm_ioctl()
6733 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); in kvm_arch_vm_ioctl()
6734 if (kvm->arch.vpit) in kvm_arch_vm_ioctl()
6737 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
6740 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ in kvm_arch_vm_ioctl()
6749 r = -ENXIO; in kvm_arch_vm_ioctl()
6755 r = -EFAULT; in kvm_arch_vm_ioctl()
6764 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ in kvm_arch_vm_ioctl()
6773 r = -ENXIO; in kvm_arch_vm_ioctl()
6782 r = -EFAULT; in kvm_arch_vm_ioctl()
6785 r = -ENXIO; in kvm_arch_vm_ioctl()
6786 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
6791 r = -EFAULT; in kvm_arch_vm_ioctl()
6798 r = -EFAULT; in kvm_arch_vm_ioctl()
6801 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
6802 r = -ENXIO; in kvm_arch_vm_ioctl()
6803 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
6807 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
6811 r = -ENXIO; in kvm_arch_vm_ioctl()
6812 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
6817 r = -EFAULT; in kvm_arch_vm_ioctl()
6824 r = -EFAULT; in kvm_arch_vm_ioctl()
6827 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
6828 r = -ENXIO; in kvm_arch_vm_ioctl()
6829 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
6833 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
6838 r = -EFAULT; in kvm_arch_vm_ioctl()
6841 r = -ENXIO; in kvm_arch_vm_ioctl()
6842 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
6849 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
6850 if (kvm->created_vcpus) in kvm_arch_vm_ioctl()
6851 r = -EBUSY; in kvm_arch_vm_ioctl()
6853 kvm->arch.bsp_vcpu_id = arg; in kvm_arch_vm_ioctl()
6854 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
6859 r = -EFAULT; in kvm_arch_vm_ioctl()
6868 r = -EFAULT; in kvm_arch_vm_ioctl()
6873 r = -EFAULT; in kvm_arch_vm_ioctl()
6879 r = -EFAULT; in kvm_arch_vm_ioctl()
6888 r = -EFAULT; in kvm_arch_vm_ioctl()
6904 r = -EINVAL; in kvm_arch_vm_ioctl()
6914 WRITE_ONCE(kvm->arch.default_tsc_khz, user_tsc_khz); in kvm_arch_vm_ioctl()
6920 r = READ_ONCE(kvm->arch.default_tsc_khz); in kvm_arch_vm_ioctl()
6924 r = -ENOTTY; in kvm_arch_vm_ioctl()
6934 r = -EFAULT; in kvm_arch_vm_ioctl()
6938 r = -ENOTTY; in kvm_arch_vm_ioctl()
6948 r = -EFAULT; in kvm_arch_vm_ioctl()
6952 r = -ENOTTY; in kvm_arch_vm_ioctl()
6962 r = -EFAULT; in kvm_arch_vm_ioctl()
6976 return -EFAULT; in kvm_arch_vm_ioctl()
6982 r = -ENOTTY; in kvm_arch_vm_ioctl()
7041 msrs_to_save_all[i] - MSR_IA32_RTIT_ADDR0_A >= in kvm_init_msr_list()
7046 if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >= in kvm_init_msr_list()
7051 if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >= in kvm_init_msr_list()
7094 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) in vcpu_mmio_write()
7099 len -= n; in vcpu_mmio_write()
7114 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, in vcpu_mmio_read()
7121 len -= n; in vcpu_mmio_read()
7143 struct kvm_mmu *mmu = vcpu->arch.mmu; in translate_nested_gpa()
7148 /* NPT walks are always user-walks */ in translate_nested_gpa()
7150 t_gpa = mmu->gva_to_gpa(vcpu, mmu, gpa, access, exception); in translate_nested_gpa()
7158 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_mmu_gva_to_gpa_read()
7161 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); in kvm_mmu_gva_to_gpa_read()
7168 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_mmu_gva_to_gpa_fetch()
7172 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); in kvm_mmu_gva_to_gpa_fetch()
7178 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_mmu_gva_to_gpa_write()
7182 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); in kvm_mmu_gva_to_gpa_write()
7190 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_mmu_gva_to_gpa_system()
7192 return mmu->gva_to_gpa(vcpu, mmu, gva, 0, exception); in kvm_mmu_gva_to_gpa_system()
7199 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_read_guest_virt_helper()
7204 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception); in kvm_read_guest_virt_helper()
7205 unsigned offset = addr & (PAGE_SIZE-1); in kvm_read_guest_virt_helper()
7206 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); in kvm_read_guest_virt_helper()
7218 bytes -= toread; in kvm_read_guest_virt_helper()
7232 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_fetch_guest_virt()
7238 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access|PFERR_FETCH_MASK, in kvm_fetch_guest_virt()
7243 offset = addr & (PAGE_SIZE-1); in kvm_fetch_guest_virt()
7245 bytes = (unsigned)PAGE_SIZE - offset; in kvm_fetch_guest_virt()
7300 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_write_guest_virt_helper()
7305 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception); in kvm_write_guest_virt_helper()
7306 unsigned offset = addr & (PAGE_SIZE-1); in kvm_write_guest_virt_helper()
7307 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); in kvm_write_guest_virt_helper()
7318 bytes -= towrite; in kvm_write_guest_virt_helper()
7346 vcpu->arch.l1tf_flush_l1d = true; in kvm_write_guest_virt_system()
7404 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in vcpu_mmio_gva_to_gpa()
7414 !permission_fault(vcpu, vcpu->arch.walk_mmu, in vcpu_mmio_gva_to_gpa()
7415 vcpu->arch.mmio_access, 0, access))) { in vcpu_mmio_gva_to_gpa()
7416 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | in vcpu_mmio_gva_to_gpa()
7417 (gva & (PAGE_SIZE - 1)); in vcpu_mmio_gva_to_gpa()
7422 *gpa = mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); in vcpu_mmio_gva_to_gpa()
7425 return -1; in vcpu_mmio_gva_to_gpa()
7456 if (vcpu->mmio_read_completed) { in read_prepare()
7458 vcpu->mmio_fragments[0].gpa, val); in read_prepare()
7459 vcpu->mmio_read_completed = 0; in read_prepare()
7494 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; in write_exit_mmio()
7496 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); in write_exit_mmio()
7522 bool write = ops->write; in emulator_read_write_onepage()
7524 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in emulator_read_write_onepage()
7533 if (ctxt->gpa_available && emulator_can_use_gpa(ctxt) && in emulator_read_write_onepage()
7534 (addr & ~PAGE_MASK) == (ctxt->gpa_val & ~PAGE_MASK)) { in emulator_read_write_onepage()
7535 gpa = ctxt->gpa_val; in emulator_read_write_onepage()
7543 if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes)) in emulator_read_write_onepage()
7549 handled = ops->read_write_mmio(vcpu, gpa, bytes, val); in emulator_read_write_onepage()
7554 bytes -= handled; in emulator_read_write_onepage()
7557 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); in emulator_read_write_onepage()
7558 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; in emulator_read_write_onepage()
7559 frag->gpa = gpa; in emulator_read_write_onepage()
7560 frag->data = val; in emulator_read_write_onepage()
7561 frag->len = bytes; in emulator_read_write_onepage()
7575 if (ops->read_write_prepare && in emulator_read_write()
7576 ops->read_write_prepare(vcpu, val, bytes)) in emulator_read_write()
7579 vcpu->mmio_nr_fragments = 0; in emulator_read_write()
7582 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { in emulator_read_write()
7585 now = -addr & ~PAGE_MASK; in emulator_read_write()
7592 if (ctxt->mode != X86EMUL_MODE_PROT64) in emulator_read_write()
7595 bytes -= now; in emulator_read_write()
7603 if (!vcpu->mmio_nr_fragments) in emulator_read_write()
7606 gpa = vcpu->mmio_fragments[0].gpa; in emulator_read_write()
7608 vcpu->mmio_needed = 1; in emulator_read_write()
7609 vcpu->mmio_cur_fragment = 0; in emulator_read_write()
7611 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); in emulator_read_write()
7612 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; in emulator_read_write()
7613 vcpu->run->exit_reason = KVM_EXIT_MMIO; in emulator_read_write()
7614 vcpu->run->mmio.phys_addr = gpa; in emulator_read_write()
7616 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); in emulator_read_write()
7656 if (bytes > 8 || (bytes & (bytes - 1))) in emulator_cmpxchg_emulated()
7670 page_line_mask = ~(cache_line_size() - 1); in emulator_cmpxchg_emulated()
7674 if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask)) in emulator_cmpxchg_emulated()
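/*
 * I.e. a cmpxchg that would straddle a cache line within the page is
 * punted to the plain emulated write path rather than attempted
 * atomically, so the host never performs a split-lock access.
 */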
7722 WARN_ON_ONCE(vcpu->arch.pio.count); in emulator_pio_in_out()
7738 memset(data, 0, size * (count - i)); in emulator_pio_in_out()
7747 vcpu->arch.pio.port = port; in emulator_pio_in_out()
7748 vcpu->arch.pio.in = in; in emulator_pio_in_out()
7749 vcpu->arch.pio.count = count; in emulator_pio_in_out()
7750 vcpu->arch.pio.size = size; in emulator_pio_in_out()
7753 memset(vcpu->arch.pio_data, 0, size * count); in emulator_pio_in_out()
7755 memcpy(vcpu->arch.pio_data, data, size * count); in emulator_pio_in_out()
7757 vcpu->run->exit_reason = KVM_EXIT_IO; in emulator_pio_in_out()
7758 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; in emulator_pio_in_out()
7759 vcpu->run->io.size = size; in emulator_pio_in_out()
7760 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; in emulator_pio_in_out()
7761 vcpu->run->io.count = count; in emulator_pio_in_out()
7762 vcpu->run->io.port = port; in emulator_pio_in_out()
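/*
 * Userspace services the resulting KVM_EXIT_IO by reading or writing
 * the PIO scratch page that is mmapped along with struct kvm_run, e.g.
 *
 *	void *data = (char *)run + run->io.data_offset;
 *
 * (illustrative; run is the mmapped vCPU state).
 */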
7778 int size = vcpu->arch.pio.size; in complete_emulator_pio_in()
7779 unsigned int count = vcpu->arch.pio.count; in complete_emulator_pio_in()
7780 memcpy(val, vcpu->arch.pio_data, size * count); in complete_emulator_pio_in()
7781 trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data); in complete_emulator_pio_in()
7782 vcpu->arch.pio.count = 0; in complete_emulator_pio_in()
7790 if (vcpu->arch.pio.count) { in emulator_pio_in_emulated()
7838 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
7839 on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask, in kvm_emulate_wbinvd_noskip()
7842 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
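/*
 * I.e. on hosts that can exit on WBINVD, the flush is broadcast via IPI
 * to every physical CPU this vCPU has recently run on (tracked in
 * wbinvd_dirty_mask), after which the mask is reset.
 */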
7877 return (curr_cr & ~((1ULL << 32) - 1)) | new_val; in mk_cr_64()
7890 value = vcpu->arch.cr2; in emulator_get_cr()
7919 vcpu->arch.cr2 = val; in emulator_set_cr()
7932 res = -1; in emulator_set_cr()
7988 set_desc_base(desc, (unsigned long)var.base); in emulator_get_segment()
7991 *base3 = var.base >> 32; in emulator_get_segment()
7993 desc->type = var.type; in emulator_get_segment()
7994 desc->s = var.s; in emulator_get_segment()
7995 desc->dpl = var.dpl; in emulator_get_segment()
7996 desc->p = var.present; in emulator_get_segment()
7997 desc->avl = var.avl; in emulator_get_segment()
7998 desc->l = var.l; in emulator_get_segment()
7999 desc->d = var.db; in emulator_get_segment()
8000 desc->g = var.g; in emulator_get_segment()
8013 var.base = get_desc_base(desc); in emulator_set_segment()
8015 var.base |= ((u64)base3) << 32; in emulator_set_segment()
8018 if (desc->g) in emulator_set_segment()
8020 var.type = desc->type; in emulator_set_segment()
8021 var.dpl = desc->dpl; in emulator_set_segment()
8022 var.db = desc->d; in emulator_set_segment()
8023 var.s = desc->s; in emulator_set_segment()
8024 var.l = desc->l; in emulator_set_segment()
8025 var.g = desc->g; in emulator_set_segment()
8026 var.avl = desc->avl; in emulator_set_segment()
8027 var.present = desc->p; in emulator_set_segment()
8097 return vcpu->arch.smbase; in emulator_get_smbase()
8104 vcpu->arch.smbase = smbase; in emulator_set_smbase()
8112 return -EINVAL; in emulator_check_pmc()
8123 emul_to_vcpu(ctxt)->arch.halt_request = 1; in emulator_halt()
8131 &ctxt->exception); in emulator_intercept()
8178 return emul_to_vcpu(ctxt)->arch.hflags; in emulator_get_hflags()
8206 struct kvm *kvm = emul_to_vcpu(ctxt)->kvm; in emulator_vm_bugged()
8208 if (!kvm->vm_bugged) in emulator_vm_bugged()
8284 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in inject_emulated_exception()
8286 if (ctxt->exception.vector == PF_VECTOR) in inject_emulated_exception()
8287 kvm_inject_emulated_page_fault(vcpu, &ctxt->exception); in inject_emulated_exception()
8288 else if (ctxt->exception.error_code_valid) in inject_emulated_exception()
8289 kvm_queue_exception_e(vcpu, ctxt->exception.vector, in inject_emulated_exception()
8290 ctxt->exception.error_code); in inject_emulated_exception()
8292 kvm_queue_exception(vcpu, ctxt->exception.vector); in inject_emulated_exception()
8305 ctxt->vcpu = vcpu; in alloc_emulate_ctxt()
8306 ctxt->ops = &emulate_ops; in alloc_emulate_ctxt()
8307 vcpu->arch.emulate_ctxt = ctxt; in alloc_emulate_ctxt()
8314 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in init_emulate_ctxt()
8319 ctxt->gpa_available = false; in init_emulate_ctxt()
8320 ctxt->eflags = kvm_get_rflags(vcpu); in init_emulate_ctxt()
8321 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; in init_emulate_ctxt()
8323 ctxt->eip = kvm_rip_read(vcpu); in init_emulate_ctxt()
8324 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : in init_emulate_ctxt()
8325 (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : in init_emulate_ctxt()
8333 ctxt->interruptibility = 0; in init_emulate_ctxt()
8334 ctxt->have_exception = false; in init_emulate_ctxt()
8335 ctxt->exception.vector = -1; in init_emulate_ctxt()
8336 ctxt->perm_ok = false; in init_emulate_ctxt()
8339 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in init_emulate_ctxt()
8344 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in kvm_inject_realmode_interrupt()
8349 ctxt->op_bytes = 2; in kvm_inject_realmode_interrupt()
8350 ctxt->ad_bytes = 2; in kvm_inject_realmode_interrupt()
8351 ctxt->_eip = ctxt->eip + inc_eip; in kvm_inject_realmode_interrupt()
8357 ctxt->eip = ctxt->_eip; in kvm_inject_realmode_interrupt()
8358 kvm_rip_write(vcpu, ctxt->eip); in kvm_inject_realmode_interrupt()
8359 kvm_set_rflags(vcpu, ctxt->eflags); in kvm_inject_realmode_interrupt()
8367 struct kvm_run *run = vcpu->run; in prepare_emulation_failure_exit()
8381 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in prepare_emulation_failure_exit()
8382 run->emulation_failure.suberror = KVM_INTERNAL_ERROR_EMULATION; in prepare_emulation_failure_exit()
8394 run->emulation_failure.flags = 0; in prepare_emulation_failure_exit()
8397 BUILD_BUG_ON((sizeof(run->emulation_failure.insn_size) + in prepare_emulation_failure_exit()
8398 sizeof(run->emulation_failure.insn_bytes) != 16)); in prepare_emulation_failure_exit()
8400 run->emulation_failure.flags |= in prepare_emulation_failure_exit()
8402 run->emulation_failure.insn_size = insn_size; in prepare_emulation_failure_exit()
8403 memset(run->emulation_failure.insn_bytes, 0x90, in prepare_emulation_failure_exit()
8404 sizeof(run->emulation_failure.insn_bytes)); in prepare_emulation_failure_exit()
8405 memcpy(run->emulation_failure.insn_bytes, insn_bytes, insn_size); in prepare_emulation_failure_exit()
8408 memcpy(&run->internal.data[info_start], info, sizeof(info)); in prepare_emulation_failure_exit()
8409 memcpy(&run->internal.data[info_start + ARRAY_SIZE(info)], data, in prepare_emulation_failure_exit()
8412 run->emulation_failure.ndata = info_start + ARRAY_SIZE(info) + ndata; in prepare_emulation_failure_exit()
8417 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in prepare_emulation_ctxt_failure_exit()
8419 prepare_emulation_failure_exit(vcpu, NULL, 0, ctxt->fetch.data, in prepare_emulation_ctxt_failure_exit()
8420 ctxt->fetch.end - ctxt->fetch.data); in prepare_emulation_ctxt_failure_exit()
8438 struct kvm *kvm = vcpu->kvm; in handle_emulation_failure()
8440 ++vcpu->stat.insn_emulation_fail; in handle_emulation_failure()
8448 if (kvm->arch.exit_on_emulation_error || in handle_emulation_failure()
8478 if (!vcpu->arch.mmu->root_role.direct) { in reexecute_instruction()
8496 * retry instruction -> write #PF -> emulation fail -> retry in reexecute_instruction()
8497 * instruction -> ... in reexecute_instruction()
8499 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
8510 /* The instructions are well-emulated on direct mmu. */ in reexecute_instruction()
8511 if (vcpu->arch.mmu->root_role.direct) { in reexecute_instruction()
8514 write_lock(&vcpu->kvm->mmu_lock); in reexecute_instruction()
8515 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; in reexecute_instruction()
8516 write_unlock(&vcpu->kvm->mmu_lock); in reexecute_instruction()
8519 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
8526 * and it failed, try to unshadow the page and re-enter the in reexecute_instruction()
8529 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
8545 last_retry_eip = vcpu->arch.last_retry_eip; in retry_instruction()
8546 last_retry_addr = vcpu->arch.last_retry_addr; in retry_instruction()
8549 * If the emulation is caused by #PF and it is non-page_table in retry_instruction()
8550 * writing instruction, it means the VM-Exit is caused by shadow in retry_instruction()
8554 * Note: if the guest uses a non-page-table modifying instruction in retry_instruction()
8561 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; in retry_instruction()
8573 if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa) in retry_instruction()
8576 vcpu->arch.last_retry_eip = ctxt->eip; in retry_instruction()
8577 vcpu->arch.last_retry_addr = cr2_or_gpa; in retry_instruction()
8579 if (!vcpu->arch.mmu->root_role.direct) in retry_instruction()
8582 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in retry_instruction()
8592 trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm); in kvm_smm_changed()
8595 vcpu->arch.hflags |= HF_SMM_MASK; in kvm_smm_changed()
8597 vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK); in kvm_smm_changed()
8607 vcpu->arch.pdptrs_from_userspace = false; in kvm_smm_changed()
8630 struct kvm_run *kvm_run = vcpu->run; in kvm_vcpu_do_singlestep()
8632 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { in kvm_vcpu_do_singlestep()
8633 kvm_run->debug.arch.dr6 = DR6_BS | DR6_ACTIVE_LOW; in kvm_vcpu_do_singlestep()
8634 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu); in kvm_vcpu_do_singlestep()
8635 kvm_run->debug.arch.exception = DB_VECTOR; in kvm_vcpu_do_singlestep()
8636 kvm_run->exit_reason = KVM_EXIT_DEBUG; in kvm_vcpu_do_singlestep()
8708 if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && in kvm_vcpu_check_code_breakpoint()
8709 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { in kvm_vcpu_check_code_breakpoint()
8710 struct kvm_run *kvm_run = vcpu->run; in kvm_vcpu_check_code_breakpoint()
8713 vcpu->arch.guest_debug_dr7, in kvm_vcpu_check_code_breakpoint()
8714 vcpu->arch.eff_db); in kvm_vcpu_check_code_breakpoint()
8717 kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW; in kvm_vcpu_check_code_breakpoint()
8718 kvm_run->debug.arch.pc = eip; in kvm_vcpu_check_code_breakpoint()
8719 kvm_run->debug.arch.exception = DB_VECTOR; in kvm_vcpu_check_code_breakpoint()
8720 kvm_run->exit_reason = KVM_EXIT_DEBUG; in kvm_vcpu_check_code_breakpoint()
8726 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && in kvm_vcpu_check_code_breakpoint()
8730 vcpu->arch.dr7, in kvm_vcpu_check_code_breakpoint()
8731 vcpu->arch.db); in kvm_vcpu_check_code_breakpoint()
8745 switch (ctxt->opcode_len) { in is_vmware_backdoor_opcode()
8747 switch (ctxt->b) { in is_vmware_backdoor_opcode()
8764 switch (ctxt->b) { in is_vmware_backdoor_opcode()
8777 * (and wrong) when emulating on an intercepted fault-like exception[*], as
8787 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in x86_decode_emulated_instruction()
8795 ++vcpu->stat.insn_emulation; in x86_decode_emulated_instruction()
8805 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in x86_emulate_instruction()
8812 vcpu->arch.l1tf_flush_l1d = true; in x86_emulate_instruction()
8818 write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; in x86_emulate_instruction()
8819 vcpu->arch.write_fault_to_shadow_pgtable = false; in x86_emulate_instruction()
8826 * are fault-like and are higher priority than any faults on in x86_emulate_instruction()
8844 if (ctxt->have_exception) { in x86_emulate_instruction()
8846 * #UD should result in just EMULATION_FAILED, and trap-like in x86_emulate_instruction()
8849 WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR || in x86_emulate_instruction()
8850 exception_type(ctxt->exception.vector) == EXCPT_TRAP); in x86_emulate_instruction()
8868 * injecting single-step #DBs. in x86_emulate_instruction()
8871 if (ctxt->mode != X86EMUL_MODE_PROT64) in x86_emulate_instruction()
8872 ctxt->eip = (u32)ctxt->_eip; in x86_emulate_instruction()
8874 ctxt->eip = ctxt->_eip; in x86_emulate_instruction()
8881 kvm_rip_write(vcpu, ctxt->eip); in x86_emulate_instruction()
8882 if (ctxt->eflags & X86_EFLAGS_RF) in x86_emulate_instruction()
8883 kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); in x86_emulate_instruction()
8892 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { in x86_emulate_instruction()
8893 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in x86_emulate_instruction()
8900 ctxt->exception.address = cr2_or_gpa; in x86_emulate_instruction()
8903 if (vcpu->arch.mmu->root_role.direct) { in x86_emulate_instruction()
8904 ctxt->gpa_available = true; in x86_emulate_instruction()
8905 ctxt->gpa_val = cr2_or_gpa; in x86_emulate_instruction()
8909 ctxt->exception.address = 0; in x86_emulate_instruction()
8925 if (ctxt->have_exception) { in x86_emulate_instruction()
8928 } else if (vcpu->arch.pio.count) { in x86_emulate_instruction()
8929 if (!vcpu->arch.pio.in) { in x86_emulate_instruction()
8930 /* FIXME: return into emulator if single-stepping. */ in x86_emulate_instruction()
8931 vcpu->arch.pio.count = 0; in x86_emulate_instruction()
8934 vcpu->arch.complete_userspace_io = complete_emulated_pio; in x86_emulate_instruction()
8937 } else if (vcpu->mmio_needed) { in x86_emulate_instruction()
8938 ++vcpu->stat.mmio_exits; in x86_emulate_instruction()
8940 if (!vcpu->mmio_is_write) in x86_emulate_instruction()
8943 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in x86_emulate_instruction()
8944 } else if (vcpu->arch.complete_userspace_io) { in x86_emulate_instruction()
8955 toggle_interruptibility(vcpu, ctxt->interruptibility); in x86_emulate_instruction()
8956 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in x86_emulate_instruction()
8959 * Note, EXCPT_DB is assumed to be fault-like as the emulator in x86_emulate_instruction()
8961 * of which are fault-like. in x86_emulate_instruction()
8963 if (!ctxt->have_exception || in x86_emulate_instruction()
8964 exception_type(ctxt->exception.vector) == EXCPT_TRAP) { in x86_emulate_instruction()
8966 if (ctxt->is_branch) in x86_emulate_instruction()
8968 kvm_rip_write(vcpu, ctxt->eip); in x86_emulate_instruction()
8969 if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) in x86_emulate_instruction()
8972 __kvm_set_rflags(vcpu, ctxt->eflags); in x86_emulate_instruction()
8981 if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF)) in x86_emulate_instruction()
8984 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; in x86_emulate_instruction()
9004 vcpu->arch.pio.count = 0; in complete_fast_pio_out_port_0x7e()
9010 vcpu->arch.pio.count = 0; in complete_fast_pio_out()
9012 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) in complete_fast_pio_out()
9032 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) { in kvm_fast_pio_out()
9033 vcpu->arch.complete_userspace_io = in kvm_fast_pio_out()
9037 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); in kvm_fast_pio_out()
9038 vcpu->arch.complete_userspace_io = complete_fast_pio_out; in kvm_fast_pio_out()
9048 BUG_ON(vcpu->arch.pio.count != 1); in complete_fast_pio_in()
9050 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) { in complete_fast_pio_in()
9051 vcpu->arch.pio.count = 0; in complete_fast_pio_in()
9056 val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0; in complete_fast_pio_in()
9079 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); in kvm_fast_pio_in()
9080 vcpu->arch.complete_userspace_io = complete_fast_pio_in; in kvm_fast_pio_in()
9109 khz = freq->new; in tsc_khz_changed()
9130 /* TSC frequency always matches when on Hyper-V */ in kvm_hyperv_tsc_notifier()
9196 if (vcpu->cpu != cpu) in __kvmclock_cpufreq_notifier()
9199 if (vcpu->cpu != raw_smp_processor_id()) in __kvmclock_cpufreq_notifier()
9205 if (freq->old < freq->new && send_ipi) { in __kvmclock_cpufreq_notifier()
9228 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) in kvmclock_cpufreq_notifier()
9230 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) in kvmclock_cpufreq_notifier()
9233 for_each_cpu(cpu, freq->policy->cpus) in kvmclock_cpufreq_notifier()
9261 if (policy->cpuinfo.max_freq) in kvm_timer_init()
9262 max_tsc_khz = policy->cpuinfo.max_freq; in kvm_timer_init()
9320 if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) && in pvclock_gtod_notify()
9339 return -EEXIST; in kvm_arch_init()
9342 if (!ops->cpu_has_kvm_support()) { in kvm_arch_init()
9344 ops->runtime_ops->name); in kvm_arch_init()
9345 return -EOPNOTSUPP; in kvm_arch_init()
9347 if (ops->disabled_by_bios()) { in kvm_arch_init()
9349 ops->runtime_ops->name); in kvm_arch_init()
9350 return -EOPNOTSUPP; in kvm_arch_init()
9360 return -EOPNOTSUPP; in kvm_arch_init()
9365 return -EOPNOTSUPP; in kvm_arch_init()
9378 return -EIO; in kvm_arch_init()
9384 return -ENOMEM; in kvm_arch_init()
9390 r = -ENOMEM; in kvm_arch_init()
9406 if (pi_inject_timer == -1) in kvm_arch_init()
9455 * local APIC is in-kernel, the run loop will detect the non-runnable in __kvm_emulate_halt()
9460 ++vcpu->stat.halt_exits; in __kvm_emulate_halt()
9462 vcpu->arch.mp_state = state; in __kvm_emulate_halt()
9465 vcpu->run->exit_reason = reason; in __kvm_emulate_halt()
9480 * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered in kvm_emulate_halt()
9506 return -KVM_EOPNOTSUPP; in kvm_pv_clock_pairing()
9512 if (vcpu->arch.tsc_always_catchup) in kvm_pv_clock_pairing()
9513 return -KVM_EOPNOTSUPP; in kvm_pv_clock_pairing()
9516 return -KVM_EOPNOTSUPP; in kvm_pv_clock_pairing()
9525 if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing, in kvm_pv_clock_pairing()
9527 ret = -KVM_EFAULT; in kvm_pv_clock_pairing()
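/*
 * The guest side of this interface (illustrative, from the guest's PTP
 * driver rather than this file) passes a physical address and the
 * pairing type in the hypercall arguments:
 *
 *	ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
 *			     KVM_CLOCK_PAIRING_WALLCLOCK);
 */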
9536 * @apicid: APIC ID of the vCPU to be kicked.
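/*
 * The matching guest-side kick (illustrative, from the guest's PV
 * spinlock code rather than this file):
 *
 *	apicid = per_cpu(x86_cpu_to_apicid, cpu);
 *	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
 */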
9556 return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0); in kvm_apicv_activated()
9562 ulong vm_reasons = READ_ONCE(vcpu->kvm->arch.apicv_inhibit_reasons); in kvm_vcpu_apicv_activated()
9582 unsigned long *inhibits = &kvm->arch.apicv_inhibit_reasons; in kvm_apicv_init()
9584 init_rwsem(&kvm->arch.apicv_update_lock); in kvm_apicv_init()
9598 vcpu->stat.directed_yield_attempted++; in kvm_sched_yield()
9604 map = rcu_dereference(vcpu->kvm->arch.apic_map); in kvm_sched_yield()
9606 if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id]) in kvm_sched_yield()
9607 target = map->phys_map[dest_id]->vcpu; in kvm_sched_yield()
9611 if (!target || !READ_ONCE(target->ready)) in kvm_sched_yield()
9621 vcpu->stat.directed_yield_successful++; in kvm_sched_yield()
9629 u64 ret = vcpu->run->hypercall.ret; in complete_hypercall_exit()
9634 ++vcpu->stat.hypercalls; in complete_hypercall_exit()
9643 if (kvm_xen_hypercall_enabled(vcpu->kvm)) in kvm_emulate_hypercall()
9667 ret = -KVM_EPERM; in kvm_emulate_hypercall()
9671 ret = -KVM_ENOSYS; in kvm_emulate_hypercall()
9681 kvm_pv_kick_cpu_op(vcpu->kvm, a1); in kvm_emulate_hypercall()
9694 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); in kvm_emulate_hypercall()
9706 ret = -KVM_ENOSYS; in kvm_emulate_hypercall()
9707 if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE))) in kvm_emulate_hypercall()
9712 ret = -KVM_EINVAL; in kvm_emulate_hypercall()
9716 vcpu->run->exit_reason = KVM_EXIT_HYPERCALL; in kvm_emulate_hypercall()
9717 vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE; in kvm_emulate_hypercall()
9718 vcpu->run->hypercall.args[0] = gpa; in kvm_emulate_hypercall()
9719 vcpu->run->hypercall.args[1] = npages; in kvm_emulate_hypercall()
9720 vcpu->run->hypercall.args[2] = attrs; in kvm_emulate_hypercall()
9721 vcpu->run->hypercall.longmode = op_64_bit; in kvm_emulate_hypercall()
9722 vcpu->arch.complete_userspace_io = complete_hypercall_exit; in kvm_emulate_hypercall()
9726 ret = -KVM_ENOSYS; in kvm_emulate_hypercall()
9734 ++vcpu->stat.hypercalls; in kvm_emulate_hypercall()
9749 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_FIX_HYPERCALL_INSN)) { in emulator_fix_hypercall()
9750 ctxt->exception.error_code_valid = false; in emulator_fix_hypercall()
9751 ctxt->exception.vector = UD_VECTOR; in emulator_fix_hypercall()
9752 ctxt->have_exception = true; in emulator_fix_hypercall()
9759 &ctxt->exception); in emulator_fix_hypercall()
9764 return vcpu->run->request_interrupt_window && in dm_request_for_irq_injection()
9765 likely(!pic_in_kernel(vcpu->kvm)); in dm_request_for_irq_injection()
9768 /* Called within kvm->srcu read side. */
9771 struct kvm_run *kvm_run = vcpu->run; in post_kvm_run_save()
9773 kvm_run->if_flag = static_call(kvm_x86_get_if_flag)(vcpu); in post_kvm_run_save()
9774 kvm_run->cr8 = kvm_get_cr8(vcpu); in post_kvm_run_save()
9775 kvm_run->apic_base = kvm_get_apic_base(vcpu); in post_kvm_run_save()
9777 kvm_run->ready_for_interrupt_injection = in post_kvm_run_save()
9778 pic_in_kernel(vcpu->kvm) || in post_kvm_run_save()
9782 kvm_run->flags |= KVM_RUN_X86_SMM; in post_kvm_run_save()
9795 if (vcpu->arch.apic->apicv_active) in update_cr8_intercept()
9798 if (!vcpu->arch.apic->vapic_addr) in update_cr8_intercept()
9801 max_irr = -1; in update_cr8_intercept()
9803 if (max_irr != -1) in update_cr8_intercept()
9815 kvm_x86_ops.nested_ops->triple_fault(vcpu); in kvm_check_nested_events()
9819 return kvm_x86_ops.nested_ops->check_events(vcpu); in kvm_check_nested_events()
9824 trace_kvm_inj_exception(vcpu->arch.exception.vector, in kvm_inject_exception()
9825 vcpu->arch.exception.has_error_code, in kvm_inject_exception()
9826 vcpu->arch.exception.error_code, in kvm_inject_exception()
9827 vcpu->arch.exception.injected); in kvm_inject_exception()
9829 if (vcpu->arch.exception.error_code && !is_protmode(vcpu)) in kvm_inject_exception()
9830 vcpu->arch.exception.error_code = false; in kvm_inject_exception()
9839 * injected as part of a previous VM-Enter, but weren't successfully delivered
9840 * and need to be re-injected.
9845 * also be able to re-inject NMIs and IRQs in the middle of an instruction.
9846 * I.e. for exceptions and re-injected events, NOT invoking this on instruction
9851 * instruction boundaries for asynchronous events. However, because VM-Exits
9857 * But, if a VM-Exit occurs during instruction execution, and KVM does NOT skip
9880 * Process nested events first, as nested VM-Exit supersedes event in kvm_check_and_inject_events()
9881 * re-injection. If there's an event queued for re-injection, it will in kvm_check_and_inject_events()
9882 * be saved into the appropriate vmc{b,s}12 fields on nested VM-Exit. in kvm_check_and_inject_events()
9890 * Re-inject exceptions and events *especially* if immediate entry+exit in kvm_check_and_inject_events()
9894 * Don't re-inject an NMI or interrupt if there is a pending exception. in kvm_check_and_inject_events()
9903 * as the exception "occurred" before the exit to userspace. Trap-like in kvm_check_and_inject_events()
9905 * And while fault-like exceptions, e.g. #GP and #PF, are the lowest in kvm_check_and_inject_events()
9908 * Thus a pending fault-like exception means the fault occurred on the in kvm_check_and_inject_events()
9912 if (vcpu->arch.exception.injected) in kvm_check_and_inject_events()
9916 else if (vcpu->arch.nmi_injected) in kvm_check_and_inject_events()
9918 else if (vcpu->arch.interrupt.injected) in kvm_check_and_inject_events()
9922 * Exceptions that morph to VM-Exits are handled above, and pending in kvm_check_and_inject_events()
9923 * exceptions on top of injected exceptions that do not VM-Exit should in kvm_check_and_inject_events()
9926 WARN_ON_ONCE(vcpu->arch.exception.injected && in kvm_check_and_inject_events()
9927 vcpu->arch.exception.pending); in kvm_check_and_inject_events()
9931 * nested VM-Enter or event re-injection so that a different pending in kvm_check_and_inject_events()
9934 * Otherwise, continue processing events even if VM-Exit occurred. The in kvm_check_and_inject_events()
9935 * VM-Exit will have cleared exceptions that were meant for L2, but in kvm_check_and_inject_events()
9942 * A pending exception VM-Exit should either result in nested VM-Exit in kvm_check_and_inject_events()
9943 * or force an immediate re-entry and exit to/from L2, and exception in kvm_check_and_inject_events()
9944 * VM-Exits cannot be injected (flag should _never_ be set). in kvm_check_and_inject_events()
9946 WARN_ON_ONCE(vcpu->arch.exception_vmexit.injected || in kvm_check_and_inject_events()
9947 vcpu->arch.exception_vmexit.pending); in kvm_check_and_inject_events()
9951 * to re-inject a previous event. See above comments on re-injecting in kvm_check_and_inject_events()
9956 if (vcpu->arch.exception.pending) { in kvm_check_and_inject_events()
9958 * Fault-class exceptions, except #DBs, set RF=1 in the RFLAGS in kvm_check_and_inject_events()
9959 * value pushed on the stack. Trap-like exception and all #DBs in kvm_check_and_inject_events()
9960 * leave RF as-is (KVM follows Intel's behavior in this regard; in kvm_check_and_inject_events()
9965 * fault-like. They do _not_ set RF, a la code breakpoints. in kvm_check_and_inject_events()
9967 if (exception_type(vcpu->arch.exception.vector) == EXCPT_FAULT) in kvm_check_and_inject_events()
9971 if (vcpu->arch.exception.vector == DB_VECTOR) { in kvm_check_and_inject_events()
9972 kvm_deliver_exception_payload(vcpu, &vcpu->arch.exception); in kvm_check_and_inject_events()
9973 if (vcpu->arch.dr7 & DR7_GD) { in kvm_check_and_inject_events()
9974 vcpu->arch.dr7 &= ~DR7_GD; in kvm_check_and_inject_events()
9981 vcpu->arch.exception.pending = false; in kvm_check_and_inject_events()
9982 vcpu->arch.exception.injected = true; in kvm_check_and_inject_events()
9988 if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) in kvm_check_and_inject_events()
9993 * due to architectural conditions (e.g. IF=0) a window-open exit in kvm_check_and_inject_events()
9994 * will re-request KVM_REQ_EVENT. Sometimes however an event is pending in kvm_check_and_inject_events()
10000 * The kvm_x86_ops hooks communicate this by returning -EBUSY. in kvm_check_and_inject_events()
10002 if (vcpu->arch.smi_pending) { in kvm_check_and_inject_events()
10003 r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY; in kvm_check_and_inject_events()
10007 vcpu->arch.smi_pending = false; in kvm_check_and_inject_events()
10008 ++vcpu->arch.smi_count; in kvm_check_and_inject_events()
10015 if (vcpu->arch.nmi_pending) { in kvm_check_and_inject_events()
10016 r = can_inject ? static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY; in kvm_check_and_inject_events()
10020 --vcpu->arch.nmi_pending; in kvm_check_and_inject_events()
10021 vcpu->arch.nmi_injected = true; in kvm_check_and_inject_events()
10026 if (vcpu->arch.nmi_pending) in kvm_check_and_inject_events()
10031 r = can_inject ? static_call(kvm_x86_interrupt_allowed)(vcpu, true) : -EBUSY; in kvm_check_and_inject_events()
10044 kvm_x86_ops.nested_ops->has_events && in kvm_check_and_inject_events()
10045 kvm_x86_ops.nested_ops->has_events(vcpu)) in kvm_check_and_inject_events()
10050 * is done emulating and should only propagate the to-be-injected event in kvm_check_and_inject_events()
10052 * infinite loop as KVM will bail from VM-Enter to inject the pending in kvm_check_and_inject_events()
10060 WARN_ON_ONCE(vcpu->arch.exception.pending || in kvm_check_and_inject_events()
10061 vcpu->arch.exception_vmexit.pending); in kvm_check_and_inject_events()
10065 if (r == -EBUSY) { in kvm_check_and_inject_events()
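Stripped of the re-injection and -EBUSY plumbing, the new-event priority implemented above is fixed: a pending exception first, then SMI, then NMI, then IRQ, each gated by a vendor "allowed?" query. A hedged distillation of just that ordering (illustrative types; ignores nested virt and the immediate-exit handling):

#include <stdbool.h>

enum kvm_evt { EVT_NONE, EVT_EXCEPTION, EVT_SMI, EVT_NMI, EVT_IRQ };

struct evt_state {
	bool excpt_pending, smi_pending, nmi_pending, irq_pending;
	bool smi_allowed, nmi_allowed, irq_allowed;
};

static enum kvm_evt next_event(const struct evt_state *s)
{
	if (s->excpt_pending)
		return EVT_EXCEPTION;	/* exceptions beat async events */
	if (s->smi_pending && s->smi_allowed)
		return EVT_SMI;
	if (s->nmi_pending && s->nmi_allowed)
		return EVT_NMI;
	if (s->irq_pending && s->irq_allowed)
		return EVT_IRQ;
	return EVT_NONE;
}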
10081 if (static_call(kvm_x86_get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected) in process_nmi()
10084 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); in process_nmi()
10085 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); in process_nmi()
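process_nmi() above drains NMIs queued by other contexts into nmi_pending and clamps the result, mirroring real hardware, which can latch at most one NMI while another is being handled (so the limit is typically two). A standalone model of that collapse, with illustrative names:

#include <stdatomic.h>

static unsigned int collapse_nmis(atomic_uint *queued,
				  unsigned int pending, unsigned int limit)
{
	pending += atomic_exchange(queued, 0);		/* drain remote producers */
	return pending < limit ? pending : limit;	/* cap at the latch depth */
}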
10092 flags |= seg->g << 23; in enter_smm_get_segment_flags()
10093 flags |= seg->db << 22; in enter_smm_get_segment_flags()
10094 flags |= seg->l << 21; in enter_smm_get_segment_flags()
10095 flags |= seg->avl << 20; in enter_smm_get_segment_flags()
10096 flags |= seg->present << 15; in enter_smm_get_segment_flags()
10097 flags |= seg->dpl << 13; in enter_smm_get_segment_flags()
10098 flags |= seg->s << 12; in enter_smm_get_segment_flags()
10099 flags |= seg->type << 8; in enter_smm_get_segment_flags()
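The shifts above pack a segment's attributes into the high word of a descriptor, in the layout the SMM state-save area expects. As a sanity check, here is a hedged inverse that decodes the same word; the struct and names are illustrative:

#include <stdint.h>

struct seg_attrib {
	uint8_t g, db, l, avl, present, dpl, s, type;
};

static struct seg_attrib decode_smm_seg_flags(uint32_t flags)
{
	return (struct seg_attrib) {
		.g	 = (flags >> 23) & 1,
		.db	 = (flags >> 22) & 1,
		.l	 = (flags >> 21) & 1,
		.avl	 = (flags >> 20) & 1,
		.present = (flags >> 15) & 1,
		.dpl	 = (flags >> 13) & 3,
		.s	 = (flags >> 12) & 1,
		.type	 = (flags >>  8) & 0xf,
	};
}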
10114 offset = 0x7f2c + (n - 3) * 12; in enter_smm_save_seg_32()
10116 put_smstate(u32, buf, offset + 8, seg.base); in enter_smm_save_seg_32()
10135 put_smstate(u64, buf, offset + 8, seg.base); in enter_smm_save_seg_64()
10161 put_smstate(u32, buf, 0x7f64, seg.base); in enter_smm_save_state_32()
10167 put_smstate(u32, buf, 0x7f80, seg.base); in enter_smm_save_state_32()
10186 put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase); in enter_smm_save_state_32()
10198 put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read_raw(vcpu, i)); in enter_smm_save_state_64()
10212 put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase); in enter_smm_save_state_64()
10217 put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer); in enter_smm_save_state_64()
10223 put_smstate(u64, buf, 0x7e98, seg.base); in enter_smm_save_state_64()
10233 put_smstate(u64, buf, 0x7e78, seg.base); in enter_smm_save_state_64()
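All of the put_smstate() offsets above follow the SDM convention of addressing state-save fields as SMBASE + 0x8000 + offset, within the range 0x7e00..0x7fff; since the 512-byte staging buffer is written to smbase + 0xfe00 (see enter_smm() below), the store has to rebase by 0x7e00. A plausible reconstruction of the helper, not quoted source:

#include <stdint.h>
#include <string.h>

#define PUT_SMSTATE(type, buf, offset, val) \
	(*(type *)((uint8_t *)(buf) + (offset) - 0x7e00) = (val))

static void smram_demo(void)
{
	uint8_t buf[512];	/* covers SMRAM offsets 0x7e00..0x7fff */

	memset(buf, 0, sizeof(buf));
	/* 0x7ff8 is the first GPR slot written by the 64-bit loop above */
	PUT_SMSTATE(uint64_t, buf, 0x7ff8, 0x1234567890abcdefULL);
}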
10260 * Give enter_smm() a chance to make ISA-specific changes to the vCPU in enter_smm()
10262 * SMM state-save area. in enter_smm()
10267 kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf)); in enter_smm()
10270 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in enter_smm()
10277 cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG); in enter_smm()
10279 vcpu->arch.cr0 = cr0; in enter_smm()
10289 cs.selector = (vcpu->arch.smbase >> 4) & 0xffff; in enter_smm()
10290 cs.base = vcpu->arch.smbase; in enter_smm()
10293 ds.base = 0; in enter_smm()
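A hedged worked example of the CS setup above with the default smbase of 0x30000 (programmed at vCPU reset later in this file): the selector becomes smbase >> 4 = 0x3000, CS.base becomes 0x30000, and the SMI handler starts executing at smbase + 0x8000:

#include <assert.h>
#include <stdint.h>

static void smm_entry_demo(void)
{
	uint32_t smbase = 0x30000;

	assert(((smbase >> 4) & 0xffff) == 0x3000);	/* CS.selector */
	assert(smbase + 0x8000 == 0x38000);		/* SMI entry RIP */
}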
10325 vcpu->arch.smi_pending = true; in process_smi()
10342 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_vcpu_update_apicv()
10348 down_read(&vcpu->kvm->arch.apicv_update_lock); in kvm_vcpu_update_apicv()
10355 if (apic->apicv_active == activate) in kvm_vcpu_update_apicv()
10358 apic->apicv_active = activate; in kvm_vcpu_update_apicv()
10368 if (!apic->apicv_active) in kvm_vcpu_update_apicv()
10373 up_read(&vcpu->kvm->arch.apicv_update_lock); in kvm_vcpu_update_apicv()
10382 lockdep_assert_held_write(&kvm->arch.apicv_update_lock); in __kvm_set_or_clear_apicv_inhibit()
10387 old = new = kvm->arch.apicv_inhibit_reasons; in __kvm_set_or_clear_apicv_inhibit()
10405 kvm->arch.apicv_inhibit_reasons = new; in __kvm_set_or_clear_apicv_inhibit()
10408 int idx = srcu_read_lock(&kvm->srcu); in __kvm_set_or_clear_apicv_inhibit()
10411 srcu_read_unlock(&kvm->srcu, idx); in __kvm_set_or_clear_apicv_inhibit()
10414 kvm->arch.apicv_inhibit_reasons = new; in __kvm_set_or_clear_apicv_inhibit()
10424 down_write(&kvm->arch.apicv_update_lock); in kvm_set_or_clear_apicv_inhibit()
10426 up_write(&kvm->arch.apicv_update_lock); in kvm_set_or_clear_apicv_inhibit()
10435 bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256); in vcpu_scan_ioapic()
10437 if (irqchip_split(vcpu->kvm)) in vcpu_scan_ioapic()
10438 kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); in vcpu_scan_ioapic()
10441 if (ioapic_in_kernel(vcpu->kvm)) in vcpu_scan_ioapic()
10442 kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); in vcpu_scan_ioapic()
10446 vcpu->arch.load_eoi_exitmap_pending = true; in vcpu_scan_ioapic()
10455 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) in vcpu_load_eoi_exitmap()
10460 vcpu->arch.ioapic_handled_vectors, in vcpu_load_eoi_exitmap()
10461 to_hv_synic(vcpu)->vec_bitmap, 256); in vcpu_load_eoi_exitmap()
10467 vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors); in vcpu_load_eoi_exitmap()
10499 smp_send_reschedule(vcpu->cpu); in __kvm_request_immediate_exit()
10504 * Called within kvm->srcu read side.
10519 /* Forbid vmenter if vcpu dirty ring is soft-full */ in vcpu_enter_guest()
10520 if (unlikely(vcpu->kvm->dirty_ring_size && in vcpu_enter_guest()
10521 kvm_dirty_ring_soft_full(&vcpu->dirty_ring))) { in vcpu_enter_guest()
10522 vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL; in vcpu_enter_guest()
10530 r = -EIO; in vcpu_enter_guest()
10534 if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) { in vcpu_enter_guest()
10544 kvm_update_masterclock(vcpu->kvm); in vcpu_enter_guest()
10565 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; in vcpu_enter_guest()
10571 kvm_x86_ops.nested_ops->triple_fault(vcpu); in vcpu_enter_guest()
10574 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; in vcpu_enter_guest()
10575 vcpu->mmio_needed = 0; in vcpu_enter_guest()
10582 vcpu->arch.apf.halted = true; in vcpu_enter_guest()
10597 BUG_ON(vcpu->arch.pending_ioapic_eoi > 255); in vcpu_enter_guest()
10598 if (test_bit(vcpu->arch.pending_ioapic_eoi, in vcpu_enter_guest()
10599 vcpu->arch.ioapic_handled_vectors)) { in vcpu_enter_guest()
10600 vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI; in vcpu_enter_guest()
10601 vcpu->run->eoi.vector = in vcpu_enter_guest()
10602 vcpu->arch.pending_ioapic_eoi; in vcpu_enter_guest()
10614 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in vcpu_enter_guest()
10615 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH; in vcpu_enter_guest()
10616 vcpu->run->system_event.ndata = 0; in vcpu_enter_guest()
10621 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in vcpu_enter_guest()
10622 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET; in vcpu_enter_guest()
10623 vcpu->run->system_event.ndata = 0; in vcpu_enter_guest()
10630 vcpu->run->exit_reason = KVM_EXIT_HYPERV; in vcpu_enter_guest()
10631 vcpu->run->hyperv = hv_vcpu->exit; in vcpu_enter_guest()
10638 * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers in vcpu_enter_guest()
10639 * depend on the guest clock being up-to-date in vcpu_enter_guest()
10656 ++vcpu->stat.req_event; in vcpu_enter_guest()
10662 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { in vcpu_enter_guest()
10697 /* Store vcpu->apicv_active before vcpu->mode. */ in vcpu_enter_guest()
10698 smp_store_release(&vcpu->mode, IN_GUEST_MODE); in vcpu_enter_guest()
10703 * 1) We should set ->mode before checking ->requests. Please see in vcpu_enter_guest()
10706 * 2) For APICv, we should set ->mode before checking PID.ON. This in vcpu_enter_guest()
10727 vcpu->mode = OUTSIDE_GUEST_MODE; in vcpu_enter_guest()
10745 if (vcpu->arch.guest_fpu.xfd_err) in vcpu_enter_guest()
10746 wrmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err); in vcpu_enter_guest()
10748 if (unlikely(vcpu->arch.switch_db_regs)) { in vcpu_enter_guest()
10750 set_debugreg(vcpu->arch.eff_db[0], 0); in vcpu_enter_guest()
10751 set_debugreg(vcpu->arch.eff_db[1], 1); in vcpu_enter_guest()
10752 set_debugreg(vcpu->arch.eff_db[2], 2); in vcpu_enter_guest()
10753 set_debugreg(vcpu->arch.eff_db[3], 3); in vcpu_enter_guest()
10764 * per-VM state, and responding vCPUs must wait for the update in vcpu_enter_guest()
10789 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { in vcpu_enter_guest()
10790 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); in vcpu_enter_guest()
10806 vcpu->arch.last_vmentry_cpu = vcpu->cpu; in vcpu_enter_guest()
10807 vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); in vcpu_enter_guest()
10809 vcpu->mode = OUTSIDE_GUEST_MODE; in vcpu_enter_guest()
10814 * rely on the fact that guest_fpu::xfd is up-to-date (e.g. in vcpu_enter_guest()
10817 if (vcpu->arch.xfd_no_write_intercept) in vcpu_enter_guest()
10822 if (vcpu->arch.guest_fpu.xfd_err) in vcpu_enter_guest()
10827 * VM-Exit on SVM and any ticks that occur between VM-Exit and now. in vcpu_enter_guest()
10834 ++vcpu->stat.exits; in vcpu_enter_guest()
10860 if (unlikely(vcpu->arch.tsc_always_catchup)) in vcpu_enter_guest()
10863 if (vcpu->arch.apic_attention) in vcpu_enter_guest()
10873 if (unlikely(vcpu->arch.apic_attention)) in vcpu_enter_guest()
10879 /* Called within kvm->srcu read side. */
10886 * Switch to the software timer before halt-polling/blocking as in vcpu_block()
10889 * Switch before halt-polling so that KVM recognizes an expired in vcpu_block()
10897 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) in vcpu_block()
10918 * state field (AMD does not have a similar field and a VM-Exit always in vcpu_block()
10928 switch(vcpu->arch.mp_state) { in vcpu_block()
10931 vcpu->arch.pv.pv_unhalted = false; in vcpu_block()
10932 vcpu->arch.mp_state = in vcpu_block()
10936 vcpu->arch.apf.halted = false; in vcpu_block()
10949 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && in kvm_vcpu_running()
10950 !vcpu->arch.apf.halted); in kvm_vcpu_running()
10953 /* Called within kvm->srcu read side. */
10958 vcpu->arch.l1tf_flush_l1d = true; in vcpu_run()
10967 vcpu->arch.at_instruction_boundary = false; in vcpu_run()
10987 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; in vcpu_run()
10988 ++vcpu->stat.request_irq_exits; in vcpu_run()
11011 BUG_ON(!vcpu->arch.pio.count); in complete_emulated_pio()
11036 struct kvm_run *run = vcpu->run; in complete_emulated_mmio()
11040 BUG_ON(!vcpu->mmio_needed); in complete_emulated_mmio()
11043 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; in complete_emulated_mmio()
11044 len = min(8u, frag->len); in complete_emulated_mmio()
11045 if (!vcpu->mmio_is_write) in complete_emulated_mmio()
11046 memcpy(frag->data, run->mmio.data, len); in complete_emulated_mmio()
11048 if (frag->len <= 8) { in complete_emulated_mmio()
11051 vcpu->mmio_cur_fragment++; in complete_emulated_mmio()
11054 frag->data += len; in complete_emulated_mmio()
11055 frag->gpa += len; in complete_emulated_mmio()
11056 frag->len -= len; in complete_emulated_mmio()
11059 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { in complete_emulated_mmio()
11060 vcpu->mmio_needed = 0; in complete_emulated_mmio()
11062 /* FIXME: return into emulator if single-stepping. */ in complete_emulated_mmio()
11063 if (vcpu->mmio_is_write) in complete_emulated_mmio()
11065 vcpu->mmio_read_completed = 1; in complete_emulated_mmio()
11069 run->exit_reason = KVM_EXIT_MMIO; in complete_emulated_mmio()
11070 run->mmio.phys_addr = frag->gpa; in complete_emulated_mmio()
11071 if (vcpu->mmio_is_write) in complete_emulated_mmio()
11072 memcpy(run->mmio.data, frag->data, min(8u, frag->len)); in complete_emulated_mmio()
11073 run->mmio.len = min(8u, frag->len); in complete_emulated_mmio()
11074 run->mmio.is_write = vcpu->mmio_is_write; in complete_emulated_mmio()
11075 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in complete_emulated_mmio()
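complete_emulated_mmio() above replays one guest access as a series of at-most-8-byte KVM_EXIT_MMIO round-trips, advancing the fragment each time. A standalone model of the chunking, with illustrative types:

#include <stddef.h>
#include <stdint.h>

struct frag { uint64_t gpa; uint8_t *data; unsigned int len; };

static size_t mmio_exits_needed(struct frag *f)
{
	size_t exits = 0;

	while (f->len) {
		unsigned int len = f->len < 8 ? f->len : 8;

		/* one KVM_EXIT_MMIO round-trip to userspace per chunk */
		f->data += len;
		f->gpa  += len;
		f->len  -= len;
		exits++;
	}
	return exits;	/* e.g. a 16-byte access costs two exits */
}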
11082 /* Exclude PKRU, it's restored separately immediately after VM-Exit. */ in kvm_load_guest_fpu()
11083 fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, true); in kvm_load_guest_fpu()
11090 fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, false); in kvm_put_guest_fpu()
11091 ++vcpu->stat.fpu_reload; in kvm_put_guest_fpu()
11097 struct kvm_queued_exception *ex = &vcpu->arch.exception; in kvm_arch_vcpu_ioctl_run()
11098 struct kvm_run *kvm_run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
11103 kvm_run->flags = 0; in kvm_arch_vcpu_ioctl_run()
11107 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { in kvm_arch_vcpu_ioctl_run()
11108 if (kvm_run->immediate_exit) { in kvm_arch_vcpu_ioctl_run()
11109 r = -EINTR; in kvm_arch_vcpu_ioctl_run()
11126 r = -EAGAIN; in kvm_arch_vcpu_ioctl_run()
11128 r = -EINTR; in kvm_arch_vcpu_ioctl_run()
11129 kvm_run->exit_reason = KVM_EXIT_INTR; in kvm_arch_vcpu_ioctl_run()
11130 ++vcpu->stat.signal_exits; in kvm_arch_vcpu_ioctl_run()
11135 if ((kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) || in kvm_arch_vcpu_ioctl_run()
11136 (kvm_run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)) { in kvm_arch_vcpu_ioctl_run()
11137 r = -EINVAL; in kvm_arch_vcpu_ioctl_run()
11141 if (kvm_run->kvm_dirty_regs) { in kvm_arch_vcpu_ioctl_run()
11147 /* re-sync apic's tpr */ in kvm_arch_vcpu_ioctl_run()
11149 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { in kvm_arch_vcpu_ioctl_run()
11150 r = -EINVAL; in kvm_arch_vcpu_ioctl_run()
11157 * a pending VM-Exit if L1 wants to intercept the exception. in kvm_arch_vcpu_ioctl_run()
11159 if (vcpu->arch.exception_from_userspace && is_guest_mode(vcpu) && in kvm_arch_vcpu_ioctl_run()
11160 kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, ex->vector, in kvm_arch_vcpu_ioctl_run()
11161 ex->error_code)) { in kvm_arch_vcpu_ioctl_run()
11162 kvm_queue_exception_vmexit(vcpu, ex->vector, in kvm_arch_vcpu_ioctl_run()
11163 ex->has_error_code, ex->error_code, in kvm_arch_vcpu_ioctl_run()
11164 ex->has_payload, ex->payload); in kvm_arch_vcpu_ioctl_run()
11165 ex->injected = false; in kvm_arch_vcpu_ioctl_run()
11166 ex->pending = false; in kvm_arch_vcpu_ioctl_run()
11168 vcpu->arch.exception_from_userspace = false; in kvm_arch_vcpu_ioctl_run()
11170 if (unlikely(vcpu->arch.complete_userspace_io)) { in kvm_arch_vcpu_ioctl_run()
11171 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; in kvm_arch_vcpu_ioctl_run()
11172 vcpu->arch.complete_userspace_io = NULL; in kvm_arch_vcpu_ioctl_run()
11177 WARN_ON_ONCE(vcpu->arch.pio.count); in kvm_arch_vcpu_ioctl_run()
11178 WARN_ON_ONCE(vcpu->mmio_needed); in kvm_arch_vcpu_ioctl_run()
11181 if (kvm_run->immediate_exit) { in kvm_arch_vcpu_ioctl_run()
11182 r = -EINTR; in kvm_arch_vcpu_ioctl_run()
11194 if (kvm_run->kvm_valid_regs) in kvm_arch_vcpu_ioctl_run()
11206 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { in __get_regs()
11214 emulator_writeback_register_cache(vcpu->arch.emulate_ctxt); in __get_regs()
11215 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in __get_regs()
11217 regs->rax = kvm_rax_read(vcpu); in __get_regs()
11218 regs->rbx = kvm_rbx_read(vcpu); in __get_regs()
11219 regs->rcx = kvm_rcx_read(vcpu); in __get_regs()
11220 regs->rdx = kvm_rdx_read(vcpu); in __get_regs()
11221 regs->rsi = kvm_rsi_read(vcpu); in __get_regs()
11222 regs->rdi = kvm_rdi_read(vcpu); in __get_regs()
11223 regs->rsp = kvm_rsp_read(vcpu); in __get_regs()
11224 regs->rbp = kvm_rbp_read(vcpu); in __get_regs()
11226 regs->r8 = kvm_r8_read(vcpu); in __get_regs()
11227 regs->r9 = kvm_r9_read(vcpu); in __get_regs()
11228 regs->r10 = kvm_r10_read(vcpu); in __get_regs()
11229 regs->r11 = kvm_r11_read(vcpu); in __get_regs()
11230 regs->r12 = kvm_r12_read(vcpu); in __get_regs()
11231 regs->r13 = kvm_r13_read(vcpu); in __get_regs()
11232 regs->r14 = kvm_r14_read(vcpu); in __get_regs()
11233 regs->r15 = kvm_r15_read(vcpu); in __get_regs()
11236 regs->rip = kvm_rip_read(vcpu); in __get_regs()
11237 regs->rflags = kvm_get_rflags(vcpu); in __get_regs()
11250 vcpu->arch.emulate_regs_need_sync_from_vcpu = true; in __set_regs()
11251 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in __set_regs()
11253 kvm_rax_write(vcpu, regs->rax); in __set_regs()
11254 kvm_rbx_write(vcpu, regs->rbx); in __set_regs()
11255 kvm_rcx_write(vcpu, regs->rcx); in __set_regs()
11256 kvm_rdx_write(vcpu, regs->rdx); in __set_regs()
11257 kvm_rsi_write(vcpu, regs->rsi); in __set_regs()
11258 kvm_rdi_write(vcpu, regs->rdi); in __set_regs()
11259 kvm_rsp_write(vcpu, regs->rsp); in __set_regs()
11260 kvm_rbp_write(vcpu, regs->rbp); in __set_regs()
11262 kvm_r8_write(vcpu, regs->r8); in __set_regs()
11263 kvm_r9_write(vcpu, regs->r9); in __set_regs()
11264 kvm_r10_write(vcpu, regs->r10); in __set_regs()
11265 kvm_r11_write(vcpu, regs->r11); in __set_regs()
11266 kvm_r12_write(vcpu, regs->r12); in __set_regs()
11267 kvm_r13_write(vcpu, regs->r13); in __set_regs()
11268 kvm_r14_write(vcpu, regs->r14); in __set_regs()
11269 kvm_r15_write(vcpu, regs->r15); in __set_regs()
11272 kvm_rip_write(vcpu, regs->rip); in __set_regs()
11273 kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED); in __set_regs()
11275 vcpu->arch.exception.pending = false; in __set_regs()
11276 vcpu->arch.exception_vmexit.pending = false; in __set_regs()
11293 if (vcpu->arch.guest_state_protected) in __get_sregs_common()
11296 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in __get_sregs_common()
11297 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in __get_sregs_common()
11298 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); in __get_sregs_common()
11299 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in __get_sregs_common()
11300 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in __get_sregs_common()
11301 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in __get_sregs_common()
11303 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in __get_sregs_common()
11304 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in __get_sregs_common()
11307 sregs->idt.limit = dt.size; in __get_sregs_common()
11308 sregs->idt.base = dt.address; in __get_sregs_common()
11310 sregs->gdt.limit = dt.size; in __get_sregs_common()
11311 sregs->gdt.base = dt.address; in __get_sregs_common()
11313 sregs->cr2 = vcpu->arch.cr2; in __get_sregs_common()
11314 sregs->cr3 = kvm_read_cr3(vcpu); in __get_sregs_common()
11317 sregs->cr0 = kvm_read_cr0(vcpu); in __get_sregs_common()
11318 sregs->cr4 = kvm_read_cr4(vcpu); in __get_sregs_common()
11319 sregs->cr8 = kvm_get_cr8(vcpu); in __get_sregs_common()
11320 sregs->efer = vcpu->arch.efer; in __get_sregs_common()
11321 sregs->apic_base = kvm_get_apic_base(vcpu); in __get_sregs_common()
11328 if (vcpu->arch.guest_state_protected) in __get_sregs()
11331 if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft) in __get_sregs()
11332 set_bit(vcpu->arch.interrupt.nr, in __get_sregs()
11333 (unsigned long *)sregs->interrupt_bitmap); in __get_sregs()
11342 if (vcpu->arch.guest_state_protected) in __get_sregs2()
11347 sregs2->pdptrs[i] = kvm_pdptr_read(vcpu, i); in __get_sregs2()
11348 sregs2->flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID; in __get_sregs2()
11375 if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED || in kvm_arch_vcpu_ioctl_get_mpstate()
11376 vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) && in kvm_arch_vcpu_ioctl_get_mpstate()
11377 vcpu->arch.pv.pv_unhalted) in kvm_arch_vcpu_ioctl_get_mpstate()
11378 mp_state->mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_ioctl_get_mpstate()
11380 mp_state->mp_state = vcpu->arch.mp_state; in kvm_arch_vcpu_ioctl_get_mpstate()
11392 int ret = -EINVAL; in kvm_arch_vcpu_ioctl_set_mpstate()
11396 switch (mp_state->mp_state) { in kvm_arch_vcpu_ioctl_set_mpstate()
11419 if ((!kvm_apic_init_sipi_allowed(vcpu) || vcpu->arch.smi_pending) && in kvm_arch_vcpu_ioctl_set_mpstate()
11420 (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED || in kvm_arch_vcpu_ioctl_set_mpstate()
11421 mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED)) in kvm_arch_vcpu_ioctl_set_mpstate()
11424 if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { in kvm_arch_vcpu_ioctl_set_mpstate()
11425 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; in kvm_arch_vcpu_ioctl_set_mpstate()
11426 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); in kvm_arch_vcpu_ioctl_set_mpstate()
11428 vcpu->arch.mp_state = mp_state->mp_state; in kvm_arch_vcpu_ioctl_set_mpstate()
11440 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in kvm_task_switch()
11448 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_task_switch()
11449 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in kvm_task_switch()
11450 vcpu->run->internal.ndata = 0; in kvm_task_switch()
11454 kvm_rip_write(vcpu, ctxt->eip); in kvm_task_switch()
11455 kvm_set_rflags(vcpu, ctxt->eflags); in kvm_task_switch()
11462 if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) { in kvm_is_valid_sregs()
11465 * 64-bit mode (though maybe in a 32-bit code segment). in kvm_is_valid_sregs()
11468 if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA)) in kvm_is_valid_sregs()
11470 if (kvm_vcpu_is_illegal_gpa(vcpu, sregs->cr3)) in kvm_is_valid_sregs()
11474 * Not in 64-bit mode: EFER.LMA is clear and the code in kvm_is_valid_sregs()
11475 * segment cannot be 64-bit. in kvm_is_valid_sregs()
11477 if (sregs->efer & EFER_LMA || sregs->cs.l) in kvm_is_valid_sregs()
11481 return kvm_is_valid_cr4(vcpu, sregs->cr4); in kvm_is_valid_sregs()
11492 return -EINVAL; in __set_sregs_common()
11494 apic_base_msr.data = sregs->apic_base; in __set_sregs_common()
11497 return -EINVAL; in __set_sregs_common()
11499 if (vcpu->arch.guest_state_protected) in __set_sregs_common()
11502 dt.size = sregs->idt.limit; in __set_sregs_common()
11503 dt.address = sregs->idt.base; in __set_sregs_common()
11505 dt.size = sregs->gdt.limit; in __set_sregs_common()
11506 dt.address = sregs->gdt.base; in __set_sregs_common()
11509 vcpu->arch.cr2 = sregs->cr2; in __set_sregs_common()
11510 *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; in __set_sregs_common()
11511 vcpu->arch.cr3 = sregs->cr3; in __set_sregs_common()
11513 static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3); in __set_sregs_common()
11515 kvm_set_cr8(vcpu, sregs->cr8); in __set_sregs_common()
11517 *mmu_reset_needed |= vcpu->arch.efer != sregs->efer; in __set_sregs_common()
11518 static_call(kvm_x86_set_efer)(vcpu, sregs->efer); in __set_sregs_common()
11520 *mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; in __set_sregs_common()
11521 static_call(kvm_x86_set_cr0)(vcpu, sregs->cr0); in __set_sregs_common()
11522 vcpu->arch.cr0 = sregs->cr0; in __set_sregs_common()
11524 *mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; in __set_sregs_common()
11525 static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4); in __set_sregs_common()
11528 idx = srcu_read_lock(&vcpu->kvm->srcu); in __set_sregs_common()
11533 srcu_read_unlock(&vcpu->kvm->srcu, idx); in __set_sregs_common()
11536 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in __set_sregs_common()
11537 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in __set_sregs_common()
11538 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); in __set_sregs_common()
11539 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in __set_sregs_common()
11540 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in __set_sregs_common()
11541 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in __set_sregs_common()
11543 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in __set_sregs_common()
11544 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in __set_sregs_common()
11550 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && in __set_sregs_common()
11552 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in __set_sregs_common()
11571 (const unsigned long *)sregs->interrupt_bitmap, max_bits); in __set_sregs()
11584 bool valid_pdptrs = sregs2->flags & KVM_SREGS2_FLAGS_PDPTRS_VALID; in __set_sregs2()
11585 bool pae = (sregs2->cr0 & X86_CR0_PG) && (sregs2->cr4 & X86_CR4_PAE) && in __set_sregs2()
11586 !(sregs2->efer & EFER_LMA); in __set_sregs2()
11589 if (sregs2->flags & ~KVM_SREGS2_FLAGS_PDPTRS_VALID) in __set_sregs2()
11590 return -EINVAL; in __set_sregs2()
11592 if (valid_pdptrs && (!pae || vcpu->arch.guest_state_protected)) in __set_sregs2()
11593 return -EINVAL; in __set_sregs2()
11602 kvm_pdptr_write(vcpu, i, sregs2->pdptrs[i]); in __set_sregs2()
11606 vcpu->arch.pdptrs_from_userspace = true; in __set_sregs2()
11633 down_write(&kvm->arch.apicv_update_lock); in kvm_arch_vcpu_guestdbg_update_apicv_inhibit()
11636 if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) { in kvm_arch_vcpu_guestdbg_update_apicv_inhibit()
11642 up_write(&kvm->arch.apicv_update_lock); in kvm_arch_vcpu_guestdbg_update_apicv_inhibit()
11651 if (vcpu->arch.guest_state_protected) in kvm_arch_vcpu_ioctl_set_guest_debug()
11652 return -EINVAL; in kvm_arch_vcpu_ioctl_set_guest_debug()
11656 if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) { in kvm_arch_vcpu_ioctl_set_guest_debug()
11657 r = -EBUSY; in kvm_arch_vcpu_ioctl_set_guest_debug()
11660 if (dbg->control & KVM_GUESTDBG_INJECT_DB) in kvm_arch_vcpu_ioctl_set_guest_debug()
11672 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
11673 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) in kvm_arch_vcpu_ioctl_set_guest_debug()
11674 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
11676 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { in kvm_arch_vcpu_ioctl_set_guest_debug()
11678 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
11679 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; in kvm_arch_vcpu_ioctl_set_guest_debug()
11682 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
11686 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in kvm_arch_vcpu_ioctl_set_guest_debug()
11687 vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
11697 kvm_arch_vcpu_guestdbg_update_apicv_inhibit(vcpu->kvm); in kvm_arch_vcpu_ioctl_set_guest_debug()
11712 unsigned long vaddr = tr->linear_address; in kvm_arch_vcpu_ioctl_translate()
11718 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl_translate()
11720 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl_translate()
11721 tr->physical_address = gpa; in kvm_arch_vcpu_ioctl_translate()
11722 tr->valid = gpa != INVALID_GPA; in kvm_arch_vcpu_ioctl_translate()
11723 tr->writeable = 1; in kvm_arch_vcpu_ioctl_translate()
11724 tr->usermode = 0; in kvm_arch_vcpu_ioctl_translate()
11734 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) in kvm_arch_vcpu_ioctl_get_fpu()
11739 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave; in kvm_arch_vcpu_ioctl_get_fpu()
11740 memcpy(fpu->fpr, fxsave->st_space, 128); in kvm_arch_vcpu_ioctl_get_fpu()
11741 fpu->fcw = fxsave->cwd; in kvm_arch_vcpu_ioctl_get_fpu()
11742 fpu->fsw = fxsave->swd; in kvm_arch_vcpu_ioctl_get_fpu()
11743 fpu->ftwx = fxsave->twd; in kvm_arch_vcpu_ioctl_get_fpu()
11744 fpu->last_opcode = fxsave->fop; in kvm_arch_vcpu_ioctl_get_fpu()
11745 fpu->last_ip = fxsave->rip; in kvm_arch_vcpu_ioctl_get_fpu()
11746 fpu->last_dp = fxsave->rdp; in kvm_arch_vcpu_ioctl_get_fpu()
11747 memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space)); in kvm_arch_vcpu_ioctl_get_fpu()
11757 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) in kvm_arch_vcpu_ioctl_set_fpu()
11762 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave; in kvm_arch_vcpu_ioctl_set_fpu()
11764 memcpy(fxsave->st_space, fpu->fpr, 128); in kvm_arch_vcpu_ioctl_set_fpu()
11765 fxsave->cwd = fpu->fcw; in kvm_arch_vcpu_ioctl_set_fpu()
11766 fxsave->swd = fpu->fsw; in kvm_arch_vcpu_ioctl_set_fpu()
11767 fxsave->twd = fpu->ftwx; in kvm_arch_vcpu_ioctl_set_fpu()
11768 fxsave->fop = fpu->last_opcode; in kvm_arch_vcpu_ioctl_set_fpu()
11769 fxsave->rip = fpu->last_ip; in kvm_arch_vcpu_ioctl_set_fpu()
11770 fxsave->rdp = fpu->last_dp; in kvm_arch_vcpu_ioctl_set_fpu()
11771 memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space)); in kvm_arch_vcpu_ioctl_set_fpu()
11781 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS) in store_regs()
11782 __get_regs(vcpu, &vcpu->run->s.regs.regs); in store_regs()
11784 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS) in store_regs()
11785 __get_sregs(vcpu, &vcpu->run->s.regs.sregs); in store_regs()
11787 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS) in store_regs()
11789 vcpu, &vcpu->run->s.regs.events); in store_regs()
11794 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) { in sync_regs()
11795 __set_regs(vcpu, &vcpu->run->s.regs.regs); in sync_regs()
11796 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS; in sync_regs()
11798 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) { in sync_regs()
11799 if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs)) in sync_regs()
11800 return -EINVAL; in sync_regs()
11801 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS; in sync_regs()
11803 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) { in sync_regs()
11805 vcpu, &vcpu->run->s.regs.events)) in sync_regs()
11806 return -EINVAL; in sync_regs()
11807 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS; in sync_regs()
11815 if (kvm_check_tsc_unstable() && kvm->created_vcpus) in kvm_arch_vcpu_precreate()
11819 if (!kvm->arch.max_vcpu_ids) in kvm_arch_vcpu_precreate()
11820 kvm->arch.max_vcpu_ids = KVM_MAX_VCPU_IDS; in kvm_arch_vcpu_precreate()
11822 if (id >= kvm->arch.max_vcpu_ids) in kvm_arch_vcpu_precreate()
11823 return -EINVAL; in kvm_arch_vcpu_precreate()
11833 vcpu->arch.last_vmentry_cpu = -1; in kvm_arch_vcpu_create()
11834 vcpu->arch.regs_avail = ~0; in kvm_arch_vcpu_create()
11835 vcpu->arch.regs_dirty = ~0; in kvm_arch_vcpu_create()
11837 kvm_gpc_init(&vcpu->arch.pv_time); in kvm_arch_vcpu_create()
11839 if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu)) in kvm_arch_vcpu_create()
11840 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_create()
11842 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; in kvm_arch_vcpu_create()
11848 if (irqchip_in_kernel(vcpu->kvm)) { in kvm_arch_vcpu_create()
11859 * Ignore the current per-VM APICv state so that vCPU creation in kvm_arch_vcpu_create()
11861 * will ensure the vCPU gets the correct state before VM-Entry. in kvm_arch_vcpu_create()
11864 vcpu->arch.apic->apicv_active = true; in kvm_arch_vcpu_create()
11870 r = -ENOMEM; in kvm_arch_vcpu_create()
11875 vcpu->arch.pio_data = page_address(page); in kvm_arch_vcpu_create()
11877 vcpu->arch.mce_banks = kcalloc(KVM_MAX_MCE_BANKS * 4, sizeof(u64), in kvm_arch_vcpu_create()
11879 vcpu->arch.mci_ctl2_banks = kcalloc(KVM_MAX_MCE_BANKS, sizeof(u64), in kvm_arch_vcpu_create()
11881 if (!vcpu->arch.mce_banks || !vcpu->arch.mci_ctl2_banks) in kvm_arch_vcpu_create()
11883 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; in kvm_arch_vcpu_create()
11885 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, in kvm_arch_vcpu_create()
11892 if (!fpu_alloc_guest_fpstate(&vcpu->arch.guest_fpu)) { in kvm_arch_vcpu_create()
11897 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); in kvm_arch_vcpu_create()
11898 vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu); in kvm_arch_vcpu_create()
11900 vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; in kvm_arch_vcpu_create()
11905 vcpu->arch.pending_external_vector = -1; in kvm_arch_vcpu_create()
11906 vcpu->arch.preempted_in_kernel = false; in kvm_arch_vcpu_create()
11909 vcpu->arch.hv_root_tdp = INVALID_PAGE; in kvm_arch_vcpu_create()
11916 vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); in kvm_arch_vcpu_create()
11917 vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; in kvm_arch_vcpu_create()
11921 kvm_set_tsc_khz(vcpu, vcpu->kvm->arch.default_tsc_khz); in kvm_arch_vcpu_create()
11928 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu); in kvm_arch_vcpu_create()
11930 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); in kvm_arch_vcpu_create()
11932 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_create()
11934 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_create()
11935 kfree(vcpu->arch.mci_ctl2_banks); in kvm_arch_vcpu_create()
11936 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_create()
11946 struct kvm *kvm = vcpu->kvm; in kvm_arch_vcpu_postcreate()
11948 if (mutex_lock_killable(&vcpu->mutex)) in kvm_arch_vcpu_postcreate()
11955 vcpu->arch.msr_kvm_poll_control = 1; in kvm_arch_vcpu_postcreate()
11957 mutex_unlock(&vcpu->mutex); in kvm_arch_vcpu_postcreate()
11959 if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0) in kvm_arch_vcpu_postcreate()
11960 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, in kvm_arch_vcpu_postcreate()
11972 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); in kvm_arch_vcpu_destroy()
11973 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_destroy()
11974 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu); in kvm_arch_vcpu_destroy()
11979 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_destroy()
11980 kfree(vcpu->arch.mci_ctl2_banks); in kvm_arch_vcpu_destroy()
11982 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_destroy()
11984 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_destroy()
11985 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_destroy()
11986 kvfree(vcpu->arch.cpuid_entries); in kvm_arch_vcpu_destroy()
11998 * Several of the "set" flows, e.g. ->set_cr0(), read other registers in kvm_vcpu_reset()
12008 * SVM doesn't unconditionally VM-Exit on INIT and SHUTDOWN, thus it's in kvm_vcpu_reset()
12019 vcpu->arch.hflags = 0; in kvm_vcpu_reset()
12021 vcpu->arch.smi_pending = 0; in kvm_vcpu_reset()
12022 vcpu->arch.smi_count = 0; in kvm_vcpu_reset()
12023 atomic_set(&vcpu->arch.nmi_queued, 0); in kvm_vcpu_reset()
12024 vcpu->arch.nmi_pending = 0; in kvm_vcpu_reset()
12025 vcpu->arch.nmi_injected = false; in kvm_vcpu_reset()
12029 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); in kvm_vcpu_reset()
12031 vcpu->arch.dr6 = DR6_ACTIVE_LOW; in kvm_vcpu_reset()
12032 vcpu->arch.dr7 = DR7_FIXED_1; in kvm_vcpu_reset()
12035 vcpu->arch.cr2 = 0; in kvm_vcpu_reset()
12038 vcpu->arch.apf.msr_en_val = 0; in kvm_vcpu_reset()
12039 vcpu->arch.apf.msr_int_val = 0; in kvm_vcpu_reset()
12040 vcpu->arch.st.msr_val = 0; in kvm_vcpu_reset()
12046 vcpu->arch.apf.halted = false; in kvm_vcpu_reset()
12048 if (vcpu->arch.guest_fpu.fpstate && kvm_mpx_supported()) { in kvm_vcpu_reset()
12049 struct fpstate *fpstate = vcpu->arch.guest_fpu.fpstate; in kvm_vcpu_reset()
12067 vcpu->arch.smbase = 0x30000; in kvm_vcpu_reset()
12069 vcpu->arch.msr_misc_features_enables = 0; in kvm_vcpu_reset()
12070 vcpu->arch.ia32_misc_enable_msr = MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | in kvm_vcpu_reset()
12078 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); in kvm_vcpu_reset()
12089 kvm_rdx_write(vcpu, cpuid_0x1 ? cpuid_0x1->eax : 0x600); in kvm_vcpu_reset()
12096 vcpu->arch.cr3 = 0; in kvm_vcpu_reset()
12118 * which PCIDs have to be flushed. However, CR0.WP and the paging-related in kvm_vcpu_reset()
12148 cs.base = vector << 12; in kvm_vcpu_deliver_sipi_vector()
12173 if (!stable && vcpu->cpu == smp_processor_id()) in kvm_arch_hardware_enable()
12175 if (stable && vcpu->arch.last_host_tsc > local_tsc) { in kvm_arch_hardware_enable()
12177 if (vcpu->arch.last_host_tsc > max_tsc) in kvm_arch_hardware_enable()
12178 max_tsc = vcpu->arch.last_host_tsc; in kvm_arch_hardware_enable()
12208 * N.B.: the code below runs only on platforms with a reliable TSC, in kvm_arch_hardware_enable()
12222 u64 delta_cyc = max_tsc - local_tsc; in kvm_arch_hardware_enable()
12224 kvm->arch.backwards_tsc_observed = true; in kvm_arch_hardware_enable()
12226 vcpu->arch.tsc_offset_adjustment += delta_cyc; in kvm_arch_hardware_enable()
12227 vcpu->arch.last_host_tsc = local_tsc; in kvm_arch_hardware_enable()
12237 kvm->arch.last_tsc_nsec = 0; in kvm_arch_hardware_enable()
12238 kvm->arch.last_tsc_write = 0; in kvm_arch_hardware_enable()
12253 memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops)); in kvm_ops_update()
12263 #include <asm/kvm-x86-ops.h> in kvm_ops_update()
12266 kvm_pmu_ops_update(ops->pmu_ops); in kvm_ops_update()
12281 r = ops->hardware_setup(); in kvm_arch_hardware_setup()
12287 kvm_register_perf_callbacks(ops->handle_intel_pt_intr); in kvm_arch_hardware_setup()
12328 return -EIO; in kvm_arch_check_processor_compat()
12330 return ops->check_processor_compatibility(); in kvm_arch_check_processor_compat()
12335 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id; in kvm_vcpu_is_reset_bsp()
12341 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; in kvm_vcpu_is_bsp()
12351 vcpu->arch.l1tf_flush_l1d = true; in kvm_arch_sched_in()
12352 if (pmu->version && unlikely(pmu->event_count)) { in kvm_arch_sched_in()
12353 pmu->need_cleanup = true; in kvm_arch_sched_in()
12361 kfree(to_kvm_hv(kvm)->hv_pa_pg); in kvm_arch_free_vm()
12372 return -EINVAL; in kvm_arch_init_vm()
12386 INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list); in kvm_arch_init_vm()
12387 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); in kvm_arch_init_vm()
12388 atomic_set(&kvm->arch.noncoherent_dma_count, 0); in kvm_arch_init_vm()
12391 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); in kvm_arch_init_vm()
12392 /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */ in kvm_arch_init_vm()
12394 &kvm->arch.irq_sources_bitmap); in kvm_arch_init_vm()
12396 raw_spin_lock_init(&kvm->arch.tsc_write_lock); in kvm_arch_init_vm()
12397 mutex_init(&kvm->arch.apic_map_lock); in kvm_arch_init_vm()
12398 seqcount_raw_spinlock_init(&kvm->arch.pvclock_sc, &kvm->arch.tsc_write_lock); in kvm_arch_init_vm()
12399 kvm->arch.kvmclock_offset = -get_kvmclock_base_ns(); in kvm_arch_init_vm()
12401 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); in kvm_arch_init_vm()
12403 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); in kvm_arch_init_vm()
12405 kvm->arch.default_tsc_khz = max_tsc_khz ? : tsc_khz; in kvm_arch_init_vm()
12406 kvm->arch.guest_can_read_msr_platform_info = true; in kvm_arch_init_vm()
12407 kvm->arch.enable_pmu = enable_pmu; in kvm_arch_init_vm()
12410 spin_lock_init(&kvm->arch.hv_root_tdp_lock); in kvm_arch_init_vm()
12411 kvm->arch.hv_root_tdp = INVALID_PAGE; in kvm_arch_init_vm()
12414 INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); in kvm_arch_init_vm()
12415 INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); in kvm_arch_init_vm()
12456 cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work); in kvm_arch_sync_events()
12457 cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work); in kvm_arch_sync_events()
12474 * -errno: on error
12479 * GPA->HVA translation will not change. However, the HVA is a user
12491 /* Called with kvm->slots_lock held. */ in __x86_set_memory_region()
12493 return ERR_PTR_USR(-EINVAL); in __x86_set_memory_region()
12497 if (slot && slot->npages) in __x86_set_memory_region()
12498 return ERR_PTR_USR(-EEXIST); in __x86_set_memory_region()
12509 if (!slot || !slot->npages) in __x86_set_memory_region()
12512 old_npages = slot->npages; in __x86_set_memory_region()
12513 hva = slot->userspace_addr; in __x86_set_memory_region()
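A hedged usage sketch, in the style of the VMX identity-map setup elsewhere in the tree: callers take kvm->slots_lock, pass a private slot id, and delete the slot again by passing size 0. The GPA here is illustrative:

static int example_internal_slot(struct kvm *kvm)
{
	void __user *hva;
	int r = 0;

	mutex_lock(&kvm->slots_lock);
	hva = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
				      0xfffbc000, PAGE_SIZE);
	if (IS_ERR(hva))
		r = PTR_ERR(hva);
	else
		__x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
					0, 0);
	mutex_unlock(&kvm->slots_lock);

	return r;
}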
12543 if (current->mm == kvm->mm) { in kvm_arch_destroy_vm()
12549 mutex_lock(&kvm->slots_lock); in kvm_arch_destroy_vm()
12555 mutex_unlock(&kvm->slots_lock); in kvm_arch_destroy_vm()
12559 kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1)); in kvm_arch_destroy_vm()
12563 kvfree(rcu_dereference_check(kvm->arch.apic_map, 1)); in kvm_arch_destroy_vm()
12564 kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1)); in kvm_arch_destroy_vm()
12576 kvfree(slot->arch.rmap[i]); in memslot_rmap_free()
12577 slot->arch.rmap[i] = NULL; in memslot_rmap_free()
12588 kvfree(slot->arch.lpage_info[i - 1]); in kvm_arch_free_memslot()
12589 slot->arch.lpage_info[i - 1] = NULL; in kvm_arch_free_memslot()
12597 const int sz = sizeof(*slot->arch.rmap[0]); in memslot_rmap_alloc()
12604 if (slot->arch.rmap[i]) in memslot_rmap_alloc()
12607 slot->arch.rmap[i] = __vcalloc(lpages, sz, GFP_KERNEL_ACCOUNT); in memslot_rmap_alloc()
12608 if (!slot->arch.rmap[i]) { in memslot_rmap_alloc()
12610 return -ENOMEM; in memslot_rmap_alloc()
12620 unsigned long npages = slot->npages; in kvm_alloc_memslot_metadata()
12628 memset(&slot->arch, 0, sizeof(slot->arch)); in kvm_alloc_memslot_metadata()
12648 slot->arch.lpage_info[i - 1] = linfo; in kvm_alloc_memslot_metadata()
12650 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) in kvm_alloc_memslot_metadata()
12652 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) in kvm_alloc_memslot_metadata()
12653 linfo[lpages - 1].disallow_lpage = 1; in kvm_alloc_memslot_metadata()
12654 ugfn = slot->userspace_addr >> PAGE_SHIFT; in kvm_alloc_memslot_metadata()
12659 if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1)) { in kvm_alloc_memslot_metadata()
12676 kvfree(slot->arch.lpage_info[i - 1]); in kvm_alloc_memslot_metadata()
12677 slot->arch.lpage_info[i - 1] = NULL; in kvm_alloc_memslot_metadata()
12679 return -ENOMEM; in kvm_alloc_memslot_metadata()
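A hedged worked example of the disallow_lpage tests above at the 2MiB level, where KVM_PAGES_PER_HPAGE() is 512 4KiB pages: a slot whose base gfn is not 512-aligned can never map its head (or tail) range with a large page:

#include <assert.h>
#include <stdint.h>

static void lpage_alignment_demo(void)
{
	const uint64_t pages_per_hpage = 512;	/* 2MiB / 4KiB */

	/* base gfn 0x100201 starts mid-hugepage: head range disallowed */
	assert((0x100201ULL & (pages_per_hpage - 1)) != 0);

	/* base gfn 0x100400 is 512-aligned: eligible for 2MiB mappings */
	assert((0x100400ULL & (pages_per_hpage - 1)) == 0);
}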
12688 * memslots->generation has been incremented. in kvm_arch_memslots_updated()
12693 /* Force re-initialization of steal_time cache */ in kvm_arch_memslots_updated()
12704 if ((new->base_gfn + new->npages - 1) > kvm_mmu_max_gfn()) in kvm_arch_prepare_memory_region()
12705 return -EINVAL; in kvm_arch_prepare_memory_region()
12711 memcpy(&new->arch, &old->arch, sizeof(old->arch)); in kvm_arch_prepare_memory_region()
12713 return -EIO; in kvm_arch_prepare_memory_region()
12721 struct kvm_arch *ka = &kvm->arch; in kvm_mmu_update_cpu_dirty_logging()
12726 if ((enable && ++ka->cpu_dirty_logging_count == 1) || in kvm_mmu_update_cpu_dirty_logging()
12727 (!enable && --ka->cpu_dirty_logging_count == 0)) in kvm_mmu_update_cpu_dirty_logging()
12730 WARN_ON_ONCE(ka->cpu_dirty_logging_count < 0); in kvm_mmu_update_cpu_dirty_logging()
12738 u32 old_flags = old ? old->flags : 0; in kvm_mmu_slot_apply_flags()
12739 u32 new_flags = new ? new->flags : 0; in kvm_mmu_slot_apply_flags()
12759 * CREATE: No shadow pages exist, thus nothing to write-protect in kvm_mmu_slot_apply_flags()
12768 * READONLY and non-flags changes were filtered out above, and the only in kvm_mmu_slot_apply_flags()
12786 * which can be collapsed into a single large-page spte. Later in kvm_mmu_slot_apply_flags()
12787 * page faults will create the large-page sptes. in kvm_mmu_slot_apply_flags()
12792 * Initially-all-set does not require write protecting any page, in kvm_mmu_slot_apply_flags()
12817 * write-protected before returning to userspace, i.e. before in kvm_mmu_slot_apply_flags()
12824 * Specifically, KVM also write-protects guest page tables to in kvm_mmu_slot_apply_flags()
12833 * To handle these scenarios, KVM uses a separate software-only in kvm_mmu_slot_apply_flags()
12834 * bit (MMU-writable) to track if a SPTE is !writable due to in kvm_mmu_slot_apply_flags()
12835 * a guest page table being write-protected (KVM clears the in kvm_mmu_slot_apply_flags()
12836 * MMU-writable flag when write-protecting for shadow paging). in kvm_mmu_slot_apply_flags()
12838 * The use of MMU-writable is also the primary motivation for in kvm_mmu_slot_apply_flags()
12841 * !MMU-writable SPTE, KVM must flush if it encounters any in kvm_mmu_slot_apply_flags()
12842 * MMU-writable SPTE regardless of whether the actual hardware in kvm_mmu_slot_apply_flags()
12845 * write access" helpers to ignore MMU-writable entirely. in kvm_mmu_slot_apply_flags()
12848 * access-tracked SPTEs is particularly relevant). in kvm_mmu_slot_apply_flags()
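A hedged bit-level model of the scheme the comment above describes: a SPTE is hardware-writable only when both software bits allow it, and flush decisions key off MMU-writable alone, because a !writable SPTE may still be cached writable in a TLB. Bit positions here are illustrative, not KVM's actual layout:

#include <stdbool.h>
#include <stdint.h>

#define SPTE_HOST_WRITABLE	(1ULL << 57)	/* illustrative position */
#define SPTE_MMU_WRITABLE	(1ULL << 58)	/* illustrative position */

static bool zap_may_need_flush(uint64_t old_spte)
{
	/* must flush even if the hardware-writable bit is already clear */
	return old_spte & SPTE_MMU_WRITABLE;
}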
12859 if (!kvm->arch.n_requested_mmu_pages && in kvm_arch_commit_memory_region()
12863 nr_mmu_pages = kvm->nr_memslot_pages / KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO; in kvm_arch_commit_memory_region()
12894 if (!list_empty_careful(&vcpu->async_pf.done)) in kvm_vcpu_has_events()
12901 if (vcpu->arch.pv.pv_unhalted) in kvm_vcpu_has_events()
12908 (vcpu->arch.nmi_pending && in kvm_vcpu_has_events()
12913 (vcpu->arch.smi_pending && in kvm_vcpu_has_events()
12926 kvm_x86_ops.nested_ops->has_events && in kvm_vcpu_has_events()
12927 kvm_x86_ops.nested_ops->has_events(vcpu)) in kvm_vcpu_has_events()
12952 if (READ_ONCE(vcpu->arch.pv.pv_unhalted)) in kvm_arch_dy_runnable()
12965 if (vcpu->arch.guest_state_protected) in kvm_arch_vcpu_in_kernel()
12968 return vcpu->arch.preempted_in_kernel; in kvm_arch_vcpu_in_kernel()
12989 if (vcpu->arch.guest_state_protected) in kvm_get_linear_rip()
13010 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in kvm_get_rflags()
13018 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && in __kvm_set_rflags()
13019 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) in __kvm_set_rflags()
13040 return (key + 1) & (ASYNC_PF_PER_VCPU - 1); in kvm_async_pf_next_probe()
13047 while (vcpu->arch.apf.gfns[key] != ~0) in kvm_add_async_pf_gfn()
13050 vcpu->arch.apf.gfns[key] = gfn; in kvm_add_async_pf_gfn()
13059 (vcpu->arch.apf.gfns[key] != gfn && in kvm_async_pf_gfn_slot()
13060 vcpu->arch.apf.gfns[key] != ~0); i++) in kvm_async_pf_gfn_slot()
13068 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; in kvm_find_async_pf_gfn()
13077 if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn)) in kvm_del_async_pf_gfn()
13081 vcpu->arch.apf.gfns[i] = ~0; in kvm_del_async_pf_gfn()
13084 if (vcpu->arch.apf.gfns[j] == ~0) in kvm_del_async_pf_gfn()
13086 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); in kvm_del_async_pf_gfn()
13093 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; in kvm_del_async_pf_gfn()
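The async-PF gfn table above is a small power-of-two, open-addressed hash with linear probing and ~0 as the empty marker (deletion back-shifts displaced entries, as shown). A standalone model of the insert/lookup side; the table size and hash function are illustrative stand-ins:

#include <stdint.h>

#define APF_SLOTS 64U			/* must stay a power of two */
#define APF_EMPTY (~0ULL)

static uint32_t hash_gfn(uint64_t gfn)
{
	return (uint32_t)((gfn * 0x9e3779b97f4a7c15ULL) >> 58) & (APF_SLOTS - 1);
}

static void add_gfn(uint64_t table[APF_SLOTS], uint64_t gfn)
{
	uint32_t key = hash_gfn(gfn);

	while (table[key] != APF_EMPTY)		/* linear probe */
		key = (key + 1) & (APF_SLOTS - 1);
	table[key] = gfn;			/* assumes a free slot exists */
}

static int find_gfn(const uint64_t table[APF_SLOTS], uint64_t gfn)
{
	uint32_t key = hash_gfn(gfn);
	unsigned int i;

	for (i = 0; i < APF_SLOTS; i++) {
		if (table[key] == gfn)
			return 1;
		if (table[key] == APF_EMPTY)
			return 0;
		key = (key + 1) & (APF_SLOTS - 1);
	}
	return 0;
}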
13102 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason, in apf_put_user_notpresent()
13110 return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, in apf_put_user_ready()
13119 if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, in apf_pageready_slot_free()
13132 if (vcpu->arch.apf.send_user_only && in kvm_can_deliver_async_pf()
13141 return vcpu->arch.apf.delivery_as_pf_vmexit; in kvm_can_deliver_async_pf()
13159 if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu)) in kvm_can_do_async_pf()
13174 trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa); in kvm_arch_async_page_not_present()
13175 kvm_add_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_not_present()
13183 fault.address = work->arch.token; in kvm_arch_async_page_not_present()
13206 .vector = vcpu->arch.apf.vec in kvm_arch_async_page_present()
13209 if (work->wakeup_all) in kvm_arch_async_page_present()
13210 work->arch.token = ~0; /* broadcast wakeup */ in kvm_arch_async_page_present()
13212 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_present()
13213 trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa); in kvm_arch_async_page_present()
13215 if ((work->wakeup_all || work->notpresent_injected) && in kvm_arch_async_page_present()
13217 !apf_put_user_ready(vcpu, work->arch.token)) { in kvm_arch_async_page_present()
13218 vcpu->arch.apf.pageready_pending = true; in kvm_arch_async_page_present()
13222 vcpu->arch.apf.halted = false; in kvm_arch_async_page_present()
13223 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_async_page_present()
13229 if (!vcpu->arch.apf.pageready_pending) in kvm_arch_async_page_present_queued()
13243 if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1) in kvm_arch_start_assignment()
13250 atomic_dec(&kvm->arch.assigned_device_count); in kvm_arch_end_assignment()
13256 return arch_atomic_read(&kvm->arch.assigned_device_count); in kvm_arch_has_assigned_device()
13262 atomic_inc(&kvm->arch.noncoherent_dma_count); in kvm_arch_register_noncoherent_dma()
13268 atomic_dec(&kvm->arch.noncoherent_dma_count); in kvm_arch_unregister_noncoherent_dma()
13274 return atomic_read(&kvm->arch.noncoherent_dma_count); in kvm_arch_has_noncoherent_dma()
13290 irqfd->producer = prod; in kvm_arch_irq_bypass_add_producer()
13291 kvm_arch_start_assignment(irqfd->kvm); in kvm_arch_irq_bypass_add_producer()
13292 ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm, in kvm_arch_irq_bypass_add_producer()
13293 prod->irq, irqfd->gsi, 1); in kvm_arch_irq_bypass_add_producer()
13296 kvm_arch_end_assignment(irqfd->kvm); in kvm_arch_irq_bypass_add_producer()
13308 WARN_ON(irqfd->producer != prod); in kvm_arch_irq_bypass_del_producer()
13309 irqfd->producer = NULL; in kvm_arch_irq_bypass_del_producer()
13313 * remapped mode, so we can re-use the current implementation in kvm_arch_irq_bypass_del_producer()
13317 ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm, prod->irq, irqfd->gsi, 0); in kvm_arch_irq_bypass_del_producer()
13320 " fails: %d\n", irqfd->consumer.token, ret); in kvm_arch_irq_bypass_del_producer()
13322 kvm_arch_end_assignment(irqfd->kvm); in kvm_arch_irq_bypass_del_producer()
13334 if (new->type != KVM_IRQ_ROUTING_MSI) in kvm_arch_irqfd_route_changed()
13337 return !!memcmp(&old->msi, &new->msi, sizeof(new->msi)); in kvm_arch_irqfd_route_changed()
13347 return (vcpu->arch.msr_kvm_poll_control & 1) == 0; in kvm_arch_no_poll()
13380 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_fixup_and_inject_pf_error()
13386 mmu->gva_to_gpa(vcpu, mmu, gva, access, &fault) != INVALID_GPA) { in kvm_fixup_and_inject_pf_error()
13388 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page in kvm_fixup_and_inject_pf_error()
13399 vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault); in kvm_fixup_and_inject_pf_error()
13420 * doesn't seem to be a real use-case behind such requests, just return in kvm_handle_memory_failure()
13472 * page tables, so a non-global flush just degenerates to a in kvm_handle_invpcid()
static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        struct kvm_mmio_fragment *frag;
        unsigned int len;

        BUG_ON(!vcpu->mmio_needed);

        /* Complete previous fragment */
        frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
        len = min(8u, frag->len);
        if (!vcpu->mmio_is_write)
                memcpy(frag->data, run->mmio.data, len);

        if (frag->len <= 8) {
                /* Switch to the next fragment. */
                frag++;
                vcpu->mmio_cur_fragment++;
        } else {
                /* Go forward to the next mmio piece. */
                frag->data += len;
                frag->gpa += len;
                frag->len -= len;
        }

        if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
                vcpu->mmio_needed = 0;

                /* VMG retry, userspace progressed a fragment, SEV-ES wasn't done yet */
                return 1;
        }

        run->mmio.phys_addr = frag->gpa;
        run->mmio.len = min(8u, frag->len);
        run->mmio.is_write = vcpu->mmio_is_write;
        if (run->mmio.is_write)
                memcpy(run->mmio.data, frag->data, min(8u, frag->len));
        run->exit_reason = KVM_EXIT_MMIO;

        vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;

        return 0;
}
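/*
 * Worked example (illustrative sizes): a 16-byte SEV-ES MMIO read that no
 * in-kernel device claims is queued as one fragment {len = 16}.  Each
 * KVM_EXIT_MMIO moves at most 8 bytes, so this completion runs twice:
 *
 *   exit 1: copy 8 bytes, else-branch -> {len = 8, gpa += 8, data += 8}
 *   exit 2: copy 8 bytes, frag->len <= 8 -> next fragment, mmio_needed = 0
 */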
int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
                          void *data)
{
        int handled;
        struct kvm_mmio_fragment *frag;

        if (!data)
                return -EINVAL;

        handled = write_emultor.read_write_mmio(vcpu, gpa, bytes, data);
        if (handled == bytes)
                return 1;

        bytes -= handled;
        gpa += handled;
        data += handled;

        /*TODO: Check if need to increment number of frags */
        frag = vcpu->mmio_fragments;
        vcpu->mmio_nr_fragments = 1;
        frag->len = bytes;
        frag->gpa = gpa;
        frag->data = data;

        vcpu->mmio_needed = 1;
        vcpu->mmio_cur_fragment = 0;

        vcpu->run->mmio.phys_addr = gpa;
        vcpu->run->mmio.len = min(8u, frag->len);
        vcpu->run->mmio.is_write = 1;
        memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
        vcpu->run->exit_reason = KVM_EXIT_MMIO;

        vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;

        return 0;
}
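/*
 * Note: 'handled' above is whatever portion an in-kernel device model
 * consumed; only the unhandled tail (bytes/gpa/data advanced by 'handled')
 * is forwarded to userspace as a KVM_EXIT_MMIO fragment.
 */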
int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
                         void *data)
{
        int handled;
        struct kvm_mmio_fragment *frag;

        if (!data)
                return -EINVAL;

        handled = read_emultor.read_write_mmio(vcpu, gpa, bytes, data);
        if (handled == bytes)
                return 1;

        bytes -= handled;
        gpa += handled;
        data += handled;

        /*TODO: Check if need to increment number of frags */
        frag = vcpu->mmio_fragments;
        vcpu->mmio_nr_fragments = 1;
        frag->len = bytes;
        frag->gpa = gpa;
        frag->data = data;

        vcpu->mmio_needed = 1;
        vcpu->mmio_cur_fragment = 0;

        vcpu->run->mmio.phys_addr = gpa;
        vcpu->run->mmio.len = min(8u, frag->len);
        vcpu->run->mmio.is_write = 0;
        vcpu->run->exit_reason = KVM_EXIT_MMIO;

        vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;

        return 0;
}
static void advance_sev_es_emulated_pio(struct kvm_vcpu *vcpu, unsigned count, int size)
{
        vcpu->arch.sev_pio_count -= count;
        vcpu->arch.sev_pio_data += count * size;
}
static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
                           unsigned int port);

static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu)
{
        int size = vcpu->arch.pio.size;
        int port = vcpu->arch.pio.port;

        vcpu->arch.pio.count = 0;
        if (vcpu->arch.sev_pio_count)
                return kvm_sev_es_outs(vcpu, size, port);
        return 1;
}
static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
                           unsigned int port)
{
        for (;;) {
                unsigned int count =
                        min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
                int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count);

                /* memcpy done already by emulator_pio_out. */
                advance_sev_es_emulated_pio(vcpu, count, size);
                if (!ret)
                        break;

                /* Emulation done by the kernel. */
                if (!vcpu->arch.sev_pio_count)
                        return 1;
        }

        vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs;
        return 0;
}
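/*
 * emulator_pio_out() returning non-zero means the kernel fully emulated the
 * chunk, so the loop continues with the next PAGE_SIZE/size items; a zero
 * return means userspace must complete the I/O, so the loop breaks and
 * complete_sev_es_emulated_outs() resumes the transfer afterwards.
 */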
static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
                          unsigned int port);

static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
{
        unsigned count = vcpu->arch.pio.count;
        int size = vcpu->arch.pio.size;
        int port = vcpu->arch.pio.port;

        complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data);
        advance_sev_es_emulated_pio(vcpu, count, size);
        if (vcpu->arch.sev_pio_count)
                return kvm_sev_es_ins(vcpu, size, port);
        return 1;
}
static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
                          unsigned int port)
{
        for (;;) {
                unsigned int count =
                        min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
                if (!emulator_pio_in(vcpu, size, port, vcpu->arch.sev_pio_data, count))
                        break;

                /* Emulation done by the kernel. */
                advance_sev_es_emulated_pio(vcpu, count, size);
                if (!vcpu->arch.sev_pio_count)
                        return 1;
        }

        vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
        return 0;
}
int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
                         unsigned int port, void *data, unsigned int count,
                         int in)
{
        vcpu->arch.sev_pio_data = data;
        vcpu->arch.sev_pio_count = count;
        return in ? kvm_sev_es_ins(vcpu, size, port)
                  : kvm_sev_es_outs(vcpu, size, port);
}
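/*
 * Usage sketch (the caller lives in the SEV-ES VMGEXIT handling in sev.c;
 * argument names here are assumed for illustration):
 *
 *      ret = kvm_sev_es_string_io(vcpu, size, port, scratch_buf,
 *                                 scratch_len / size, in);
 *
 * The shared scratch buffer stands in for guest memory, since KVM cannot
 * read or write an SEV-ES guest's registers or memory directly.
 */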