Lines matching "ecx" and "1000" in arch/x86/kvm/x86.c

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Kernel-based Virtual Machine driver for Linux
16 * Ben-Ami Yassour <benami@il.ibm.com>
43 #include <linux/intel-iommu.h>
45 #include <linux/user-return-notifier.h>
59 #include <linux/entry-kvm.h>
88 ((struct kvm_vcpu *)(ctxt)->vcpu)
91 * - enable syscall by default because it's emulated by KVM
92 * - enable LME and LMA by default on 64-bit KVM
139 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
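For scale: the default tolerance is 250 ppm (half of NTP's 500 ppm threshold), so a 3,000,000 kHz host TSC tolerates roughly +/-750 kHz before KVM falls back to catchup mode. A minimal sketch of the adjustment math, assuming a helper shaped like the kernel's adjust_tsc_khz():

	/* Sketch: scale a kHz frequency by a signed parts-per-million offset. */
	static u32 adjust_tsc_khz(u32 khz, s32 ppm)
	{
		u64 v = (u64)khz * (1000000 + ppm);

		do_div(v, 1000000);	/* v becomes the quotient */
		return v;
	}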
144 * lapic timer advance (tscdeadline mode only) in nanoseconds. '-1' enables
145 * adaptive tuning starting from default advancement of 1000ns. '0' disables
146 * advancement entirely. Any other value is used as-is and disables adaptive
149 static int __read_mostly lapic_timer_advance_ns = -1;
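A sketch of how those three cases could be consumed (hypothetical helper; the real consumer is the lapic timer code, which is not part of this excerpt):

	/* -1: adaptive tuning from 1000ns; 0: disabled; >0: fixed, used as-is. */
	static u32 initial_timer_advance_ns(int param)
	{
		if (param == -1)
			return 1000;	/* starting point for adaptive tuning */
		return param;		/* 0 turns advancement off entirely */
	}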
162 int __read_mostly pi_inject_timer = -1;
286 size - useroffset, NULL); in kvm_alloc_emulator_cache()
295 vcpu->arch.apf.gfns[i] = ~0; in kvm_async_pf_hash_reset()
311 if (msrs->registered) { in kvm_on_user_return()
312 msrs->registered = false; in kvm_on_user_return()
317 values = &msrs->values[slot]; in kvm_on_user_return()
318 if (values->host != values->curr) { in kvm_on_user_return()
319 wrmsrl(user_return_msrs_global.msrs[slot], values->host); in kvm_on_user_return()
320 values->curr = values->host; in kvm_on_user_return()
343 msrs->values[i].host = value; in kvm_user_return_msr_cpu_online()
344 msrs->values[i].curr = value; in kvm_user_return_msr_cpu_online()
354 value = (value & mask) | (msrs->values[slot].host & ~mask); in kvm_set_user_return_msr()
355 if (value == msrs->values[slot].curr) in kvm_set_user_return_msr()
361 msrs->values[slot].curr = value; in kvm_set_user_return_msr()
362 if (!msrs->registered) { in kvm_set_user_return_msr()
363 msrs->urn.on_user_return = kvm_on_user_return; in kvm_set_user_return_msr()
364 user_return_notifier_register(&msrs->urn); in kvm_set_user_return_msr()
365 msrs->registered = true; in kvm_set_user_return_msr()
376 if (msrs->registered) in drop_user_return_notifiers()
377 kvm_on_user_return(&msrs->urn); in drop_user_return_notifiers()
382 return vcpu->arch.apic_base; in kvm_get_apic_base()
395 enum lapic_mode new_mode = kvm_apic_mode(msr_info->data); in kvm_set_apic_base()
399 if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID) in kvm_set_apic_base()
401 if (!msr_info->host_initiated) { in kvm_set_apic_base()
408 kvm_lapic_set_base(vcpu, msr_info->data); in kvm_set_apic_base()
409 kvm_recalculate_apic_map(vcpu->kvm); in kvm_set_apic_base()
469 unsigned nr = vcpu->arch.exception.nr; in kvm_deliver_exception_payload()
470 bool has_payload = vcpu->arch.exception.has_payload; in kvm_deliver_exception_payload()
471 unsigned long payload = vcpu->arch.exception.payload; in kvm_deliver_exception_payload()
479 * "Certain debug exceptions may clear bit 0-3. The in kvm_deliver_exception_payload()
483 vcpu->arch.dr6 &= ~DR_TRAP_BITS; in kvm_deliver_exception_payload()
487 vcpu->arch.dr6 |= DR6_RTM; in kvm_deliver_exception_payload()
488 vcpu->arch.dr6 |= payload; in kvm_deliver_exception_payload()
497 vcpu->arch.dr6 ^= payload & DR6_RTM; in kvm_deliver_exception_payload()
505 vcpu->arch.dr6 &= ~BIT(12); in kvm_deliver_exception_payload()
508 vcpu->arch.cr2 = payload; in kvm_deliver_exception_payload()
512 vcpu->arch.exception.has_payload = false; in kvm_deliver_exception_payload()
513 vcpu->arch.exception.payload = 0; in kvm_deliver_exception_payload()
526 if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) { in kvm_multiple_exception()
532 * On vmentry, vcpu->arch.exception.pending is only in kvm_multiple_exception()
539 WARN_ON_ONCE(vcpu->arch.exception.pending); in kvm_multiple_exception()
540 vcpu->arch.exception.injected = true; in kvm_multiple_exception()
550 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
551 vcpu->arch.exception.injected = false; in kvm_multiple_exception()
553 vcpu->arch.exception.has_error_code = has_error; in kvm_multiple_exception()
554 vcpu->arch.exception.nr = nr; in kvm_multiple_exception()
555 vcpu->arch.exception.error_code = error_code; in kvm_multiple_exception()
556 vcpu->arch.exception.has_payload = has_payload; in kvm_multiple_exception()
557 vcpu->arch.exception.payload = payload; in kvm_multiple_exception()
564 prev_nr = vcpu->arch.exception.nr; in kvm_multiple_exception()
566 /* triple fault -> shutdown */ in kvm_multiple_exception()
575 * Generate double fault per SDM Table 5-5. Set in kvm_multiple_exception()
579 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
580 vcpu->arch.exception.injected = false; in kvm_multiple_exception()
581 vcpu->arch.exception.has_error_code = true; in kvm_multiple_exception()
582 vcpu->arch.exception.nr = DF_VECTOR; in kvm_multiple_exception()
583 vcpu->arch.exception.error_code = 0; in kvm_multiple_exception()
584 vcpu->arch.exception.has_payload = false; in kvm_multiple_exception()
585 vcpu->arch.exception.payload = 0; in kvm_multiple_exception()
588 that instruction re-execution will regenerate lost in kvm_multiple_exception()
632 ++vcpu->stat.pf_guest; in kvm_inject_page_fault()
633 vcpu->arch.exception.nested_apf = in kvm_inject_page_fault()
634 is_guest_mode(vcpu) && fault->async_page_fault; in kvm_inject_page_fault()
635 if (vcpu->arch.exception.nested_apf) { in kvm_inject_page_fault()
636 vcpu->arch.apf.nested_apf_token = fault->address; in kvm_inject_page_fault()
637 kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code); in kvm_inject_page_fault()
639 kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code, in kvm_inject_page_fault()
640 fault->address); in kvm_inject_page_fault()
649 WARN_ON_ONCE(fault->vector != PF_VECTOR); in kvm_inject_emulated_page_fault()
651 fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu : in kvm_inject_emulated_page_fault()
652 vcpu->arch.walk_mmu; in kvm_inject_emulated_page_fault()
658 if ((fault->error_code & PFERR_PRESENT_MASK) && in kvm_inject_emulated_page_fault()
659 !(fault->error_code & PFERR_RSVD_MASK)) in kvm_inject_emulated_page_fault()
660 kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address, in kvm_inject_emulated_page_fault()
661 fault_mmu->root_hpa); in kvm_inject_emulated_page_fault()
663 fault_mmu->inject_page_fault(vcpu, fault); in kvm_inject_emulated_page_fault()
664 return fault->nested_page_fault; in kvm_inject_emulated_page_fault()
670 atomic_inc(&vcpu->arch.nmi_queued); in kvm_inject_nmi()
724 real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception); in kvm_read_guest_page_mmu()
726 return -EFAULT; in kvm_read_guest_page_mmu()
737 return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn, in kvm_read_nested_guest_page()
753 unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2; in load_pdptrs()
756 u64 pdpte[ARRAY_SIZE(mmu->pdptrs)]; in load_pdptrs()
774 memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); in load_pdptrs()
785 u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)]; in pdptrs_changed()
797 offset = (kvm_read_cr3(vcpu) & 0xffffffe0ul) & (PAGE_SIZE - 1); in pdptrs_changed()
803 return memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0; in pdptrs_changed()
829 if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) && in kvm_set_cr0()
840 if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) && in kvm_set_cr0()
842 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu))) in kvm_set_cr0()
859 kvm_arch_has_noncoherent_dma(vcpu->kvm) && in kvm_set_cr0()
860 !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) in kvm_set_cr0()
861 kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL); in kvm_set_cr0()
877 if (vcpu->arch.xcr0 != host_xcr0) in kvm_load_guest_xsave_state()
878 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); in kvm_load_guest_xsave_state()
880 if (vcpu->arch.xsaves_enabled && in kvm_load_guest_xsave_state()
881 vcpu->arch.ia32_xss != host_xss) in kvm_load_guest_xsave_state()
882 wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss); in kvm_load_guest_xsave_state()
887 (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU)) && in kvm_load_guest_xsave_state()
888 vcpu->arch.pkru != vcpu->arch.host_pkru) in kvm_load_guest_xsave_state()
889 __write_pkru(vcpu->arch.pkru); in kvm_load_guest_xsave_state()
897 (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU))) { in kvm_load_host_xsave_state()
898 vcpu->arch.pkru = rdpkru(); in kvm_load_host_xsave_state()
899 if (vcpu->arch.pkru != vcpu->arch.host_pkru) in kvm_load_host_xsave_state()
900 __write_pkru(vcpu->arch.host_pkru); in kvm_load_host_xsave_state()
905 if (vcpu->arch.xcr0 != host_xcr0) in kvm_load_host_xsave_state()
908 if (vcpu->arch.xsaves_enabled && in kvm_load_host_xsave_state()
909 vcpu->arch.ia32_xss != host_xss) in kvm_load_host_xsave_state()
919 u64 old_xcr0 = vcpu->arch.xcr0; in __kvm_set_xcr()
935 valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP; in __kvm_set_xcr()
949 vcpu->arch.xcr0 = xcr0; in __kvm_set_xcr()
970 return -EINVAL; in kvm_valid_cr4()
972 if (cr4 & vcpu->arch.cr4_guest_rsvd_bits) in kvm_valid_cr4()
973 return -EINVAL; in kvm_valid_cr4()
996 && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, in kvm_set_cr4()
1044 (cr3 & vcpu->arch.cr3_lm_rsvd_bits)) in kvm_set_cr3()
1047 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) in kvm_set_cr3()
1051 vcpu->arch.cr3 = cr3; in kvm_set_cr3()
1065 vcpu->arch.cr8 = cr8; in kvm_set_cr8()
1075 return vcpu->arch.cr8; in kvm_get_cr8()
1083 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { in kvm_update_dr0123()
1085 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_update_dr0123()
1086 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD; in kvm_update_dr0123()
1094 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) in kvm_update_dr7()
1095 dr7 = vcpu->arch.guest_debug_dr7; in kvm_update_dr7()
1097 dr7 = vcpu->arch.dr7; in kvm_update_dr7()
1099 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
1101 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
1116 size_t size = ARRAY_SIZE(vcpu->arch.db); in __kvm_set_dr()
1120 vcpu->arch.db[array_index_nospec(dr, size)] = val; in __kvm_set_dr()
1121 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) in __kvm_set_dr()
1122 vcpu->arch.eff_db[dr] = val; in __kvm_set_dr()
1127 return -1; /* #GP */ in __kvm_set_dr()
1128 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); in __kvm_set_dr()
1133 return -1; /* #GP */ in __kvm_set_dr()
1134 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; in __kvm_set_dr()
1154 size_t size = ARRAY_SIZE(vcpu->arch.db); in kvm_get_dr()
1158 *val = vcpu->arch.db[array_index_nospec(dr, size)]; in kvm_get_dr()
1162 *val = vcpu->arch.dr6; in kvm_get_dr()
1166 *val = vcpu->arch.dr7; in kvm_get_dr()
1175 u32 ecx = kvm_rcx_read(vcpu); in kvm_rdpmc()
1179 err = kvm_pmu_rdpmc(vcpu, ecx, &data); in kvm_rdpmc()
1196 * kvm-specific. Those are put in emulated_msrs_all; filtering of emulated_msrs
1312 * List of msr numbers which are used to expose MSR-based features that
1380 * - nothing to do if TSX is disabled on the host. in kvm_get_arch_capabilities()
1381 * - we emulate TSX_CTRL if present on the host. in kvm_get_arch_capabilities()
1394 switch (msr->index) { in kvm_get_msr_feature()
1396 msr->data = kvm_get_arch_capabilities(); in kvm_get_msr_feature()
1399 rdmsrl_safe(msr->index, &msr->data); in kvm_get_msr_feature()
1459 u64 old_efer = vcpu->arch.efer; in set_efer()
1460 u64 efer = msr_info->data; in set_efer()
1466 if (!msr_info->host_initiated) { in set_efer()
1471 (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) in set_efer()
1476 efer |= vcpu->arch.efer & EFER_LMA; in set_efer()
1499 struct kvm *kvm = vcpu->kvm; in kvm_msr_allowed()
1500 struct msr_bitmap_range *ranges = kvm->arch.msr_filter.ranges; in kvm_msr_allowed()
1501 u32 count = kvm->arch.msr_filter.count; in kvm_msr_allowed()
1503 bool r = kvm->arch.msr_filter.default_allow; in kvm_msr_allowed()
1511 idx = srcu_read_lock(&kvm->srcu); in kvm_msr_allowed()
1520 r = !!test_bit(index - start, bitmap); in kvm_msr_allowed()
1525 srcu_read_unlock(&kvm->srcu, idx); in kvm_msr_allowed()
1534 * Returns 0 on success, non-0 otherwise.
1558 * non-canonical address is written on Intel but not on in __kvm_set_msr()
1559 * AMD (which ignores the top 32-bits, because it does in __kvm_set_msr()
1560 * not implement 64-bit SYSENTER). in __kvm_set_msr()
1562 * 64-bit code should hence be able to write a non-canonical in __kvm_set_msr()
1564 * vmentry does not fail on Intel after writing a non-canonical in __kvm_set_msr()
1566 * invokes 64-bit SYSENTER. in __kvm_set_msr()
1593 * Returns 0 on success, non-0 otherwise.
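Callers treat a non-zero return as a fault to reflect into the guest. A sketch of typical use, modeled on the kvm_emulate_rdmsr() path shown below:

	u32 ecx = kvm_rcx_read(vcpu);
	u64 data;

	if (kvm_get_msr(vcpu, ecx, &data)) {
		trace_kvm_msr_read_ex(ecx);
		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);	/* #GP to guest */
	}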
1643 if (vcpu->run->msr.error) { in complete_emulated_msr()
1647 kvm_rax_write(vcpu, (u32)vcpu->run->msr.data); in complete_emulated_msr()
1648 kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32); in complete_emulated_msr()
1684 if (!(vcpu->kvm->arch.user_space_msr_mask & msr_reason)) in kvm_msr_user_space()
1687 vcpu->run->exit_reason = exit_reason; in kvm_msr_user_space()
1688 vcpu->run->msr.error = 0; in kvm_msr_user_space()
1689 memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad)); in kvm_msr_user_space()
1690 vcpu->run->msr.reason = msr_reason; in kvm_msr_user_space()
1691 vcpu->run->msr.index = index; in kvm_msr_user_space()
1692 vcpu->run->msr.data = data; in kvm_msr_user_space()
1693 vcpu->arch.complete_userspace_io = completion; in kvm_msr_user_space()
1712 u32 ecx = kvm_rcx_read(vcpu); in kvm_emulate_rdmsr()
1716 r = kvm_get_msr(vcpu, ecx, &data); in kvm_emulate_rdmsr()
1719 if (r && kvm_get_msr_user_space(vcpu, ecx, r)) { in kvm_emulate_rdmsr()
1726 trace_kvm_msr_read_ex(ecx); in kvm_emulate_rdmsr()
1731 trace_kvm_msr_read(ecx, data); in kvm_emulate_rdmsr()
1733 kvm_rax_write(vcpu, data & -1u); in kvm_emulate_rdmsr()
1734 kvm_rdx_write(vcpu, (data >> 32) & -1u); in kvm_emulate_rdmsr()
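Note: RDMSR returns its result in EDX:EAX, so the 64-bit value is split; "data & -1u" keeps the low 32 bits for RAX and the shifted high half lands in RDX.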
1741 u32 ecx = kvm_rcx_read(vcpu); in kvm_emulate_wrmsr()
1745 r = kvm_set_msr(vcpu, ecx, data); in kvm_emulate_wrmsr()
1748 if (r && kvm_set_msr_user_space(vcpu, ecx, data, r)) in kvm_emulate_wrmsr()
1758 trace_kvm_msr_write_ex(ecx, data); in kvm_emulate_wrmsr()
1763 trace_kvm_msr_write(ecx, data); in kvm_emulate_wrmsr()
1770 return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) || in kvm_vcpu_exit_request()
1777 * i.e. the sending of IPI, sending IPI early in the VM-Exit flow reduces
1784 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic)) in handle_fastpath_set_x2apic_icr_irqoff()
1793 kvm_apic_send_ipi(vcpu->arch.apic, (u32)data, (u32)(data >> 32)); in handle_fastpath_set_x2apic_icr_irqoff()
1794 kvm_lapic_set_reg(vcpu->arch.apic, APIC_ICR2, (u32)(data >> 32)); in handle_fastpath_set_x2apic_icr_irqoff()
1795 kvm_lapic_set_reg(vcpu->arch.apic, APIC_ICR, (u32)data); in handle_fastpath_set_x2apic_icr_irqoff()
1884 write_seqcount_begin(&vdata->seq); in update_pvclock_gtod()
1887 vdata->clock.vclock_mode = tk->tkr_mono.clock->vdso_clock_mode; in update_pvclock_gtod()
1888 vdata->clock.cycle_last = tk->tkr_mono.cycle_last; in update_pvclock_gtod()
1889 vdata->clock.mask = tk->tkr_mono.mask; in update_pvclock_gtod()
1890 vdata->clock.mult = tk->tkr_mono.mult; in update_pvclock_gtod()
1891 vdata->clock.shift = tk->tkr_mono.shift; in update_pvclock_gtod()
1892 vdata->clock.base_cycles = tk->tkr_mono.xtime_nsec; in update_pvclock_gtod()
1893 vdata->clock.offset = tk->tkr_mono.base; in update_pvclock_gtod()
1895 vdata->raw_clock.vclock_mode = tk->tkr_raw.clock->vdso_clock_mode; in update_pvclock_gtod()
1896 vdata->raw_clock.cycle_last = tk->tkr_raw.cycle_last; in update_pvclock_gtod()
1897 vdata->raw_clock.mask = tk->tkr_raw.mask; in update_pvclock_gtod()
1898 vdata->raw_clock.mult = tk->tkr_raw.mult; in update_pvclock_gtod()
1899 vdata->raw_clock.shift = tk->tkr_raw.shift; in update_pvclock_gtod()
1900 vdata->raw_clock.base_cycles = tk->tkr_raw.xtime_nsec; in update_pvclock_gtod()
1901 vdata->raw_clock.offset = tk->tkr_raw.base; in update_pvclock_gtod()
1903 vdata->wall_time_sec = tk->xtime_sec; in update_pvclock_gtod()
1905 vdata->offs_boot = tk->offs_boot; in update_pvclock_gtod()
1907 write_seqcount_end(&vdata->seq); in update_pvclock_gtod()
1930 kvm->arch.wall_clock = wall_clock; in kvm_write_wall_clock()
1952 wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm); in kvm_write_wall_clock()
1967 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_write_system_time()
1969 if (vcpu->vcpu_id == 0 && !host_initiated) { in kvm_write_system_time()
1970 if (ka->boot_vcpu_runs_old_kvmclock != old_msr) in kvm_write_system_time()
1973 ka->boot_vcpu_runs_old_kvmclock = old_msr; in kvm_write_system_time()
1976 vcpu->arch.time = system_time; in kvm_write_system_time()
1980 vcpu->arch.pv_time_enabled = false; in kvm_write_system_time()
1984 if (!kvm_gfn_to_hva_cache_init(vcpu->kvm, in kvm_write_system_time()
1985 &vcpu->arch.pv_time, system_time & ~1ULL, in kvm_write_system_time()
1987 vcpu->arch.pv_time_enabled = true; in kvm_write_system_time()
2010 shift--; in kvm_get_time_scale()
2046 vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio; in set_tsc_khz()
2053 vcpu->arch.tsc_catchup = 1; in set_tsc_khz()
2054 vcpu->arch.tsc_always_catchup = 1; in set_tsc_khz()
2058 return -1; in set_tsc_khz()
2062 /* TSC scaling required - calculate ratio */ in set_tsc_khz()
2067 pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n", in set_tsc_khz()
2069 return -1; in set_tsc_khz()
2072 vcpu->arch.tsc_scaling_ratio = ratio; in set_tsc_khz()
2084 vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio; in kvm_set_tsc_khz()
2085 return -1; in kvm_set_tsc_khz()
2089 kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC, in kvm_set_tsc_khz()
2090 &vcpu->arch.virtual_tsc_shift, in kvm_set_tsc_khz()
2091 &vcpu->arch.virtual_tsc_mult); in kvm_set_tsc_khz()
2092 vcpu->arch.virtual_tsc_khz = user_tsc_khz; in kvm_set_tsc_khz()
2100 thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm); in kvm_set_tsc_khz()
2111 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, in compute_guest_tsc()
2112 vcpu->arch.virtual_tsc_mult, in compute_guest_tsc()
2113 vcpu->arch.virtual_tsc_shift); in compute_guest_tsc()
2114 tsc += vcpu->arch.this_tsc_write; in compute_guest_tsc()
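compute_guest_tsc() turns an elapsed-ns delta into guest TSC ticks with pvclock's fixed-point scaling. A self-contained sketch, assuming pvclock_scale_delta()'s ((delta << shift) * mult) >> 32 semantics:

	static u64 scale_delta(u64 delta, u32 mul_frac, int shift)
	{
		if (shift < 0)
			delta >>= -shift;
		else
			delta <<= shift;
		/* 64x32 multiply, then drop the 32 fractional bits */
		return (u64)(((unsigned __int128)delta * mul_frac) >> 32);
	}

For example, a 2 GHz virtual TSC against a 1 ns base can use shift = 2 and mul_frac = 0x80000000: ((delta << 2) * 2^31) >> 32 == 2 * delta.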
2127 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_track_tsc_matching()
2130 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == in kvm_track_tsc_matching()
2131 atomic_read(&vcpu->kvm->online_vcpus)); in kvm_track_tsc_matching()
2141 if (ka->use_master_clock || in kvm_track_tsc_matching()
2142 (gtod_is_based_on_tsc(gtod->clock.vclock_mode) && vcpus_matched)) in kvm_track_tsc_matching()
2145 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, in kvm_track_tsc_matching()
2146 atomic_read(&vcpu->kvm->online_vcpus), in kvm_track_tsc_matching()
2147 ka->use_master_clock, gtod->clock.vclock_mode); in kvm_track_tsc_matching()
2154 * The most significant 64-N bits (mult) of ratio represent the
2157 * point number (mult + frac * 2^(-N)).
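Worked example, assuming N = 48 fractional bits: a guest clocked at half the host TSC frequency gets ratio = 1ULL << 47 (mult = 0, frac = 2^47), and the scaled value is mul_u64_u64_shr(host_tsc, ratio, 48), i.e. host_tsc / 2 computed through a 128-bit intermediate so no high bits are lost.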
2169 u64 ratio = vcpu->arch.tsc_scaling_ratio; in kvm_scale_tsc()
2184 return target_tsc - tsc; in kvm_compute_tsc_offset()
2189 return vcpu->arch.l1_tsc_offset + kvm_scale_tsc(vcpu, host_tsc); in kvm_read_l1_tsc()
2195 vcpu->arch.l1_tsc_offset = offset; in kvm_vcpu_write_tsc_offset()
2196 vcpu->arch.tsc_offset = kvm_x86_ops.write_l1_tsc_offset(vcpu, offset); in kvm_vcpu_write_tsc_offset()
2203 * TSC is marked unstable when we're running on Hyper-V, in kvm_check_tsc_unstable()
2214 struct kvm *kvm = vcpu->kvm; in kvm_synchronize_tsc()
2221 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); in kvm_synchronize_tsc()
2224 elapsed = ns - kvm->arch.last_tsc_nsec; in kvm_synchronize_tsc()
2226 if (vcpu->arch.virtual_tsc_khz) { in kvm_synchronize_tsc()
2229 * detection of vcpu initialization -- need to sync in kvm_synchronize_tsc()
2235 u64 tsc_exp = kvm->arch.last_tsc_write + in kvm_synchronize_tsc()
2237 u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL; in kvm_synchronize_tsc()
2255 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { in kvm_synchronize_tsc()
2257 offset = kvm->arch.cur_tsc_offset; in kvm_synchronize_tsc()
2264 already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation); in kvm_synchronize_tsc()
2273 * These values are tracked in kvm->arch.cur_xxx variables. in kvm_synchronize_tsc()
2275 kvm->arch.cur_tsc_generation++; in kvm_synchronize_tsc()
2276 kvm->arch.cur_tsc_nsec = ns; in kvm_synchronize_tsc()
2277 kvm->arch.cur_tsc_write = data; in kvm_synchronize_tsc()
2278 kvm->arch.cur_tsc_offset = offset; in kvm_synchronize_tsc()
2286 kvm->arch.last_tsc_nsec = ns; in kvm_synchronize_tsc()
2287 kvm->arch.last_tsc_write = data; in kvm_synchronize_tsc()
2288 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; in kvm_synchronize_tsc()
2290 vcpu->arch.last_guest_tsc = data; in kvm_synchronize_tsc()
2293 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; in kvm_synchronize_tsc()
2294 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; in kvm_synchronize_tsc()
2295 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; in kvm_synchronize_tsc()
2298 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); in kvm_synchronize_tsc()
2300 spin_lock(&kvm->arch.pvclock_gtod_sync_lock); in kvm_synchronize_tsc()
2302 kvm->arch.nr_vcpus_matched_tsc = 0; in kvm_synchronize_tsc()
2304 kvm->arch.nr_vcpus_matched_tsc++; in kvm_synchronize_tsc()
2308 spin_unlock(&kvm->arch.pvclock_gtod_sync_lock); in kvm_synchronize_tsc()
2314 u64 tsc_offset = vcpu->arch.l1_tsc_offset; in adjust_tsc_offset_guest()
2320 if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio) in adjust_tsc_offset_host()
2354 switch (clock->vclock_mode) { in vgettsc()
2361 v = (tsc_pg_val - clock->cycle_last) & in vgettsc()
2362 clock->mask; in vgettsc()
2371 v = (*tsc_timestamp - clock->cycle_last) & in vgettsc()
2372 clock->mask; in vgettsc()
2381 return v * clock->mult; in vgettsc()
2392 seq = read_seqcount_begin(&gtod->seq); in do_monotonic_raw()
2393 ns = gtod->raw_clock.base_cycles; in do_monotonic_raw()
2394 ns += vgettsc(&gtod->raw_clock, tsc_timestamp, &mode); in do_monotonic_raw()
2395 ns >>= gtod->raw_clock.shift; in do_monotonic_raw()
2396 ns += ktime_to_ns(ktime_add(gtod->raw_clock.offset, gtod->offs_boot)); in do_monotonic_raw()
2397 } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); in do_monotonic_raw()
2411 seq = read_seqcount_begin(&gtod->seq); in do_realtime()
2412 ts->tv_sec = gtod->wall_time_sec; in do_realtime()
2413 ns = gtod->clock.base_cycles; in do_realtime()
2414 ns += vgettsc(&gtod->clock, tsc_timestamp, &mode); in do_realtime()
2415 ns >>= gtod->clock.shift; in do_realtime()
2416 } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); in do_realtime()
2418 ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); in do_realtime()
2419 ts->tv_nsec = ns; in do_realtime()
2463 * 4. ret0 = timespec0 + (rdtsc - tsc0) |
2464 * 5. | ret1 = timespec1 + (rdtsc - tsc1)
2465 * | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
2469 * - ret0 < ret1
2470 * - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
2472 * - 0 < N - M => M < N
2491 struct kvm_arch *ka = &kvm->arch; in pvclock_update_vm_gtod_copy()
2495 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == in pvclock_update_vm_gtod_copy()
2496 atomic_read(&kvm->online_vcpus)); in pvclock_update_vm_gtod_copy()
2503 &ka->master_kernel_ns, in pvclock_update_vm_gtod_copy()
2504 &ka->master_cycle_now); in pvclock_update_vm_gtod_copy()
2506 ka->use_master_clock = host_tsc_clocksource && vcpus_matched in pvclock_update_vm_gtod_copy()
2507 && !ka->backwards_tsc_observed in pvclock_update_vm_gtod_copy()
2508 && !ka->boot_vcpu_runs_old_kvmclock; in pvclock_update_vm_gtod_copy()
2510 if (ka->use_master_clock) in pvclock_update_vm_gtod_copy()
2514 trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode, in pvclock_update_vm_gtod_copy()
2529 struct kvm_arch *ka = &kvm->arch; in kvm_gen_update_masterclock()
2531 spin_lock(&ka->pvclock_gtod_sync_lock); in kvm_gen_update_masterclock()
2543 spin_unlock(&ka->pvclock_gtod_sync_lock); in kvm_gen_update_masterclock()
2549 struct kvm_arch *ka = &kvm->arch; in get_kvmclock_ns()
2553 spin_lock(&ka->pvclock_gtod_sync_lock); in get_kvmclock_ns()
2554 if (!ka->use_master_clock) { in get_kvmclock_ns()
2555 spin_unlock(&ka->pvclock_gtod_sync_lock); in get_kvmclock_ns()
2556 return get_kvmclock_base_ns() + ka->kvmclock_offset; in get_kvmclock_ns()
2559 hv_clock.tsc_timestamp = ka->master_cycle_now; in get_kvmclock_ns()
2560 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; in get_kvmclock_ns()
2561 spin_unlock(&ka->pvclock_gtod_sync_lock); in get_kvmclock_ns()
2567 kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, in get_kvmclock_ns()
2572 ret = get_kvmclock_base_ns() + ka->kvmclock_offset; in get_kvmclock_ns()
2581 struct kvm_vcpu_arch *vcpu = &v->arch; in kvm_setup_pvclock_page()
2584 if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time, in kvm_setup_pvclock_page()
2599 * and third write. The vcpu->pv_time cache is still valid, because the in kvm_setup_pvclock_page()
2607 vcpu->hv_clock.version = guest_hv_clock.version + 1; in kvm_setup_pvclock_page()
2608 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_setup_pvclock_page()
2609 &vcpu->hv_clock, in kvm_setup_pvclock_page()
2610 sizeof(vcpu->hv_clock.version)); in kvm_setup_pvclock_page()
2615 vcpu->hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED); in kvm_setup_pvclock_page()
2617 if (vcpu->pvclock_set_guest_stopped_request) { in kvm_setup_pvclock_page()
2618 vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED; in kvm_setup_pvclock_page()
2619 vcpu->pvclock_set_guest_stopped_request = false; in kvm_setup_pvclock_page()
2622 trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock); in kvm_setup_pvclock_page()
2624 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_setup_pvclock_page()
2625 &vcpu->hv_clock, in kvm_setup_pvclock_page()
2626 sizeof(vcpu->hv_clock)); in kvm_setup_pvclock_page()
2630 vcpu->hv_clock.version++; in kvm_setup_pvclock_page()
2631 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_setup_pvclock_page()
2632 &vcpu->hv_clock, in kvm_setup_pvclock_page()
2633 sizeof(vcpu->hv_clock.version)); in kvm_setup_pvclock_page()
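The version bumps above form a seqlock: version goes odd before the payload write and back to even after it, so a reader that sees an odd or changed version retries. A sketch of the matching guest-side loop, assuming it mirrors the pvclock readers in arch/x86/kernel/pvclock.c:

	struct pvclock_vcpu_time_info dst;
	u32 version;

	do {
		version = src->version;
		virt_rmb();
		dst = *src;		/* snapshot the whole record */
		virt_rmb();
	} while ((version & 1) || version != src->version);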
2639 struct kvm_vcpu_arch *vcpu = &v->arch; in kvm_guest_time_update()
2640 struct kvm_arch *ka = &v->kvm->arch; in kvm_guest_time_update()
2653 spin_lock(&ka->pvclock_gtod_sync_lock); in kvm_guest_time_update()
2654 use_master_clock = ka->use_master_clock; in kvm_guest_time_update()
2656 host_tsc = ka->master_cycle_now; in kvm_guest_time_update()
2657 kernel_ns = ka->master_kernel_ns; in kvm_guest_time_update()
2659 spin_unlock(&ka->pvclock_gtod_sync_lock); in kvm_guest_time_update()
2686 if (vcpu->tsc_catchup) { in kvm_guest_time_update()
2689 adjust_tsc_offset_guest(v, tsc - tsc_timestamp); in kvm_guest_time_update()
2701 if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) { in kvm_guest_time_update()
2702 kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL, in kvm_guest_time_update()
2703 &vcpu->hv_clock.tsc_shift, in kvm_guest_time_update()
2704 &vcpu->hv_clock.tsc_to_system_mul); in kvm_guest_time_update()
2705 vcpu->hw_tsc_khz = tgt_tsc_khz; in kvm_guest_time_update()
2708 vcpu->hv_clock.tsc_timestamp = tsc_timestamp; in kvm_guest_time_update()
2709 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; in kvm_guest_time_update()
2710 vcpu->last_guest_tsc = tsc_timestamp; in kvm_guest_time_update()
2717 vcpu->hv_clock.flags = pvclock_flags; in kvm_guest_time_update()
2719 if (vcpu->pv_time_enabled) in kvm_guest_time_update()
2721 if (v == kvm_get_vcpu(v->kvm, 0)) in kvm_guest_time_update()
2722 kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock); in kvm_guest_time_update()
2728 * vcpu->cpu migration, should not allow system_timestamp from
2734 * We need to rate-limit these requests though, as they can
2737 * by the delay we use to rate-limit the updates.
2759 struct kvm *kvm = v->kvm; in kvm_gen_kvmclock_update()
2762 schedule_delayed_work(&kvm->arch.kvmclock_update_work, in kvm_gen_kvmclock_update()
2778 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0); in kvmclock_sync_fn()
2779 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, in kvmclock_sync_fn()
2790 return !!(vcpu->arch.msr_hwcr & BIT_ULL(18)); in can_set_mci_status()
2797 u64 mcg_cap = vcpu->arch.mcg_cap; in set_msr_mce()
2799 u32 msr = msr_info->index; in set_msr_mce()
2800 u64 data = msr_info->data; in set_msr_mce()
2804 vcpu->arch.mcg_status = data; in set_msr_mce()
2808 (data || !msr_info->host_initiated)) in set_msr_mce()
2812 vcpu->arch.mcg_ctl = data; in set_msr_mce()
2818 msr - MSR_IA32_MC0_CTL, in set_msr_mce()
2819 MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL); in set_msr_mce()
2828 return -1; in set_msr_mce()
2831 if (!msr_info->host_initiated && in set_msr_mce()
2834 return -1; in set_msr_mce()
2837 vcpu->arch.mce_banks[offset] = data; in set_msr_mce()
2847 struct kvm *kvm = vcpu->kvm; in xen_hvm_config()
2849 u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64 in xen_hvm_config()
2850 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32; in xen_hvm_config()
2851 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64 in xen_hvm_config()
2852 : kvm->arch.xen_hvm_config.blob_size_32; in xen_hvm_config()
2875 return (vcpu->arch.apf.msr_en_val & mask) == mask; in kvm_pv_async_pf_enabled()
2897 vcpu->arch.apf.msr_en_val = data; in kvm_pv_enable_async_pf()
2905 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, in kvm_pv_enable_async_pf()
2909 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); in kvm_pv_enable_async_pf()
2910 vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; in kvm_pv_enable_async_pf()
2919 /* Bits 8-63 are reserved */ in kvm_pv_enable_async_pf_int()
2926 vcpu->arch.apf.msr_int_val = data; in kvm_pv_enable_async_pf_int()
2928 vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK; in kvm_pv_enable_async_pf_int()
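Per the comment, the elided validity check presumably rejects any value with a bit above 7 set (e.g. "if (data >> 8) return 1;"), leaving bits 0-7 for the notification vector; that exact check is an assumption, as it is not part of this excerpt.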
2935 vcpu->arch.pv_time_enabled = false; in kvmclock_reset()
2936 vcpu->arch.time = 0; in kvmclock_reset()
2941 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb_all()
2947 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb_guest()
2956 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in record_steal_time()
2959 /* -EAGAIN is returned in atomic context so we can just return. */ in record_steal_time()
2960 if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, in record_steal_time()
2961 &map, &vcpu->arch.st.cache, false)) in record_steal_time()
2965 offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS); in record_steal_time()
2972 trace_kvm_pv_tlb_flush(vcpu->vcpu_id, in record_steal_time()
2973 st->preempted & KVM_VCPU_FLUSH_TLB); in record_steal_time()
2974 if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB) in record_steal_time()
2978 vcpu->arch.st.preempted = 0; in record_steal_time()
2980 if (st->version & 1) in record_steal_time()
2981 st->version += 1; /* first time write, random junk */ in record_steal_time()
2983 st->version += 1; in record_steal_time()
2987 st->steal += current->sched_info.run_delay - in record_steal_time()
2988 vcpu->arch.st.last_steal; in record_steal_time()
2989 vcpu->arch.st.last_steal = current->sched_info.run_delay; in record_steal_time()
2993 st->version += 1; in record_steal_time()
2995 kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false); in record_steal_time()
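The KVM_VCPU_FLUSH_TLB handshake runs both ways: the guest flags a preempted vCPU's steal-time record instead of sending it a flush IPI, and the host performs the deferred flush here before the vCPU runs again. A condensed sketch of the guest half, assuming it follows kvm_flush_tlb_others() in arch/x86/kernel/kvm.c:

	u8 state = READ_ONCE(src->preempted);

	if ((state & KVM_VCPU_PREEMPTED) &&
	    try_cmpxchg(&src->preempted, &state,
			state | KVM_VCPU_FLUSH_TLB))
		__cpumask_clear_cpu(cpu, flushmask);	/* IPI not needed */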
3001 u32 msr = msr_info->index; in kvm_set_msr_common()
3002 u64 data = msr_info->data; in kvm_set_msr_common()
3015 if (msr_info->host_initiated) in kvm_set_msr_common()
3016 vcpu->arch.microcode_version = data; in kvm_set_msr_common()
3019 if (!msr_info->host_initiated) in kvm_set_msr_common()
3021 vcpu->arch.arch_capabilities = data; in kvm_set_msr_common()
3026 if (!msr_info->host_initiated) in kvm_set_msr_common()
3033 vcpu->arch.perf_capabilities = data; in kvm_set_msr_common()
3046 vcpu->arch.msr_hwcr = data; in kvm_set_msr_common()
3062 /* We support the non-activated case already */ in kvm_set_msr_common()
3065 /* Values other than LBR and BTF are vendor-specific, in kvm_set_msr_common()
3083 if (!msr_info->host_initiated) { in kvm_set_msr_common()
3084 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; in kvm_set_msr_common()
3087 vcpu->arch.ia32_tsc_adjust_msr = data; in kvm_set_msr_common()
3091 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) && in kvm_set_msr_common()
3092 ((vcpu->arch.ia32_misc_enable_msr ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) { in kvm_set_msr_common()
3095 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
3098 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
3102 if (!msr_info->host_initiated) in kvm_set_msr_common()
3104 vcpu->arch.smbase = data; in kvm_set_msr_common()
3107 vcpu->arch.msr_ia32_power_ctl = data; in kvm_set_msr_common()
3110 if (msr_info->host_initiated) { in kvm_set_msr_common()
3113 u64 adj = kvm_compute_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset; in kvm_set_msr_common()
3115 vcpu->arch.ia32_tsc_adjust_msr += adj; in kvm_set_msr_common()
3119 if (!msr_info->host_initiated && in kvm_set_msr_common()
3129 vcpu->arch.ia32_xss = data; in kvm_set_msr_common()
3132 if (!msr_info->host_initiated) in kvm_set_msr_common()
3134 vcpu->arch.smi_count = data; in kvm_set_msr_common()
3140 kvm_write_wall_clock(vcpu->kvm, data); in kvm_set_msr_common()
3146 kvm_write_wall_clock(vcpu->kvm, data); in kvm_set_msr_common()
3152 kvm_write_system_time(vcpu, data, false, msr_info->host_initiated); in kvm_set_msr_common()
3158 kvm_write_system_time(vcpu, data, true, msr_info->host_initiated); in kvm_set_msr_common()
3178 vcpu->arch.apf.pageready_pending = false; in kvm_set_msr_common()
3192 vcpu->arch.st.msr_val = data; in kvm_set_msr_common()
3213 if (data & (-1ULL << 1)) in kvm_set_msr_common()
3216 vcpu->arch.msr_kvm_poll_control = data; in kvm_set_msr_common()
3221 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: in kvm_set_msr_common()
3241 * all pre-dating SVM, but a recommended workaround from in kvm_set_msr_common()
3257 msr_info->host_initiated); in kvm_set_msr_common()
3259 /* Drop writes to this legacy MSR -- see rdmsr in kvm_set_msr_common()
3269 vcpu->arch.osvw.length = data; in kvm_set_msr_common()
3274 vcpu->arch.osvw.status = data; in kvm_set_msr_common()
3277 if (!msr_info->host_initiated || in kvm_set_msr_common()
3281 vcpu->arch.msr_platform_info = data; in kvm_set_msr_common()
3288 vcpu->arch.msr_misc_features_enables = data; in kvm_set_msr_common()
3291 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) in kvm_set_msr_common()
3304 u64 mcg_cap = vcpu->arch.mcg_cap; in get_msr_mce()
3313 data = vcpu->arch.mcg_cap; in get_msr_mce()
3318 data = vcpu->arch.mcg_ctl; in get_msr_mce()
3321 data = vcpu->arch.mcg_status; in get_msr_mce()
3327 msr - MSR_IA32_MC0_CTL, in get_msr_mce()
3328 MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL); in get_msr_mce()
3330 data = vcpu->arch.mce_banks[offset]; in get_msr_mce()
3341 switch (msr_info->index) { in kvm_get_msr_common()
3364 * so for existing CPU-specific MSRs. in kvm_get_msr_common()
3371 msr_info->data = 0; in kvm_get_msr_common()
3378 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) in kvm_get_msr_common()
3380 msr_info->data = 0; in kvm_get_msr_common()
3383 msr_info->data = vcpu->arch.microcode_version; in kvm_get_msr_common()
3386 if (!msr_info->host_initiated && in kvm_get_msr_common()
3389 msr_info->data = vcpu->arch.arch_capabilities; in kvm_get_msr_common()
3392 if (!msr_info->host_initiated && in kvm_get_msr_common()
3395 msr_info->data = vcpu->arch.perf_capabilities; in kvm_get_msr_common()
3398 msr_info->data = vcpu->arch.msr_ia32_power_ctl; in kvm_get_msr_common()
3407 * return L1's TSC value to ensure backwards-compatible in kvm_get_msr_common()
3410 u64 tsc_offset = msr_info->host_initiated ? vcpu->arch.l1_tsc_offset : in kvm_get_msr_common()
3411 vcpu->arch.tsc_offset; in kvm_get_msr_common()
3413 msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + tsc_offset; in kvm_get_msr_common()
3418 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
3420 msr_info->data = 3; in kvm_get_msr_common()
3434 msr_info->data = 1 << 24; in kvm_get_msr_common()
3437 msr_info->data = kvm_get_apic_base(vcpu); in kvm_get_msr_common()
3440 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
3442 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu); in kvm_get_msr_common()
3445 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr; in kvm_get_msr_common()
3448 msr_info->data = vcpu->arch.ia32_misc_enable_msr; in kvm_get_msr_common()
3451 if (!msr_info->host_initiated) in kvm_get_msr_common()
3453 msr_info->data = vcpu->arch.smbase; in kvm_get_msr_common()
3456 msr_info->data = vcpu->arch.smi_count; in kvm_get_msr_common()
3460 msr_info->data = 1000ULL; in kvm_get_msr_common()
3462 msr_info->data |= (((uint64_t)4ULL) << 40); in kvm_get_msr_common()
3465 msr_info->data = vcpu->arch.efer; in kvm_get_msr_common()
3471 msr_info->data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
3477 msr_info->data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
3483 msr_info->data = vcpu->arch.time; in kvm_get_msr_common()
3489 msr_info->data = vcpu->arch.time; in kvm_get_msr_common()
3495 msr_info->data = vcpu->arch.apf.msr_en_val; in kvm_get_msr_common()
3501 msr_info->data = vcpu->arch.apf.msr_int_val; in kvm_get_msr_common()
3507 msr_info->data = 0; in kvm_get_msr_common()
3513 msr_info->data = vcpu->arch.st.msr_val; in kvm_get_msr_common()
3519 msr_info->data = vcpu->arch.pv_eoi.msr_val; in kvm_get_msr_common()
3525 msr_info->data = vcpu->arch.msr_kvm_poll_control; in kvm_get_msr_common()
3532 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: in kvm_get_msr_common()
3533 return get_msr_mce(vcpu, msr_info->index, &msr_info->data, in kvm_get_msr_common()
3534 msr_info->host_initiated); in kvm_get_msr_common()
3536 if (!msr_info->host_initiated && in kvm_get_msr_common()
3539 msr_info->data = vcpu->arch.ia32_xss; in kvm_get_msr_common()
3543 * Provide expected ramp-up count for K7. All other in kvm_get_msr_common()
3551 msr_info->data = 0x20000000; in kvm_get_msr_common()
3563 msr_info->index, &msr_info->data, in kvm_get_msr_common()
3564 msr_info->host_initiated); in kvm_get_msr_common()
3576 msr_info->data = 0xbe702111; in kvm_get_msr_common()
3581 msr_info->data = vcpu->arch.osvw.length; in kvm_get_msr_common()
3586 msr_info->data = vcpu->arch.osvw.status; in kvm_get_msr_common()
3589 if (!msr_info->host_initiated && in kvm_get_msr_common()
3590 !vcpu->kvm->arch.guest_can_read_msr_platform_info) in kvm_get_msr_common()
3592 msr_info->data = vcpu->arch.msr_platform_info; in kvm_get_msr_common()
3595 msr_info->data = vcpu->arch.msr_misc_features_enables; in kvm_get_msr_common()
3598 msr_info->data = vcpu->arch.msr_hwcr; in kvm_get_msr_common()
3601 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) in kvm_get_msr_common()
3621 for (i = 0; i < msrs->nmsrs; ++i) in __msr_io()
3643 r = -EFAULT; in msr_io()
3647 r = -E2BIG; in msr_io()
3652 entries = memdup_user(user_msrs->entries, size); in msr_io()
3662 r = -EFAULT; in msr_io()
3663 if (writeback && copy_to_user(user_msrs->entries, entries, size)) in msr_io()
3795 r = kvm_x86_ops.nested_ops->get_state ? in kvm_vm_ioctl_check_extension()
3796 kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0; in kvm_vm_ioctl_check_extension()
3802 r = kvm_x86_ops.nested_ops->enable_evmcs != NULL; in kvm_vm_ioctl_check_extension()
3829 r = -EFAULT; in kvm_arch_dev_ioctl()
3836 r = -E2BIG; in kvm_arch_dev_ioctl()
3839 r = -EFAULT; in kvm_arch_dev_ioctl()
3840 if (copy_to_user(user_msr_list->indices, &msrs_to_save, in kvm_arch_dev_ioctl()
3843 if (copy_to_user(user_msr_list->indices + num_msrs_to_save, in kvm_arch_dev_ioctl()
3855 r = -EFAULT; in kvm_arch_dev_ioctl()
3859 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, in kvm_arch_dev_ioctl()
3864 r = -EFAULT; in kvm_arch_dev_ioctl()
3871 r = -EFAULT; in kvm_arch_dev_ioctl()
3882 r = -EFAULT; in kvm_arch_dev_ioctl()
3889 r = -E2BIG; in kvm_arch_dev_ioctl()
3892 r = -EFAULT; in kvm_arch_dev_ioctl()
3893 if (copy_to_user(user_msr_list->indices, &msr_based_features, in kvm_arch_dev_ioctl()
3903 r = -EINVAL; in kvm_arch_dev_ioctl()
3917 return kvm_arch_has_noncoherent_dma(vcpu->kvm); in need_emulate_wbinvd()
3925 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_load()
3926 else if (vcpu->cpu != -1 && vcpu->cpu != cpu) in kvm_arch_vcpu_load()
3927 smp_call_function_single(vcpu->cpu, in kvm_arch_vcpu_load()
3934 vcpu->arch.host_pkru = read_pkru(); in kvm_arch_vcpu_load()
3937 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { in kvm_arch_vcpu_load()
3938 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); in kvm_arch_vcpu_load()
3939 vcpu->arch.tsc_offset_adjustment = 0; in kvm_arch_vcpu_load()
3943 if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) { in kvm_arch_vcpu_load()
3944 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : in kvm_arch_vcpu_load()
3945 rdtsc() - vcpu->arch.last_host_tsc; in kvm_arch_vcpu_load()
3951 vcpu->arch.last_guest_tsc); in kvm_arch_vcpu_load()
3953 vcpu->arch.tsc_catchup = 1; in kvm_arch_vcpu_load()
3961 * kvmclock on vcpu->cpu migration in kvm_arch_vcpu_load()
3963 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) in kvm_arch_vcpu_load()
3965 if (vcpu->cpu != cpu) in kvm_arch_vcpu_load()
3967 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
3978 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in kvm_steal_time_set_preempted()
3981 if (vcpu->arch.st.preempted) in kvm_steal_time_set_preempted()
3984 if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map, in kvm_steal_time_set_preempted()
3985 &vcpu->arch.st.cache, true)) in kvm_steal_time_set_preempted()
3989 offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS); in kvm_steal_time_set_preempted()
3991 st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; in kvm_steal_time_set_preempted()
3993 kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true); in kvm_steal_time_set_preempted()
4000 if (vcpu->preempted) in kvm_arch_vcpu_put()
4001 vcpu->arch.preempted_in_kernel = !kvm_x86_ops.get_cpl(vcpu); in kvm_arch_vcpu_put()
4016 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_put()
4018 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_put()
4021 vcpu->arch.last_host_tsc = rdtsc(); in kvm_arch_vcpu_put()
4033 if (vcpu->arch.apicv_active) in kvm_vcpu_ioctl_get_lapic()
4077 if (irq->irq >= KVM_NR_INTERRUPTS) in kvm_vcpu_ioctl_interrupt()
4078 return -EINVAL; in kvm_vcpu_ioctl_interrupt()
4080 if (!irqchip_in_kernel(vcpu->kvm)) { in kvm_vcpu_ioctl_interrupt()
4081 kvm_queue_interrupt(vcpu, irq->irq, false); in kvm_vcpu_ioctl_interrupt()
4087 * With in-kernel LAPIC, we only use this to inject EXTINT, so in kvm_vcpu_ioctl_interrupt()
4088 * fail for in-kernel 8259. in kvm_vcpu_ioctl_interrupt()
4090 if (pic_in_kernel(vcpu->kvm)) in kvm_vcpu_ioctl_interrupt()
4091 return -ENXIO; in kvm_vcpu_ioctl_interrupt()
4093 if (vcpu->arch.pending_external_vector != -1) in kvm_vcpu_ioctl_interrupt()
4094 return -EEXIST; in kvm_vcpu_ioctl_interrupt()
4096 vcpu->arch.pending_external_vector = irq->irq; in kvm_vcpu_ioctl_interrupt()
4118 if (tac->flags) in vcpu_ioctl_tpr_access_reporting()
4119 return -EINVAL; in vcpu_ioctl_tpr_access_reporting()
4120 vcpu->arch.tpr_access_reporting = !!tac->enabled; in vcpu_ioctl_tpr_access_reporting()
4130 r = -EINVAL; in kvm_vcpu_ioctl_x86_setup_mce()
4136 vcpu->arch.mcg_cap = mcg_cap; in kvm_vcpu_ioctl_x86_setup_mce()
4139 vcpu->arch.mcg_ctl = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
4142 vcpu->arch.mce_banks[bank*4] = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
4152 u64 mcg_cap = vcpu->arch.mcg_cap; in kvm_vcpu_ioctl_x86_set_mce()
4154 u64 *banks = vcpu->arch.mce_banks; in kvm_vcpu_ioctl_x86_set_mce()
4156 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL)) in kvm_vcpu_ioctl_x86_set_mce()
4157 return -EINVAL; in kvm_vcpu_ioctl_x86_set_mce()
4162 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) && in kvm_vcpu_ioctl_x86_set_mce()
4163 vcpu->arch.mcg_ctl != ~(u64)0) in kvm_vcpu_ioctl_x86_set_mce()
4165 banks += 4 * mce->bank; in kvm_vcpu_ioctl_x86_set_mce()
4170 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0) in kvm_vcpu_ioctl_x86_set_mce()
4172 if (mce->status & MCI_STATUS_UC) { in kvm_vcpu_ioctl_x86_set_mce()
4173 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || in kvm_vcpu_ioctl_x86_set_mce()
4179 mce->status |= MCI_STATUS_OVER; in kvm_vcpu_ioctl_x86_set_mce()
4180 banks[2] = mce->addr; in kvm_vcpu_ioctl_x86_set_mce()
4181 banks[3] = mce->misc; in kvm_vcpu_ioctl_x86_set_mce()
4182 vcpu->arch.mcg_status = mce->mcg_status; in kvm_vcpu_ioctl_x86_set_mce()
4183 banks[1] = mce->status; in kvm_vcpu_ioctl_x86_set_mce()
4188 mce->status |= MCI_STATUS_OVER; in kvm_vcpu_ioctl_x86_set_mce()
4189 banks[2] = mce->addr; in kvm_vcpu_ioctl_x86_set_mce()
4190 banks[3] = mce->misc; in kvm_vcpu_ioctl_x86_set_mce()
4191 banks[1] = mce->status; in kvm_vcpu_ioctl_x86_set_mce()
4206 * modified under nVMX). Unless the per-VM capability, in kvm_vcpu_ioctl_x86_get_vcpu_events()
4213 if (!vcpu->kvm->arch.exception_payload_enabled && in kvm_vcpu_ioctl_x86_get_vcpu_events()
4214 vcpu->arch.exception.pending && vcpu->arch.exception.has_payload) in kvm_vcpu_ioctl_x86_get_vcpu_events()
4223 if (kvm_exception_is_soft(vcpu->arch.exception.nr)) { in kvm_vcpu_ioctl_x86_get_vcpu_events()
4224 events->exception.injected = 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4225 events->exception.pending = 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4227 events->exception.injected = vcpu->arch.exception.injected; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4228 events->exception.pending = vcpu->arch.exception.pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4234 if (!vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_get_vcpu_events()
4235 events->exception.injected |= in kvm_vcpu_ioctl_x86_get_vcpu_events()
4236 vcpu->arch.exception.pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4238 events->exception.nr = vcpu->arch.exception.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4239 events->exception.has_error_code = vcpu->arch.exception.has_error_code; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4240 events->exception.error_code = vcpu->arch.exception.error_code; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4241 events->exception_has_payload = vcpu->arch.exception.has_payload; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4242 events->exception_payload = vcpu->arch.exception.payload; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4244 events->interrupt.injected = in kvm_vcpu_ioctl_x86_get_vcpu_events()
4245 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4246 events->interrupt.nr = vcpu->arch.interrupt.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4247 events->interrupt.soft = 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4248 events->interrupt.shadow = kvm_x86_ops.get_interrupt_shadow(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4250 events->nmi.injected = vcpu->arch.nmi_injected; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4251 events->nmi.pending = vcpu->arch.nmi_pending != 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4252 events->nmi.masked = kvm_x86_ops.get_nmi_mask(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4253 events->nmi.pad = 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4255 events->sipi_vector = 0; /* never valid when reporting to user space */ in kvm_vcpu_ioctl_x86_get_vcpu_events()
4257 events->smi.smm = is_smm(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4258 events->smi.pending = vcpu->arch.smi_pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4259 events->smi.smm_inside_nmi = in kvm_vcpu_ioctl_x86_get_vcpu_events()
4260 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4261 events->smi.latched_init = kvm_lapic_latched_init(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4263 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING in kvm_vcpu_ioctl_x86_get_vcpu_events()
4266 if (vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_get_vcpu_events()
4267 events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4269 memset(&events->reserved, 0, sizeof(events->reserved)); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4277 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING in kvm_vcpu_ioctl_x86_set_vcpu_events()
4282 return -EINVAL; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4284 if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
4285 if (!vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4286 return -EINVAL; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4287 if (events->exception.pending) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4288 events->exception.injected = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4290 events->exception_has_payload = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4292 events->exception.pending = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4293 events->exception_has_payload = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4296 if ((events->exception.injected || events->exception.pending) && in kvm_vcpu_ioctl_x86_set_vcpu_events()
4297 (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR)) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4298 return -EINVAL; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4301 if (events->flags & KVM_VCPUEVENT_VALID_SMM && in kvm_vcpu_ioctl_x86_set_vcpu_events()
4302 (events->smi.smm || events->smi.pending) && in kvm_vcpu_ioctl_x86_set_vcpu_events()
4303 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4304 return -EINVAL; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4307 vcpu->arch.exception.injected = events->exception.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4308 vcpu->arch.exception.pending = events->exception.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4309 vcpu->arch.exception.nr = events->exception.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4310 vcpu->arch.exception.has_error_code = events->exception.has_error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4311 vcpu->arch.exception.error_code = events->exception.error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4312 vcpu->arch.exception.has_payload = events->exception_has_payload; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4313 vcpu->arch.exception.payload = events->exception_payload; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4315 vcpu->arch.interrupt.injected = events->interrupt.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4316 vcpu->arch.interrupt.nr = events->interrupt.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4317 vcpu->arch.interrupt.soft = events->interrupt.soft; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4318 if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4320 events->interrupt.shadow); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4322 vcpu->arch.nmi_injected = events->nmi.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4323 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4324 vcpu->arch.nmi_pending = events->nmi.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4325 kvm_x86_ops.set_nmi_mask(vcpu, events->nmi.masked); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4327 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && in kvm_vcpu_ioctl_x86_set_vcpu_events()
4329 vcpu->arch.apic->sipi_vector = events->sipi_vector; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4331 if (events->flags & KVM_VCPUEVENT_VALID_SMM) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
4332 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
4333 if (events->smi.smm) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4334 vcpu->arch.hflags |= HF_SMM_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4336 vcpu->arch.hflags &= ~HF_SMM_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4340 vcpu->arch.smi_pending = events->smi.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4342 if (events->smi.smm) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
4343 if (events->smi.smm_inside_nmi) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4344 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4346 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4350 if (events->smi.latched_init) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4351 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4353 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4367 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_get_debugregs()
4369 dbgregs->dr6 = val; in kvm_vcpu_ioctl_x86_get_debugregs()
4370 dbgregs->dr7 = vcpu->arch.dr7; in kvm_vcpu_ioctl_x86_get_debugregs()
4371 dbgregs->flags = 0; in kvm_vcpu_ioctl_x86_get_debugregs()
4372 memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); in kvm_vcpu_ioctl_x86_get_debugregs()
4378 if (dbgregs->flags) in kvm_vcpu_ioctl_x86_set_debugregs()
4379 return -EINVAL; in kvm_vcpu_ioctl_x86_set_debugregs()
4381 if (dbgregs->dr6 & ~0xffffffffull) in kvm_vcpu_ioctl_x86_set_debugregs()
4382 return -EINVAL; in kvm_vcpu_ioctl_x86_set_debugregs()
4383 if (dbgregs->dr7 & ~0xffffffffull) in kvm_vcpu_ioctl_x86_set_debugregs()
4384 return -EINVAL; in kvm_vcpu_ioctl_x86_set_debugregs()
4386 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_set_debugregs()
4388 vcpu->arch.dr6 = dbgregs->dr6; in kvm_vcpu_ioctl_x86_set_debugregs()
4389 vcpu->arch.dr7 = dbgregs->dr7; in kvm_vcpu_ioctl_x86_set_debugregs()
4399 struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave; in fill_xsave()
4400 u64 xstate_bv = xsave->header.xfeatures; in fill_xsave()
4410 xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE; in fill_xsave()
4415 * non-compacted offset. in fill_xsave()
4419 u64 xfeature_mask = valid & -valid; in fill_xsave()
4420 int xfeature_nr = fls64(xfeature_mask) - 1; in fill_xsave()
4424 u32 size, offset, ecx, edx; in fill_xsave()
4426 &size, &offset, &ecx, &edx); in fill_xsave()
4428 memcpy(dest + offset, &vcpu->arch.pkru, in fill_xsave()
4429 sizeof(vcpu->arch.pkru)); in fill_xsave()
4435 valid -= xfeature_mask; in fill_xsave()
4441 struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave; in load_xsave()
4452 xsave->header.xfeatures = xstate_bv; in load_xsave()
4454 xsave->header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED; in load_xsave()
4457 * Copy each region from the non-compacted offset to the in load_xsave()
4462 u64 xfeature_mask = valid & -valid; in load_xsave()
4463 int xfeature_nr = fls64(xfeature_mask) - 1; in load_xsave()
4467 u32 size, offset, ecx, edx; in load_xsave()
4469 &size, &offset, &ecx, &edx); in load_xsave()
4471 memcpy(&vcpu->arch.pkru, src + offset, in load_xsave()
4472 sizeof(vcpu->arch.pkru)); in load_xsave()
4477 valid -= xfeature_mask; in load_xsave()
4486 fill_xsave((u8 *) guest_xsave->region, vcpu); in kvm_vcpu_ioctl_x86_get_xsave()
4488 memcpy(guest_xsave->region, in kvm_vcpu_ioctl_x86_get_xsave()
4489 &vcpu->arch.guest_fpu->state.fxsave, in kvm_vcpu_ioctl_x86_get_xsave()
4491 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] = in kvm_vcpu_ioctl_x86_get_xsave()
4502 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; in kvm_vcpu_ioctl_x86_set_xsave()
4503 u32 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)]; in kvm_vcpu_ioctl_x86_set_xsave()
4512 return -EINVAL; in kvm_vcpu_ioctl_x86_set_xsave()
4513 load_xsave(vcpu, (u8 *)guest_xsave->region); in kvm_vcpu_ioctl_x86_set_xsave()
4517 return -EINVAL; in kvm_vcpu_ioctl_x86_set_xsave()
4518 memcpy(&vcpu->arch.guest_fpu->state.fxsave, in kvm_vcpu_ioctl_x86_set_xsave()
4519 guest_xsave->region, sizeof(struct fxregs_state)); in kvm_vcpu_ioctl_x86_set_xsave()
4528 guest_xcrs->nr_xcrs = 0; in kvm_vcpu_ioctl_x86_get_xcrs()
4532 guest_xcrs->nr_xcrs = 1; in kvm_vcpu_ioctl_x86_get_xcrs()
4533 guest_xcrs->flags = 0; in kvm_vcpu_ioctl_x86_get_xcrs()
4534 guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK; in kvm_vcpu_ioctl_x86_get_xcrs()
4535 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; in kvm_vcpu_ioctl_x86_get_xcrs()
4544 return -EINVAL; in kvm_vcpu_ioctl_x86_set_xcrs()
4546 if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags) in kvm_vcpu_ioctl_x86_set_xcrs()
4547 return -EINVAL; in kvm_vcpu_ioctl_x86_set_xcrs()
4549 for (i = 0; i < guest_xcrs->nr_xcrs; i++) in kvm_vcpu_ioctl_x86_set_xcrs()
4551 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) { in kvm_vcpu_ioctl_x86_set_xcrs()
4553 guest_xcrs->xcrs[i].value); in kvm_vcpu_ioctl_x86_set_xcrs()
4557 r = -EINVAL; in kvm_vcpu_ioctl_x86_set_xcrs()
4569 if (!vcpu->arch.pv_time_enabled) in kvm_set_guest_paused()
4570 return -EINVAL; in kvm_set_guest_paused()
4571 vcpu->arch.pvclock_set_guest_stopped_request = true; in kvm_set_guest_paused()
4583 if (cap->flags) in kvm_vcpu_ioctl_enable_cap()
4584 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
4586 switch (cap->cap) { in kvm_vcpu_ioctl_enable_cap()
4588 if (cap->args[0]) in kvm_vcpu_ioctl_enable_cap()
4589 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
4593 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_vcpu_ioctl_enable_cap()
4594 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
4595 return kvm_hv_activate_synic(vcpu, cap->cap == in kvm_vcpu_ioctl_enable_cap()
4598 if (!kvm_x86_ops.nested_ops->enable_evmcs) in kvm_vcpu_ioctl_enable_cap()
4599 return -ENOTTY; in kvm_vcpu_ioctl_enable_cap()
4600 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version); in kvm_vcpu_ioctl_enable_cap()
4602 user_ptr = (void __user *)(uintptr_t)cap->args[0]; in kvm_vcpu_ioctl_enable_cap()
4605 r = -EFAULT; in kvm_vcpu_ioctl_enable_cap()
4610 return -ENOTTY; in kvm_vcpu_ioctl_enable_cap()
4615 vcpu->arch.pv_cpuid.enforce = cap->args[0]; in kvm_vcpu_ioctl_enable_cap()
4616 if (vcpu->arch.pv_cpuid.enforce) in kvm_vcpu_ioctl_enable_cap()
4622 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
4629 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
4644 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4650 r = -ENOMEM; in kvm_arch_vcpu_ioctl()
4656 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4663 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4678 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4696 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4699 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
4706 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4710 cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
4717 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4721 cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
4724 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4731 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
4733 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
4737 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
4739 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
4745 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4751 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4761 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4764 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4767 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
4769 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
4775 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4784 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4795 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4804 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4816 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4826 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4836 r = -ENOMEM; in kvm_arch_vcpu_ioctl()
4842 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4860 r = -ENOMEM; in kvm_arch_vcpu_ioctl()
4866 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4886 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4902 r = vcpu->arch.virtual_tsc_khz; in kvm_arch_vcpu_ioctl()
4912 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4922 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4923 if (!kvm_x86_ops.nested_ops->get_state) in kvm_arch_vcpu_ioctl()
4926 BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size)); in kvm_arch_vcpu_ioctl()
4927 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4928 if (get_user(user_data_size, &user_kvm_nested_state->size)) in kvm_arch_vcpu_ioctl()
4931 r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state, in kvm_arch_vcpu_ioctl()
4937 if (put_user(r, &user_kvm_nested_state->size)) in kvm_arch_vcpu_ioctl()
4938 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4940 r = -E2BIG; in kvm_arch_vcpu_ioctl()
4952 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4953 if (!kvm_x86_ops.nested_ops->set_state) in kvm_arch_vcpu_ioctl()
4956 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4960 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4975 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
4976 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state); in kvm_arch_vcpu_ioctl()
4977 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
4984 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4989 cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
4993 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5000 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5018 if (addr > (unsigned int)(-3 * PAGE_SIZE)) in kvm_vm_ioctl_set_tss_addr()
5019 return -EINVAL; in kvm_vm_ioctl_set_tss_addr()
5034 return -EINVAL; in kvm_vm_ioctl_set_nr_mmu_pages()
5036 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_set_nr_mmu_pages()
5039 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; in kvm_vm_ioctl_set_nr_mmu_pages()
5041 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_set_nr_mmu_pages()
5047 return kvm->arch.n_max_mmu_pages; in kvm_vm_ioctl_get_nr_mmu_pages()
5052 struct kvm_pic *pic = kvm->arch.vpic; in kvm_vm_ioctl_get_irqchip()
5056 switch (chip->chip_id) { in kvm_vm_ioctl_get_irqchip()
5058 memcpy(&chip->chip.pic, &pic->pics[0], in kvm_vm_ioctl_get_irqchip()
5062 memcpy(&chip->chip.pic, &pic->pics[1], in kvm_vm_ioctl_get_irqchip()
5066 kvm_get_ioapic(kvm, &chip->chip.ioapic); in kvm_vm_ioctl_get_irqchip()
5069 r = -EINVAL; in kvm_vm_ioctl_get_irqchip()
5077 struct kvm_pic *pic = kvm->arch.vpic; in kvm_vm_ioctl_set_irqchip()
5081 switch (chip->chip_id) { in kvm_vm_ioctl_set_irqchip()
5083 spin_lock(&pic->lock); in kvm_vm_ioctl_set_irqchip()
5084 memcpy(&pic->pics[0], &chip->chip.pic, in kvm_vm_ioctl_set_irqchip()
5086 spin_unlock(&pic->lock); in kvm_vm_ioctl_set_irqchip()
5089 spin_lock(&pic->lock); in kvm_vm_ioctl_set_irqchip()
5090 memcpy(&pic->pics[1], &chip->chip.pic, in kvm_vm_ioctl_set_irqchip()
5092 spin_unlock(&pic->lock); in kvm_vm_ioctl_set_irqchip()
5095 kvm_set_ioapic(kvm, &chip->chip.ioapic); in kvm_vm_ioctl_set_irqchip()
5098 r = -EINVAL; in kvm_vm_ioctl_set_irqchip()
5107 struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state; in kvm_vm_ioctl_get_pit()
5109 BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels)); in kvm_vm_ioctl_get_pit()
5111 mutex_lock(&kps->lock); in kvm_vm_ioctl_get_pit()
5112 memcpy(ps, &kps->channels, sizeof(*ps)); in kvm_vm_ioctl_get_pit()
5113 mutex_unlock(&kps->lock); in kvm_vm_ioctl_get_pit()
5120 struct kvm_pit *pit = kvm->arch.vpit; in kvm_vm_ioctl_set_pit()
5122 mutex_lock(&pit->pit_state.lock); in kvm_vm_ioctl_set_pit()
5123 memcpy(&pit->pit_state.channels, ps, sizeof(*ps)); in kvm_vm_ioctl_set_pit()
5125 kvm_pit_load_count(pit, i, ps->channels[i].count, 0); in kvm_vm_ioctl_set_pit()
5126 mutex_unlock(&pit->pit_state.lock); in kvm_vm_ioctl_set_pit()
5132 mutex_lock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit2()
5133 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, in kvm_vm_ioctl_get_pit2()
5134 sizeof(ps->channels)); in kvm_vm_ioctl_get_pit2()
5135 ps->flags = kvm->arch.vpit->pit_state.flags; in kvm_vm_ioctl_get_pit2()
5136 mutex_unlock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit2()
5137 memset(&ps->reserved, 0, sizeof(ps->reserved)); in kvm_vm_ioctl_get_pit2()
5146 struct kvm_pit *pit = kvm->arch.vpit; in kvm_vm_ioctl_set_pit2()
5148 mutex_lock(&pit->pit_state.lock); in kvm_vm_ioctl_set_pit2()
5149 prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; in kvm_vm_ioctl_set_pit2()
5150 cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY; in kvm_vm_ioctl_set_pit2()
5153 memcpy(&pit->pit_state.channels, &ps->channels, in kvm_vm_ioctl_set_pit2()
5154 sizeof(pit->pit_state.channels)); in kvm_vm_ioctl_set_pit2()
5155 pit->pit_state.flags = ps->flags; in kvm_vm_ioctl_set_pit2()
5157 kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count, in kvm_vm_ioctl_set_pit2()
5159 mutex_unlock(&pit->pit_state.lock); in kvm_vm_ioctl_set_pit2()
5166 struct kvm_pit *pit = kvm->arch.vpit; in kvm_vm_ioctl_reinject()
5168 /* pit->pit_state.lock was overloaded to prevent userspace from getting in kvm_vm_ioctl_reinject()
5169 * an inconsistent state after running multiple KVM_REINJECT_CONTROL in kvm_vm_ioctl_reinject()
5170 * ioctls in a row.  Use a separate lock if that ioctl isn't rare. in kvm_vm_ioctl_reinject()
5171 */ in kvm_vm_ioctl_reinject()
5172 mutex_lock(&pit->pit_state.lock); in kvm_vm_ioctl_reinject()
5173 kvm_pit_set_reinject(pit, control->pit_reinject); in kvm_vm_ioctl_reinject()
5174 mutex_unlock(&pit->pit_state.lock); in kvm_vm_ioctl_reinject()
5182 * Flush potentially hardware-cached dirty pages to dirty_bitmap. in kvm_arch_sync_dirty_log()
5192 return -ENXIO; in kvm_vm_ioctl_irq_line()
5194 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, in kvm_vm_ioctl_irq_line()
5195 irq_event->irq, irq_event->level, in kvm_vm_ioctl_irq_line()
5205 if (cap->flags) in kvm_vm_ioctl_enable_cap()
5206 return -EINVAL; in kvm_vm_ioctl_enable_cap()
5208 switch (cap->cap) { in kvm_vm_ioctl_enable_cap()
5210 kvm->arch.disabled_quirks = cap->args[0]; in kvm_vm_ioctl_enable_cap()
5214 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
5215 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
5216 if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS) in kvm_vm_ioctl_enable_cap()
5218 r = -EEXIST; in kvm_vm_ioctl_enable_cap()
5221 if (kvm->created_vcpus) in kvm_vm_ioctl_enable_cap()
5228 kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT; in kvm_vm_ioctl_enable_cap()
5229 kvm->arch.nr_reserved_ioapic_pins = cap->args[0]; in kvm_vm_ioctl_enable_cap()
5232 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
5236 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
5237 if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS) in kvm_vm_ioctl_enable_cap()
5240 if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS) in kvm_vm_ioctl_enable_cap()
5241 kvm->arch.x2apic_format = true; in kvm_vm_ioctl_enable_cap()
5242 if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK) in kvm_vm_ioctl_enable_cap()
5243 kvm->arch.x2apic_broadcast_quirk_disabled = true; in kvm_vm_ioctl_enable_cap()
5248 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
5249 if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS) in kvm_vm_ioctl_enable_cap()
5252 if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) && in kvm_vm_ioctl_enable_cap()
5254 kvm->arch.mwait_in_guest = true; in kvm_vm_ioctl_enable_cap()
5255 if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT) in kvm_vm_ioctl_enable_cap()
5256 kvm->arch.hlt_in_guest = true; in kvm_vm_ioctl_enable_cap()
5257 if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE) in kvm_vm_ioctl_enable_cap()
5258 kvm->arch.pause_in_guest = true; in kvm_vm_ioctl_enable_cap()
5259 if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE) in kvm_vm_ioctl_enable_cap()
5260 kvm->arch.cstate_in_guest = true; in kvm_vm_ioctl_enable_cap()
5264 kvm->arch.guest_can_read_msr_platform_info = cap->args[0]; in kvm_vm_ioctl_enable_cap()
5268 kvm->arch.exception_payload_enabled = cap->args[0]; in kvm_vm_ioctl_enable_cap()
5272 kvm->arch.user_space_msr_mask = cap->args[0]; in kvm_vm_ioctl_enable_cap()
5276 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
5285 u32 count = kvm->arch.msr_filter.count; in kvm_clear_msr_filter()
5288 mutex_lock(&kvm->lock); in kvm_clear_msr_filter()
5289 kvm->arch.msr_filter.count = 0; in kvm_clear_msr_filter()
5290 memcpy(ranges, kvm->arch.msr_filter.ranges, count * sizeof(ranges[0])); in kvm_clear_msr_filter()
5291 mutex_unlock(&kvm->lock); in kvm_clear_msr_filter()
5292 synchronize_srcu(&kvm->srcu); in kvm_clear_msr_filter()
5300 struct msr_bitmap_range *ranges = kvm->arch.msr_filter.ranges; in kvm_add_msr_filter()
5306 if (!user_range->nmsrs) in kvm_add_msr_filter()
5309 bitmap_size = BITS_TO_LONGS(user_range->nmsrs) * sizeof(long); in kvm_add_msr_filter()
5311 return -EINVAL; in kvm_add_msr_filter()
5313 bitmap = memdup_user((u8 __user *)user_range->bitmap, bitmap_size); in kvm_add_msr_filter()
5318 .flags = user_range->flags, in kvm_add_msr_filter()
5319 .base = user_range->base, in kvm_add_msr_filter()
5320 .nmsrs = user_range->nmsrs, in kvm_add_msr_filter()
5325 r = -EINVAL; in kvm_add_msr_filter()
5330 r = -EINVAL; in kvm_add_msr_filter()
5335 ranges[kvm->arch.msr_filter.count] = range; in kvm_add_msr_filter()
5338 kvm->arch.msr_filter.count++; in kvm_add_msr_filter()
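
Each accepted range describes nmsrs MSRs starting at base, with one bit per MSR in the user-supplied bitmap (hence bitmap_size = BITS_TO_LONGS(nmsrs) * sizeof(long) above). A hedged sketch of the lookup such a range enables; the struct and helper names are illustrative, not the kernel's:

	#include <stdbool.h>
	#include <stdint.h>

	#define BITS_PER_LONG (8 * sizeof(unsigned long))

	struct msr_range {
		uint32_t base;
		uint32_t nmsrs;
		const unsigned long *bitmap; /* 1 bit per MSR in [base, base + nmsrs) */
	};

	static bool msr_bit_set(const struct msr_range *r, uint32_t msr)
	{
		uint32_t idx;

		if (msr < r->base || msr - r->base >= r->nmsrs)
			return false;		/* MSR not covered by this range */
		idx = msr - r->base;
		return (r->bitmap[idx / BITS_PER_LONG] >> (idx % BITS_PER_LONG)) & 1;
	}
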
5356 return -EFAULT; in kvm_vm_ioctl_set_msr_filter()
5363 return -EINVAL; in kvm_vm_ioctl_set_msr_filter()
5367 kvm->arch.msr_filter.default_allow = default_allow; in kvm_vm_ioctl_set_msr_filter()
5371 * a TOCTOU violation on kvm->arch.msr_filter.count. in kvm_vm_ioctl_set_msr_filter()
5373 mutex_lock(&kvm->lock); in kvm_vm_ioctl_set_msr_filter()
5381 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_set_msr_filter()
5389 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
5391 int r = -ENOTTY; in kvm_arch_vm_ioctl()
5393 * This union makes it completely explicit to gcc-3.x in kvm_arch_vm_ioctl()
5394 * that these two variables' stack usage should be in kvm_arch_vm_ioctl()
5395 * combined, not added together. in kvm_arch_vm_ioctl()
5396 */ in kvm_arch_vm_ioctl()
5410 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
5411 r = -EINVAL; in kvm_arch_vm_ioctl()
5412 if (kvm->created_vcpus) in kvm_arch_vm_ioctl()
5414 r = -EFAULT; in kvm_arch_vm_ioctl()
5419 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
5429 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
5431 r = -EEXIST; in kvm_arch_vm_ioctl()
5435 r = -EINVAL; in kvm_arch_vm_ioctl()
5436 if (kvm->created_vcpus) in kvm_arch_vm_ioctl()
5455 /* Write kvm->irq_routing before enabling irqchip_in_kernel. */ in kvm_arch_vm_ioctl()
5457 kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL; in kvm_arch_vm_ioctl()
5459 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
5466 r = -EFAULT; in kvm_arch_vm_ioctl()
5471 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
5472 r = -EEXIST; in kvm_arch_vm_ioctl()
5473 if (kvm->arch.vpit) in kvm_arch_vm_ioctl()
5475 r = -ENOMEM; in kvm_arch_vm_ioctl()
5476 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); in kvm_arch_vm_ioctl()
5477 if (kvm->arch.vpit) in kvm_arch_vm_ioctl()
5480 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
5492 r = -ENXIO; in kvm_arch_vm_ioctl()
5498 r = -EFAULT; in kvm_arch_vm_ioctl()
5516 r = -ENXIO; in kvm_arch_vm_ioctl()
5525 r = -EFAULT; in kvm_arch_vm_ioctl()
5528 r = -ENXIO; in kvm_arch_vm_ioctl()
5529 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
5534 r = -EFAULT; in kvm_arch_vm_ioctl()
5541 r = -EFAULT; in kvm_arch_vm_ioctl()
5544 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
5545 r = -ENXIO; in kvm_arch_vm_ioctl()
5546 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
5550 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
5554 r = -ENXIO; in kvm_arch_vm_ioctl()
5555 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
5560 r = -EFAULT; in kvm_arch_vm_ioctl()
5567 r = -EFAULT; in kvm_arch_vm_ioctl()
5570 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
5571 r = -ENXIO; in kvm_arch_vm_ioctl()
5572 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
5576 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
5581 r = -EFAULT; in kvm_arch_vm_ioctl()
5584 r = -ENXIO; in kvm_arch_vm_ioctl()
5585 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
5592 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
5593 if (kvm->created_vcpus) in kvm_arch_vm_ioctl()
5594 r = -EBUSY; in kvm_arch_vm_ioctl()
5596 kvm->arch.bsp_vcpu_id = arg; in kvm_arch_vm_ioctl()
5597 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
5601 r = -EFAULT; in kvm_arch_vm_ioctl()
5604 r = -EINVAL; in kvm_arch_vm_ioctl()
5607 memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc)); in kvm_arch_vm_ioctl()
5615 r = -EFAULT; in kvm_arch_vm_ioctl()
5619 r = -EINVAL; in kvm_arch_vm_ioctl()
5631 kvm->arch.kvmclock_offset += user_ns.clock - now_ns; in kvm_arch_vm_ioctl()
5641 user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0; in kvm_arch_vm_ioctl()
5644 r = -EFAULT; in kvm_arch_vm_ioctl()
5651 r = -ENOTTY; in kvm_arch_vm_ioctl()
5659 r = -EFAULT; in kvm_arch_vm_ioctl()
5663 r = -ENOTTY; in kvm_arch_vm_ioctl()
5671 r = -EFAULT; in kvm_arch_vm_ioctl()
5675 r = -ENOTTY; in kvm_arch_vm_ioctl()
5683 r = -EFAULT; in kvm_arch_vm_ioctl()
5696 r = -ENOTTY; in kvm_arch_vm_ioctl()
5757 msrs_to_save_all[i] - MSR_IA32_RTIT_ADDR0_A >= in kvm_init_msr_list()
5762 if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >= in kvm_init_msr_list()
5767 if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >= in kvm_init_msr_list()
5805 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) in vcpu_mmio_write()
5810 len -= n; in vcpu_mmio_write()
5825 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, in vcpu_mmio_read()
5832 len -= n; in vcpu_mmio_read()
5858 /* NPT walks are always user-walks */ in translate_nested_gpa()
5860 t_gpa = vcpu->arch.mmu->gva_to_gpa(vcpu, gpa, access, exception); in translate_nested_gpa()
5869 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_read()
5877 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_fetch()
5885 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_write()
5892 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception); in kvm_mmu_gva_to_gpa_system()
5903 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, in kvm_read_guest_virt_helper()
5905 unsigned offset = addr & (PAGE_SIZE-1); in kvm_read_guest_virt_helper()
5906 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); in kvm_read_guest_virt_helper()
5918 bytes -= toread; in kvm_read_guest_virt_helper()
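
The loop above never reads across a guest page boundary in one step: offset = addr & (PAGE_SIZE - 1) locates the cursor within its page, and toread is clamped to PAGE_SIZE - offset, since each page must be translated through gva_to_gpa() separately. The chunking pattern on its own, with a flat buffer standing in for the guest address space:

	#include <stdint.h>
	#include <string.h>

	#define PAGE_SIZE 4096u

	/* Copy 'bytes' starting at 'addr', one page-bounded chunk at a time. */
	static void copy_chunked(uint8_t *dst, const uint8_t *src_base,
				 uint64_t addr, unsigned int bytes)
	{
		while (bytes) {
			unsigned int offset = addr & (PAGE_SIZE - 1);
			unsigned int chunk = bytes < PAGE_SIZE - offset ?
					     bytes : PAGE_SIZE - offset;

			/* KVM would translate and read each chunk here */
			memcpy(dst, src_base + addr, chunk);
			bytes -= chunk;
			addr += chunk;
			dst += chunk;
		}
	}
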
5937 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK, in kvm_fetch_guest_virt()
5942 offset = addr & (PAGE_SIZE-1); in kvm_fetch_guest_virt()
5944 bytes = (unsigned)PAGE_SIZE - offset; in kvm_fetch_guest_virt()
6001 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, in kvm_write_guest_virt_helper()
6004 unsigned offset = addr & (PAGE_SIZE-1); in kvm_write_guest_virt_helper()
6005 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); in kvm_write_guest_virt_helper()
6016 bytes -= towrite; in kvm_write_guest_virt_helper()
6042 vcpu->arch.l1tf_flush_l1d = true; in kvm_write_guest_virt_system()
6099 && !permission_fault(vcpu, vcpu->arch.walk_mmu, in vcpu_mmio_gva_to_gpa()
6100 vcpu->arch.mmio_access, 0, access)) { in vcpu_mmio_gva_to_gpa()
6101 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | in vcpu_mmio_gva_to_gpa()
6102 (gva & (PAGE_SIZE - 1)); in vcpu_mmio_gva_to_gpa()
6107 *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in vcpu_mmio_gva_to_gpa()
6110 return -1; in vcpu_mmio_gva_to_gpa()
6141 if (vcpu->mmio_read_completed) { in read_prepare()
6143 vcpu->mmio_fragments[0].gpa, val); in read_prepare()
6144 vcpu->mmio_read_completed = 0; in read_prepare()
6179 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; in write_exit_mmio()
6181 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); in write_exit_mmio()
6207 bool write = ops->write; in emulator_read_write_onepage()
6209 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in emulator_read_write_onepage()
6218 if (ctxt->gpa_available && emulator_can_use_gpa(ctxt) && in emulator_read_write_onepage()
6219 (addr & ~PAGE_MASK) == (ctxt->gpa_val & ~PAGE_MASK)) { in emulator_read_write_onepage()
6220 gpa = ctxt->gpa_val; in emulator_read_write_onepage()
6228 if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes)) in emulator_read_write_onepage()
6234 handled = ops->read_write_mmio(vcpu, gpa, bytes, val); in emulator_read_write_onepage()
6239 bytes -= handled; in emulator_read_write_onepage()
6242 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); in emulator_read_write_onepage()
6243 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; in emulator_read_write_onepage()
6244 frag->gpa = gpa; in emulator_read_write_onepage()
6245 frag->data = val; in emulator_read_write_onepage()
6246 frag->len = bytes; in emulator_read_write_onepage()
6260 if (ops->read_write_prepare && in emulator_read_write()
6261 ops->read_write_prepare(vcpu, val, bytes)) in emulator_read_write()
6264 vcpu->mmio_nr_fragments = 0; in emulator_read_write()
6267 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { in emulator_read_write()
6270 now = -addr & ~PAGE_MASK; in emulator_read_write()
6277 if (ctxt->mode != X86EMUL_MODE_PROT64) in emulator_read_write()
6280 bytes -= now; in emulator_read_write()
6288 if (!vcpu->mmio_nr_fragments) in emulator_read_write()
6291 gpa = vcpu->mmio_fragments[0].gpa; in emulator_read_write()
6293 vcpu->mmio_needed = 1; in emulator_read_write()
6294 vcpu->mmio_cur_fragment = 0; in emulator_read_write()
6296 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); in emulator_read_write()
6297 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; in emulator_read_write()
6298 vcpu->run->exit_reason = KVM_EXIT_MMIO; in emulator_read_write()
6299 vcpu->run->mmio.phys_addr = gpa; in emulator_read_write()
6301 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); in emulator_read_write()
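
When an access crosses a page boundary (the XOR test against PAGE_MASK above), emulator_read_write() splits it in two, and now = -addr & ~PAGE_MASK computes the bytes remaining in the first page: for an unaligned addr, -addr is congruent to PAGE_SIZE - (addr mod PAGE_SIZE) in the low bits, so masking with PAGE_SIZE - 1 yields the distance to the boundary. A quick self-check of that identity:

	#include <assert.h>
	#include <stdint.h>

	#define PAGE_SIZE 4096ull
	#define PAGE_MASK (~(PAGE_SIZE - 1))

	int main(void)
	{
		uint64_t addr = 0x1ffe;			/* 2 bytes below a boundary */
		uint64_t now = -addr & ~PAGE_MASK;	/* bytes left in addr's page */

		assert(now == 2);
		assert(((addr + now) & ~PAGE_MASK) == 0); /* next chunk is aligned */
		return 0;
	}
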
6349 if (bytes > 8 || (bytes & (bytes - 1))) in emulator_cmpxchg_emulated()
6363 page_line_mask = ~(cache_line_size() - 1); in emulator_cmpxchg_emulated()
6367 if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask)) in emulator_cmpxchg_emulated()
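
emulator_cmpxchg_emulated() refuses operands it cannot make atomic: bytes & (bytes - 1) is non-zero unless bytes is a power of two, and comparing the first and last byte address under the line mask (page_line_mask above, built from cache_line_size()) catches operands that straddle a cache line. The same two guards in isolation, assuming a 64-byte line:

	#include <stdbool.h>
	#include <stdint.h>

	#define CACHE_LINE 64ull	/* the kernel queries cache_line_size() */

	static bool cmpxchg_operand_ok(uint64_t gpa, unsigned int bytes)
	{
		uint64_t line_mask = ~(CACHE_LINE - 1);

		if (bytes == 0 || bytes > 8 || (bytes & (bytes - 1)))
			return false;	/* must be exactly 1, 2, 4 or 8 bytes */
		if (((gpa + bytes - 1) & line_mask) != (gpa & line_mask))
			return false;	/* straddles a cache line */
		return true;
	}
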
6411 for (i = 0; i < vcpu->arch.pio.count; i++) { in kernel_pio()
6412 if (vcpu->arch.pio.in) in kernel_pio()
6413 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port, in kernel_pio()
6414 vcpu->arch.pio.size, pd); in kernel_pio()
6417 vcpu->arch.pio.port, vcpu->arch.pio.size, in kernel_pio()
6421 pd += vcpu->arch.pio.size; in kernel_pio()
6430 vcpu->arch.pio.port = port; in emulator_pio_in_out()
6431 vcpu->arch.pio.in = in; in emulator_pio_in_out()
6432 vcpu->arch.pio.count = count; in emulator_pio_in_out()
6433 vcpu->arch.pio.size = size; in emulator_pio_in_out()
6435 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { in emulator_pio_in_out()
6436 vcpu->arch.pio.count = 0; in emulator_pio_in_out()
6440 vcpu->run->exit_reason = KVM_EXIT_IO; in emulator_pio_in_out()
6441 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; in emulator_pio_in_out()
6442 vcpu->run->io.size = size; in emulator_pio_in_out()
6443 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; in emulator_pio_in_out()
6444 vcpu->run->io.count = count; in emulator_pio_in_out()
6445 vcpu->run->io.port = port; in emulator_pio_in_out()
6455 if (vcpu->arch.pio.count) in emulator_pio_in()
6458 memset(vcpu->arch.pio_data, 0, size * count); in emulator_pio_in()
6463 memcpy(val, vcpu->arch.pio_data, size * count); in emulator_pio_in()
6464 trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data); in emulator_pio_in()
6465 vcpu->arch.pio.count = 0; in emulator_pio_in()
6484 memcpy(vcpu->arch.pio_data, val, size * count); in emulator_pio_out()
6485 trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data); in emulator_pio_out()
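
When kernel_pio() finds no in-kernel device for the port, the exit state set up in emulator_pio_in_out() is completed by user space: KVM_RUN returns with exit_reason == KVM_EXIT_IO and the payload sits in the shared kvm_run mapping at io.data_offset, one io.size-sized element per io.count. A sketch of the user-space half of this protocol; emulate_in()/emulate_out() stand in for an assumed device model:

	#include <linux/kvm.h>
	#include <stdint.h>

	void emulate_in(uint16_t port, void *data, uint8_t size);	 /* assumed */
	void emulate_out(uint16_t port, const void *data, uint8_t size); /* assumed */

	/* 'run' is the vCPU's mmap()ed struct kvm_run. */
	static void handle_pio_exit(struct kvm_run *run)
	{
		uint8_t *data = (uint8_t *)run + run->io.data_offset;
		uint32_t i;

		for (i = 0; i < run->io.count; i++) {
			if (run->io.direction == KVM_EXIT_IO_OUT)
				emulate_out(run->io.port, data, run->io.size);
			else
				emulate_in(run->io.port, data, run->io.size);
			data += run->io.size;
		}
	}
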
6514 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
6515 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, in kvm_emulate_wbinvd_noskip()
6518 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
6553 return (curr_cr & ~((1ULL << 32) - 1)) | new_val; in mk_cr_64()
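
mk_cr_64() merges a new 32-bit value into the low half of a 64-bit control register while preserving bits 63:32; ~((1ULL << 32) - 1) is a mask of the upper half. For example:

	#include <assert.h>
	#include <stdint.h>

	static uint64_t mk_cr_64(uint64_t curr_cr, uint32_t new_val)
	{
		return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
	}

	int main(void)
	{
		assert(mk_cr_64(0xaaaabbbb11112222ull, 0x33334444) ==
		       0xaaaabbbb33334444ull);	/* upper half kept intact */
		return 0;
	}
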
6566 value = vcpu->arch.cr2; in emulator_get_cr()
6595 vcpu->arch.cr2 = val; in emulator_set_cr()
6608 res = -1; in emulator_set_cr()
6669 desc->type = var.type; in emulator_get_segment()
6670 desc->s = var.s; in emulator_get_segment()
6671 desc->dpl = var.dpl; in emulator_get_segment()
6672 desc->p = var.present; in emulator_get_segment()
6673 desc->avl = var.avl; in emulator_get_segment()
6674 desc->l = var.l; in emulator_get_segment()
6675 desc->d = var.db; in emulator_get_segment()
6676 desc->g = var.g; in emulator_get_segment()
6694 if (desc->g) in emulator_set_segment()
6696 var.type = desc->type; in emulator_set_segment()
6697 var.dpl = desc->dpl; in emulator_set_segment()
6698 var.db = desc->d; in emulator_set_segment()
6699 var.s = desc->s; in emulator_set_segment()
6700 var.l = desc->l; in emulator_set_segment()
6701 var.g = desc->g; in emulator_set_segment()
6702 var.avl = desc->avl; in emulator_set_segment()
6703 var.present = desc->p; in emulator_set_segment()
6747 return vcpu->arch.smbase; in emulator_get_smbase()
6754 vcpu->arch.smbase = smbase; in emulator_set_smbase()
6771 emul_to_vcpu(ctxt)->arch.halt_request = 1; in emulator_halt()
6779 &ctxt->exception); in emulator_intercept()
6783 u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, in emulator_get_cpuid() argument
6786 return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, exact_only); in emulator_get_cpuid()
6821 return emul_to_vcpu(ctxt)->arch.hflags; in emulator_get_hflags()
6826 emul_to_vcpu(ctxt)->arch.hflags = emul_flags; in emulator_set_hflags()
6913 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in inject_emulated_exception()
6914 if (ctxt->exception.vector == PF_VECTOR) in inject_emulated_exception()
6915 return kvm_inject_emulated_page_fault(vcpu, &ctxt->exception); in inject_emulated_exception()
6917 if (ctxt->exception.error_code_valid) in inject_emulated_exception()
6918 kvm_queue_exception_e(vcpu, ctxt->exception.vector, in inject_emulated_exception()
6919 ctxt->exception.error_code); in inject_emulated_exception()
6921 kvm_queue_exception(vcpu, ctxt->exception.vector); in inject_emulated_exception()
6935 ctxt->vcpu = vcpu; in alloc_emulate_ctxt()
6936 ctxt->ops = &emulate_ops; in alloc_emulate_ctxt()
6937 vcpu->arch.emulate_ctxt = ctxt; in alloc_emulate_ctxt()
6944 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in init_emulate_ctxt()
6949 ctxt->gpa_available = false; in init_emulate_ctxt()
6950 ctxt->eflags = kvm_get_rflags(vcpu); in init_emulate_ctxt()
6951 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; in init_emulate_ctxt()
6953 ctxt->eip = kvm_rip_read(vcpu); in init_emulate_ctxt()
6954 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : in init_emulate_ctxt()
6955 (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : in init_emulate_ctxt()
6964 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in init_emulate_ctxt()
6969 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in kvm_inject_realmode_interrupt()
6974 ctxt->op_bytes = 2; in kvm_inject_realmode_interrupt()
6975 ctxt->ad_bytes = 2; in kvm_inject_realmode_interrupt()
6976 ctxt->_eip = ctxt->eip + inc_eip; in kvm_inject_realmode_interrupt()
6982 ctxt->eip = ctxt->_eip; in kvm_inject_realmode_interrupt()
6983 kvm_rip_write(vcpu, ctxt->eip); in kvm_inject_realmode_interrupt()
6984 kvm_set_rflags(vcpu, ctxt->eflags); in kvm_inject_realmode_interrupt()
6991 ++vcpu->stat.insn_emulation_fail; in handle_emulation_failure()
7000 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in handle_emulation_failure()
7001 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in handle_emulation_failure()
7002 vcpu->run->internal.ndata = 0; in handle_emulation_failure()
7009 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in handle_emulation_failure()
7010 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in handle_emulation_failure()
7011 vcpu->run->internal.ndata = 0; in handle_emulation_failure()
7032 if (!vcpu->arch.mmu->direct_map) { in reexecute_instruction()
7050 * retry instruction -> write #PF -> emulation fail -> retry in reexecute_instruction()
7051 * instruction -> ... in reexecute_instruction()
7053 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
7064 /* The instructions are well-emulated on direct mmu. */ in reexecute_instruction()
7065 if (vcpu->arch.mmu->direct_map) { in reexecute_instruction()
7068 spin_lock(&vcpu->kvm->mmu_lock); in reexecute_instruction()
7069 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; in reexecute_instruction()
7070 spin_unlock(&vcpu->kvm->mmu_lock); in reexecute_instruction()
7073 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
7080 * and it failed try to unshadow page and re-enter the in reexecute_instruction()
7083 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
7099 last_retry_eip = vcpu->arch.last_retry_eip; in retry_instruction()
7100 last_retry_addr = vcpu->arch.last_retry_addr; in retry_instruction()
7103 * If the emulation is caused by #PF and it is non-page_table in retry_instruction()
7104 * writing instruction, it means the VM-EXIT is caused by shadow in retry_instruction()
7108 * Note: if the guest uses a non-page-table modifying instruction in retry_instruction()
7115 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; in retry_instruction()
7127 if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa) in retry_instruction()
7130 vcpu->arch.last_retry_eip = ctxt->eip; in retry_instruction()
7131 vcpu->arch.last_retry_addr = cr2_or_gpa; in retry_instruction()
7133 if (!vcpu->arch.mmu->direct_map) in retry_instruction()
7136 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in retry_instruction()
7146 if (!(vcpu->arch.hflags & HF_SMM_MASK)) { in kvm_smm_changed()
7148 trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false); in kvm_smm_changed()
7174 struct kvm_run *kvm_run = vcpu->run; in kvm_vcpu_do_singlestep()
7176 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { in kvm_vcpu_do_singlestep()
7177 kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM; in kvm_vcpu_do_singlestep()
7178 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu); in kvm_vcpu_do_singlestep()
7179 kvm_run->debug.arch.exception = DB_VECTOR; in kvm_vcpu_do_singlestep()
7180 kvm_run->exit_reason = KVM_EXIT_DEBUG; in kvm_vcpu_do_singlestep()
7212 if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && in kvm_vcpu_check_breakpoint()
7213 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { in kvm_vcpu_check_breakpoint()
7214 struct kvm_run *kvm_run = vcpu->run; in kvm_vcpu_check_breakpoint()
7217 vcpu->arch.guest_debug_dr7, in kvm_vcpu_check_breakpoint()
7218 vcpu->arch.eff_db); in kvm_vcpu_check_breakpoint()
7221 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM; in kvm_vcpu_check_breakpoint()
7222 kvm_run->debug.arch.pc = eip; in kvm_vcpu_check_breakpoint()
7223 kvm_run->debug.arch.exception = DB_VECTOR; in kvm_vcpu_check_breakpoint()
7224 kvm_run->exit_reason = KVM_EXIT_DEBUG; in kvm_vcpu_check_breakpoint()
7230 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && in kvm_vcpu_check_breakpoint()
7234 vcpu->arch.dr7, in kvm_vcpu_check_breakpoint()
7235 vcpu->arch.db); in kvm_vcpu_check_breakpoint()
7249 switch (ctxt->opcode_len) { in is_vmware_backdoor_opcode()
7251 switch (ctxt->b) { in is_vmware_backdoor_opcode()
7268 switch (ctxt->b) { in is_vmware_backdoor_opcode()
7282 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in x86_emulate_instruction()
7289 vcpu->arch.l1tf_flush_l1d = true; in x86_emulate_instruction()
7295 write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; in x86_emulate_instruction()
7296 vcpu->arch.write_fault_to_shadow_pgtable = false; in x86_emulate_instruction()
7312 ctxt->interruptibility = 0; in x86_emulate_instruction()
7313 ctxt->have_exception = false; in x86_emulate_instruction()
7314 ctxt->exception.vector = -1; in x86_emulate_instruction()
7315 ctxt->perm_ok = false; in x86_emulate_instruction()
7317 ctxt->ud = emulation_type & EMULTYPE_TRAP_UD; in x86_emulate_instruction()
7322 ++vcpu->stat.insn_emulation; in x86_emulate_instruction()
7333 if (ctxt->have_exception) { in x86_emulate_instruction()
7335 * #UD should result in just EMULATION_FAILED, and trap-like in x86_emulate_instruction()
7338 WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR || in x86_emulate_instruction()
7339 exception_type(ctxt->exception.vector) == EXCPT_TRAP); in x86_emulate_instruction()
7356 * updating interruptibility state and injecting single-step #DBs. in x86_emulate_instruction()
7359 kvm_rip_write(vcpu, ctxt->_eip); in x86_emulate_instruction()
7360 if (ctxt->eflags & X86_EFLAGS_RF) in x86_emulate_instruction()
7361 kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); in x86_emulate_instruction()
7370 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { in x86_emulate_instruction()
7371 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in x86_emulate_instruction()
7378 ctxt->exception.address = cr2_or_gpa; in x86_emulate_instruction()
7381 if (vcpu->arch.mmu->direct_map) { in x86_emulate_instruction()
7382 ctxt->gpa_available = true; in x86_emulate_instruction()
7383 ctxt->gpa_val = cr2_or_gpa; in x86_emulate_instruction()
7387 ctxt->exception.address = 0; in x86_emulate_instruction()
7403 if (ctxt->have_exception) { in x86_emulate_instruction()
7407 } else if (vcpu->arch.pio.count) { in x86_emulate_instruction()
7408 if (!vcpu->arch.pio.in) { in x86_emulate_instruction()
7409 /* FIXME: return into emulator if single-stepping. */ in x86_emulate_instruction()
7410 vcpu->arch.pio.count = 0; in x86_emulate_instruction()
7413 vcpu->arch.complete_userspace_io = complete_emulated_pio; in x86_emulate_instruction()
7416 } else if (vcpu->mmio_needed) { in x86_emulate_instruction()
7417 ++vcpu->stat.mmio_exits; in x86_emulate_instruction()
7419 if (!vcpu->mmio_is_write) in x86_emulate_instruction()
7422 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in x86_emulate_instruction()
7430 toggle_interruptibility(vcpu, ctxt->interruptibility); in x86_emulate_instruction()
7431 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in x86_emulate_instruction()
7432 if (!ctxt->have_exception || in x86_emulate_instruction()
7433 exception_type(ctxt->exception.vector) == EXCPT_TRAP) { in x86_emulate_instruction()
7434 kvm_rip_write(vcpu, ctxt->eip); in x86_emulate_instruction()
7435 if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) in x86_emulate_instruction()
7439 __kvm_set_rflags(vcpu, ctxt->eflags); in x86_emulate_instruction()
7448 if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF)) in x86_emulate_instruction()
7451 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; in x86_emulate_instruction()
7471 vcpu->arch.pio.count = 0; in complete_fast_pio_out_port_0x7e()
7477 vcpu->arch.pio.count = 0; in complete_fast_pio_out()
7479 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) in complete_fast_pio_out()
7499 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) { in kvm_fast_pio_out()
7500 vcpu->arch.complete_userspace_io = in kvm_fast_pio_out()
7504 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); in kvm_fast_pio_out()
7505 vcpu->arch.complete_userspace_io = complete_fast_pio_out; in kvm_fast_pio_out()
7515 BUG_ON(vcpu->arch.pio.count != 1); in complete_fast_pio_in()
7517 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) { in complete_fast_pio_in()
7518 vcpu->arch.pio.count = 0; in complete_fast_pio_in()
7523 val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0; in complete_fast_pio_in()
7526 * Since vcpu->arch.pio.count == 1 let emulator_pio_in perform in complete_fast_pio_in()
7529 emulator_pio_in(vcpu, vcpu->arch.pio.size, vcpu->arch.pio.port, &val, 1); in complete_fast_pio_in()
7550 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); in kvm_fast_pio_in()
7551 vcpu->arch.complete_userspace_io = complete_fast_pio_in; in kvm_fast_pio_in()
7580 khz = freq->new; in tsc_khz_changed()
7601 /* TSC frequency always matches when on Hyper-V */ in kvm_hyperv_tsc_notifier()
7607 struct kvm_arch *ka = &kvm->arch; in kvm_hyperv_tsc_notifier()
7609 spin_lock(&ka->pvclock_gtod_sync_lock); in kvm_hyperv_tsc_notifier()
7619 spin_unlock(&ka->pvclock_gtod_sync_lock); in kvm_hyperv_tsc_notifier()
7675 if (vcpu->cpu != cpu) in __kvmclock_cpufreq_notifier()
7678 if (vcpu->cpu != raw_smp_processor_id()) in __kvmclock_cpufreq_notifier()
7684 if (freq->old < freq->new && send_ipi) { in __kvmclock_cpufreq_notifier()
7707 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) in kvmclock_cpufreq_notifier()
7709 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) in kvmclock_cpufreq_notifier()
7712 for_each_cpu(cpu, freq->policy->cpus) in kvmclock_cpufreq_notifier()
7740 if (policy->cpuinfo.max_freq) in kvm_timer_init()
7741 max_tsc_khz = policy->cpuinfo.max_freq; in kvm_timer_init()
7788 (unsigned long *)&vcpu->arch.pmu.global_status); in kvm_handle_intel_pt_intr()
7830 if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) && in pvclock_gtod_notify()
7849 r = -EEXIST; in kvm_arch_init()
7853 if (!ops->cpu_has_kvm_support()) { in kvm_arch_init()
7855 r = -EOPNOTSUPP; in kvm_arch_init()
7858 if (ops->disabled_by_bios()) { in kvm_arch_init()
7860 r = -EOPNOTSUPP; in kvm_arch_init()
7871 r = -EOPNOTSUPP; in kvm_arch_init()
7875 r = -ENOMEM; in kvm_arch_init()
7913 if (pi_inject_timer == -1) in kvm_arch_init()
7958 ++vcpu->stat.halt_exits; in kvm_vcpu_halt()
7960 vcpu->arch.mp_state = KVM_MP_STATE_HALTED; in kvm_vcpu_halt()
7963 vcpu->run->exit_reason = KVM_EXIT_HLT; in kvm_vcpu_halt()
7973 * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered in kvm_emulate_halt()
7974 * KVM_EXIT_DEBUG here. in kvm_emulate_halt()
7975 */ in kvm_emulate_halt()
7990 return -KVM_EOPNOTSUPP; in kvm_pv_clock_pairing()
7993 return -KVM_EOPNOTSUPP; in kvm_pv_clock_pairing()
8002 if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing, in kvm_pv_clock_pairing()
8004 ret = -KVM_EFAULT; in kvm_pv_clock_pairing()
8013 * @apicid - apicid of vcpu to be kicked.
8031 return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0); in kvm_apicv_activated()
8039 &kvm->arch.apicv_inhibit_reasons); in kvm_apicv_init()
8042 &kvm->arch.apicv_inhibit_reasons); in kvm_apicv_init()
8052 map = rcu_dereference(kvm->arch.apic_map); in kvm_sched_yield()
8054 if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id]) in kvm_sched_yield()
8055 target = map->phys_map[dest_id]->vcpu; in kvm_sched_yield()
8059 if (target && READ_ONCE(target->ready)) in kvm_sched_yield()
8068 if (kvm_hv_hypercall_enabled(vcpu->kvm)) in kvm_emulate_hypercall()
8089 ret = -KVM_EPERM; in kvm_emulate_hypercall()
8093 ret = -KVM_ENOSYS; in kvm_emulate_hypercall()
8103 kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1); in kvm_emulate_hypercall()
8104 kvm_sched_yield(vcpu->kvm, a1); in kvm_emulate_hypercall()
8116 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); in kvm_emulate_hypercall()
8122 kvm_sched_yield(vcpu->kvm, a0); in kvm_emulate_hypercall()
8126 ret = -KVM_ENOSYS; in kvm_emulate_hypercall()
8134 ++vcpu->stat.hypercalls; in kvm_emulate_hypercall()
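
The other side of this dispatcher is the guest, which issues the hypercall with VMCALL (or VMMCALL on AMD; emulator_fix_hypercall() below patches the wrong variant): the number goes in RAX, up to four arguments in RBX, RCX, RDX and RSI, and the return value comes back in RAX. A minimal guest-side sketch for KVM_HC_SCHED_YIELD (nr 11 in the upstream ABI), GCC/Clang inline asm:

	#define KVM_HC_SCHED_YIELD 11

	static inline long kvm_hypercall1(unsigned int nr, unsigned long a0)
	{
		long ret;

		/* nr in rax, first argument in rbx, result in rax */
		asm volatile("vmcall"
			     : "=a" (ret)
			     : "a" (nr), "b" (a0)
			     : "memory");
		return ret;
	}

	/* e.g. yield to the vCPU whose lock holder we are spinning on */
	static void yield_to(unsigned long dest_apicid)
	{
		kvm_hypercall1(KVM_HC_SCHED_YIELD, dest_apicid);
	}
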
8148 &ctxt->exception); in emulator_fix_hypercall()
8153 return vcpu->run->request_interrupt_window && in dm_request_for_irq_injection()
8154 likely(!pic_in_kernel(vcpu->kvm)); in dm_request_for_irq_injection()
8159 struct kvm_run *kvm_run = vcpu->run; in post_kvm_run_save()
8161 kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; in post_kvm_run_save()
8162 kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0; in post_kvm_run_save()
8163 kvm_run->cr8 = kvm_get_cr8(vcpu); in post_kvm_run_save()
8164 kvm_run->apic_base = kvm_get_apic_base(vcpu); in post_kvm_run_save()
8165 kvm_run->ready_for_interrupt_injection = in post_kvm_run_save()
8166 pic_in_kernel(vcpu->kvm) || in post_kvm_run_save()
8180 if (vcpu->arch.apicv_active) in update_cr8_intercept()
8183 if (!vcpu->arch.apic->vapic_addr) in update_cr8_intercept()
8186 max_irr = -1; in update_cr8_intercept()
8188 if (max_irr != -1) in update_cr8_intercept()
8203 if (vcpu->arch.exception.injected) { in inject_pending_event()
8211 * Trap-like exceptions, e.g. #DB, have higher priority than in inject_pending_event()
8214 * Fault-like exceptions, e.g. #GP and #PF, are the lowest in inject_pending_event()
8216 * execution, i.e. a pending fault-like exception means the in inject_pending_event()
8221 else if (!vcpu->arch.exception.pending) { in inject_pending_event()
8222 if (vcpu->arch.nmi_injected) { in inject_pending_event()
8225 } else if (vcpu->arch.interrupt.injected) { in inject_pending_event()
8231 WARN_ON_ONCE(vcpu->arch.exception.injected && in inject_pending_event()
8232 vcpu->arch.exception.pending); in inject_pending_event()
8236 * in order for caller to determine if it should require immediate-exit in inject_pending_event()
8241 r = kvm_x86_ops.nested_ops->check_events(vcpu); in inject_pending_event()
8247 if (vcpu->arch.exception.pending) { in inject_pending_event()
8248 trace_kvm_inj_exception(vcpu->arch.exception.nr, in inject_pending_event()
8249 vcpu->arch.exception.has_error_code, in inject_pending_event()
8250 vcpu->arch.exception.error_code); in inject_pending_event()
8252 vcpu->arch.exception.pending = false; in inject_pending_event()
8253 vcpu->arch.exception.injected = true; in inject_pending_event()
8255 if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT) in inject_pending_event()
8259 if (vcpu->arch.exception.nr == DB_VECTOR) { in inject_pending_event()
8261 if (vcpu->arch.dr7 & DR7_GD) { in inject_pending_event()
8262 vcpu->arch.dr7 &= ~DR7_GD; in inject_pending_event()
8273 * due to architectural conditions (e.g. IF=0) a window-open exit in inject_pending_event()
8274 * will re-request KVM_REQ_EVENT. Sometimes however an event is pending in inject_pending_event()
8280 * The kvm_x86_ops hooks communicate this by returning -EBUSY. in inject_pending_event()
8282 if (vcpu->arch.smi_pending) { in inject_pending_event()
8283 r = can_inject ? kvm_x86_ops.smi_allowed(vcpu, true) : -EBUSY; in inject_pending_event()
8287 vcpu->arch.smi_pending = false; in inject_pending_event()
8288 ++vcpu->arch.smi_count; in inject_pending_event()
8295 if (vcpu->arch.nmi_pending) { in inject_pending_event()
8296 r = can_inject ? kvm_x86_ops.nmi_allowed(vcpu, true) : -EBUSY; in inject_pending_event()
8300 --vcpu->arch.nmi_pending; in inject_pending_event()
8301 vcpu->arch.nmi_injected = true; in inject_pending_event()
8306 if (vcpu->arch.nmi_pending) in inject_pending_event()
8311 r = can_inject ? kvm_x86_ops.interrupt_allowed(vcpu, true) : -EBUSY; in inject_pending_event()
8324 kvm_x86_ops.nested_ops->hv_timer_pending && in inject_pending_event()
8325 kvm_x86_ops.nested_ops->hv_timer_pending(vcpu)) in inject_pending_event()
8328 WARN_ON(vcpu->arch.exception.pending); in inject_pending_event()
8345 if (kvm_x86_ops.get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) in process_nmi()
8348 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); in process_nmi()
8349 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); in process_nmi()
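
process_nmi() drains the atomically queued NMI count with an exchange-to-zero and clamps the pending total, since x86 can hold only a limited number of NMIs at once (one in service plus one latched); extras are architecturally lost. The same drain-and-clamp shape with C11 atomics:

	#include <stdatomic.h>

	static unsigned int nmi_pending;
	static atomic_uint nmi_queued;

	static void drain_nmis(unsigned int limit)
	{
		/* move everything queued so far into the pending count */
		nmi_pending += atomic_exchange(&nmi_queued, 0);
		if (nmi_pending > limit)
			nmi_pending = limit;	/* excess NMIs are dropped */
	}
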
8356 flags |= seg->g << 23; in enter_smm_get_segment_flags()
8357 flags |= seg->db << 22; in enter_smm_get_segment_flags()
8358 flags |= seg->l << 21; in enter_smm_get_segment_flags()
8359 flags |= seg->avl << 20; in enter_smm_get_segment_flags()
8360 flags |= seg->present << 15; in enter_smm_get_segment_flags()
8361 flags |= seg->dpl << 13; in enter_smm_get_segment_flags()
8362 flags |= seg->s << 12; in enter_smm_get_segment_flags()
8363 flags |= seg->type << 8; in enter_smm_get_segment_flags()
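
enter_smm_get_segment_flags() packs the segment attributes into the layout the SMM state-save area expects: type at bits 8-11, S at 12, DPL at 13-14, present at 15, AVL at 20, L at 21, D/B at 22, G at 23. A self-contained version of the same packing:

	#include <stdint.h>

	struct seg {
		unsigned type : 4, s : 1, dpl : 2, present : 1;
		unsigned avl : 1, l : 1, db : 1, g : 1;
	};

	static uint32_t smm_segment_flags(const struct seg *seg)
	{
		uint32_t flags = 0;

		flags |= (uint32_t)seg->g << 23;
		flags |= (uint32_t)seg->db << 22;
		flags |= (uint32_t)seg->l << 21;
		flags |= (uint32_t)seg->avl << 20;
		flags |= (uint32_t)seg->present << 15;
		flags |= (uint32_t)seg->dpl << 13;
		flags |= (uint32_t)seg->s << 12;
		flags |= (uint32_t)seg->type << 8;
		return flags;
	}
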
8378 offset = 0x7f2c + (n - 3) * 12; in enter_smm_save_seg_32()
8450 put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase); in enter_smm_save_state_32()
8462 put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read(vcpu, i)); in enter_smm_save_state_64()
8476 put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase); in enter_smm_save_state_64()
8481 put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer); in enter_smm_save_state_64()
8515 trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true); in enter_smm()
8525 * Give pre_enter_smm() a chance to make ISA-specific changes to the in enter_smm()
8527 * the SMM state-save area. in enter_smm()
8531 vcpu->arch.hflags |= HF_SMM_MASK; in enter_smm()
8532 kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf)); in enter_smm()
8535 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in enter_smm()
8542 cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG); in enter_smm()
8544 vcpu->arch.cr0 = cr0; in enter_smm()
8554 cs.selector = (vcpu->arch.smbase >> 4) & 0xffff; in enter_smm()
8555 cs.base = vcpu->arch.smbase; in enter_smm()
8590 vcpu->arch.smi_pending = true; in process_smi()
8617 vcpu->arch.apicv_active = kvm_apicv_activated(vcpu->kvm); in kvm_vcpu_update_apicv()
8626 * In particular, kvm_request_apicv_update() expects kvm->srcu not to be
8628 * synchronize_srcu(&kvm->srcu).
8639 old = READ_ONCE(kvm->arch.apicv_inhibit_reasons); in kvm_request_apicv_update()
8648 old = cmpxchg(&kvm->arch.apicv_inhibit_reasons, expected, new); in kvm_request_apicv_update()
8676 bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256); in vcpu_scan_ioapic()
8678 if (irqchip_split(vcpu->kvm)) in vcpu_scan_ioapic()
8679 kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); in vcpu_scan_ioapic()
8681 if (vcpu->arch.apicv_active) in vcpu_scan_ioapic()
8683 if (ioapic_in_kernel(vcpu->kvm)) in vcpu_scan_ioapic()
8684 kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); in vcpu_scan_ioapic()
8688 vcpu->arch.load_eoi_exitmap_pending = true; in vcpu_scan_ioapic()
8697 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) in vcpu_load_eoi_exitmap()
8700 bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors, in vcpu_load_eoi_exitmap()
8701 vcpu_to_synic(vcpu)->vec_bitmap, 256); in vcpu_load_eoi_exitmap()
8732 smp_send_reschedule(vcpu->cpu); in __kvm_request_immediate_exit()
8753 if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) { in vcpu_enter_guest()
8763 kvm_gen_update_masterclock(vcpu->kvm); in vcpu_enter_guest()
8787 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; in vcpu_enter_guest()
8792 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; in vcpu_enter_guest()
8793 vcpu->mmio_needed = 0; in vcpu_enter_guest()
8799 vcpu->arch.apf.halted = true; in vcpu_enter_guest()
8814 BUG_ON(vcpu->arch.pending_ioapic_eoi > 255); in vcpu_enter_guest()
8815 if (test_bit(vcpu->arch.pending_ioapic_eoi, in vcpu_enter_guest()
8816 vcpu->arch.ioapic_handled_vectors)) { in vcpu_enter_guest()
8817 vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI; in vcpu_enter_guest()
8818 vcpu->run->eoi.vector = in vcpu_enter_guest()
8819 vcpu->arch.pending_ioapic_eoi; in vcpu_enter_guest()
8831 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in vcpu_enter_guest()
8832 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH; in vcpu_enter_guest()
8837 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in vcpu_enter_guest()
8838 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET; in vcpu_enter_guest()
8843 vcpu->run->exit_reason = KVM_EXIT_HYPERV; in vcpu_enter_guest()
8844 vcpu->run->hyperv = vcpu->arch.hyperv.exit; in vcpu_enter_guest()
8851 * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers in vcpu_enter_guest()
8852 * depend on the guest clock being up-to-date in vcpu_enter_guest()
8865 ++vcpu->stat.req_event; in vcpu_enter_guest()
8867 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { in vcpu_enter_guest()
8897 vcpu->mode = IN_GUEST_MODE; in vcpu_enter_guest()
8899 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in vcpu_enter_guest()
8902 * 1) We should set ->mode before checking ->requests. Please see in vcpu_enter_guest()
8905 * 2) For APICv, we should set ->mode before checking PID.ON. This in vcpu_enter_guest()
8919 if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active) in vcpu_enter_guest()
8923 vcpu->mode = OUTSIDE_GUEST_MODE; in vcpu_enter_guest()
8927 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in vcpu_enter_guest()
8943 if (unlikely(vcpu->arch.switch_db_regs)) { in vcpu_enter_guest()
8945 set_debugreg(vcpu->arch.eff_db[0], 0); in vcpu_enter_guest()
8946 set_debugreg(vcpu->arch.eff_db[1], 1); in vcpu_enter_guest()
8947 set_debugreg(vcpu->arch.eff_db[2], 2); in vcpu_enter_guest()
8948 set_debugreg(vcpu->arch.eff_db[3], 3); in vcpu_enter_guest()
8949 set_debugreg(vcpu->arch.dr6, 6); in vcpu_enter_guest()
8950 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; in vcpu_enter_guest()
8961 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { in vcpu_enter_guest()
8962 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); in vcpu_enter_guest()
8966 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; in vcpu_enter_guest()
8979 vcpu->arch.last_vmentry_cpu = vcpu->cpu; in vcpu_enter_guest()
8980 vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); in vcpu_enter_guest()
8982 vcpu->mode = OUTSIDE_GUEST_MODE; in vcpu_enter_guest()
8989 * VM-Exit on SVM and any ticks that occur between VM-Exit and now. in vcpu_enter_guest()
8996 ++vcpu->stat.exits; in vcpu_enter_guest()
9001 s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta; in vcpu_enter_guest()
9003 trace_kvm_wait_lapic_expire(vcpu->vcpu_id, delta); in vcpu_enter_guest()
9004 vcpu->arch.apic->lapic_timer.advance_expire_delta = S64_MIN; in vcpu_enter_guest()
9011 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in vcpu_enter_guest()
9021 if (unlikely(vcpu->arch.tsc_always_catchup)) in vcpu_enter_guest()
9024 if (vcpu->arch.apic_attention) in vcpu_enter_guest()
9034 if (unlikely(vcpu->arch.apic_attention)) in vcpu_enter_guest()
9044 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_block()
9046 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_block()
9056 switch (vcpu->arch.mp_state) { in vcpu_block()
9058 vcpu->arch.pv.pv_unhalted = false; in vcpu_block()
9059 vcpu->arch.mp_state = in vcpu_block()
9063 vcpu->arch.apf.halted = false; in vcpu_block()
9068 return -EINTR; in vcpu_block()
9076 kvm_x86_ops.nested_ops->check_events(vcpu); in kvm_vcpu_running()
9078 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && in kvm_vcpu_running()
9079 !vcpu->arch.apf.halted); in kvm_vcpu_running()
9085 struct kvm *kvm = vcpu->kvm; in vcpu_run()
9087 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_run()
9088 vcpu->arch.l1tf_flush_l1d = true; in vcpu_run()
9107 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; in vcpu_run()
9108 ++vcpu->stat.request_irq_exits; in vcpu_run()
9113 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_run()
9117 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_run()
9121 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_run()
9130 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in complete_emulated_io()
9132 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in complete_emulated_io()
9138 BUG_ON(!vcpu->arch.pio.count); in complete_emulated_pio()
9163 struct kvm_run *run = vcpu->run; in complete_emulated_mmio()
9167 BUG_ON(!vcpu->mmio_needed); in complete_emulated_mmio()
9170 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; in complete_emulated_mmio()
9171 len = min(8u, frag->len); in complete_emulated_mmio()
9172 if (!vcpu->mmio_is_write) in complete_emulated_mmio()
9173 memcpy(frag->data, run->mmio.data, len); in complete_emulated_mmio()
9175 if (frag->len <= 8) { in complete_emulated_mmio()
9178 vcpu->mmio_cur_fragment++; in complete_emulated_mmio()
9181 frag->data += len; in complete_emulated_mmio()
9182 frag->gpa += len; in complete_emulated_mmio()
9183 frag->len -= len; in complete_emulated_mmio()
9186 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { in complete_emulated_mmio()
9187 vcpu->mmio_needed = 0; in complete_emulated_mmio()
9189 /* FIXME: return into emulator if single-stepping. */ in complete_emulated_mmio()
9190 if (vcpu->mmio_is_write) in complete_emulated_mmio()
9192 vcpu->mmio_read_completed = 1; in complete_emulated_mmio()
9196 run->exit_reason = KVM_EXIT_MMIO; in complete_emulated_mmio()
9197 run->mmio.phys_addr = frag->gpa; in complete_emulated_mmio()
9198 if (vcpu->mmio_is_write) in complete_emulated_mmio()
9199 memcpy(run->mmio.data, frag->data, min(8u, frag->len)); in complete_emulated_mmio()
9200 run->mmio.len = min(8u, frag->len); in complete_emulated_mmio()
9201 run->mmio.is_write = vcpu->mmio_is_write; in complete_emulated_mmio()
9202 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in complete_emulated_mmio()
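
An emulated MMIO access larger than 8 bytes (e.g. a 16-byte SSE store) reaches user space as a series of fragments, at most 8 bytes per KVM_EXIT_MMIO round trip; complete_emulated_mmio() advances frag->data/gpa and shrinks frag->len until every fragment is consumed. The per-fragment advance, in isolation:

	#include <stdint.h>
	#include <string.h>

	struct mmio_frag {
		uint64_t gpa;
		uint8_t *data;
		unsigned int len;
	};

	/* Consume up to 8 bytes of one fragment; returns bytes handled. */
	static unsigned int mmio_step(struct mmio_frag *frag, uint8_t *run_data,
				      int is_write)
	{
		unsigned int len = frag->len < 8 ? frag->len : 8;

		if (is_write)
			memcpy(run_data, frag->data, len); /* guest -> user space */
		else
			memcpy(frag->data, run_data, len); /* user space -> guest */

		frag->data += len;
		frag->gpa += len;
		frag->len -= len;
		return len;
	}
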
9213 memcpy(&fpu->state, &current->thread.fpu.state, in kvm_save_current_fpu()
9224 kvm_save_current_fpu(vcpu->arch.user_fpu); in kvm_load_guest_fpu()
9227 __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state, in kvm_load_guest_fpu()
9241 kvm_save_current_fpu(vcpu->arch.guest_fpu); in kvm_put_guest_fpu()
9243 copy_kernel_to_fpregs(&vcpu->arch.user_fpu->state); in kvm_put_guest_fpu()
9248 ++vcpu->stat.fpu_reload; in kvm_put_guest_fpu()
9254 struct kvm_run *kvm_run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
9261 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { in kvm_arch_vcpu_ioctl_run()
9262 if (kvm_run->immediate_exit) { in kvm_arch_vcpu_ioctl_run()
9263 r = -EINTR; in kvm_arch_vcpu_ioctl_run()
9269 r = -EAGAIN; in kvm_arch_vcpu_ioctl_run()
9271 r = -EINTR; in kvm_arch_vcpu_ioctl_run()
9272 kvm_run->exit_reason = KVM_EXIT_INTR; in kvm_arch_vcpu_ioctl_run()
9273 ++vcpu->stat.signal_exits; in kvm_arch_vcpu_ioctl_run()
9278 if (kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) { in kvm_arch_vcpu_ioctl_run()
9279 r = -EINVAL; in kvm_arch_vcpu_ioctl_run()
9283 if (kvm_run->kvm_dirty_regs) { in kvm_arch_vcpu_ioctl_run()
9289 /* re-sync apic's tpr */ in kvm_arch_vcpu_ioctl_run()
9291 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { in kvm_arch_vcpu_ioctl_run()
9292 r = -EINVAL; in kvm_arch_vcpu_ioctl_run()
9297 if (unlikely(vcpu->arch.complete_userspace_io)) { in kvm_arch_vcpu_ioctl_run()
9298 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; in kvm_arch_vcpu_ioctl_run()
9299 vcpu->arch.complete_userspace_io = NULL; in kvm_arch_vcpu_ioctl_run()
9304 WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); in kvm_arch_vcpu_ioctl_run()
9306 if (kvm_run->immediate_exit) in kvm_arch_vcpu_ioctl_run()
9307 r = -EINTR; in kvm_arch_vcpu_ioctl_run()
9313 if (kvm_run->kvm_valid_regs) in kvm_arch_vcpu_ioctl_run()
9324 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { in __get_regs()
9332 emulator_writeback_register_cache(vcpu->arch.emulate_ctxt); in __get_regs()
9333 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in __get_regs()
9335 regs->rax = kvm_rax_read(vcpu); in __get_regs()
9336 regs->rbx = kvm_rbx_read(vcpu); in __get_regs()
9337 regs->rcx = kvm_rcx_read(vcpu); in __get_regs()
9338 regs->rdx = kvm_rdx_read(vcpu); in __get_regs()
9339 regs->rsi = kvm_rsi_read(vcpu); in __get_regs()
9340 regs->rdi = kvm_rdi_read(vcpu); in __get_regs()
9341 regs->rsp = kvm_rsp_read(vcpu); in __get_regs()
9342 regs->rbp = kvm_rbp_read(vcpu); in __get_regs()
9344 regs->r8 = kvm_r8_read(vcpu); in __get_regs()
9345 regs->r9 = kvm_r9_read(vcpu); in __get_regs()
9346 regs->r10 = kvm_r10_read(vcpu); in __get_regs()
9347 regs->r11 = kvm_r11_read(vcpu); in __get_regs()
9348 regs->r12 = kvm_r12_read(vcpu); in __get_regs()
9349 regs->r13 = kvm_r13_read(vcpu); in __get_regs()
9350 regs->r14 = kvm_r14_read(vcpu); in __get_regs()
9351 regs->r15 = kvm_r15_read(vcpu); in __get_regs()
9354 regs->rip = kvm_rip_read(vcpu); in __get_regs()
9355 regs->rflags = kvm_get_rflags(vcpu); in __get_regs()
9368 vcpu->arch.emulate_regs_need_sync_from_vcpu = true; in __set_regs()
9369 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in __set_regs()
9371 kvm_rax_write(vcpu, regs->rax); in __set_regs()
9372 kvm_rbx_write(vcpu, regs->rbx); in __set_regs()
9373 kvm_rcx_write(vcpu, regs->rcx); in __set_regs()
9374 kvm_rdx_write(vcpu, regs->rdx); in __set_regs()
9375 kvm_rsi_write(vcpu, regs->rsi); in __set_regs()
9376 kvm_rdi_write(vcpu, regs->rdi); in __set_regs()
9377 kvm_rsp_write(vcpu, regs->rsp); in __set_regs()
9378 kvm_rbp_write(vcpu, regs->rbp); in __set_regs()
9380 kvm_r8_write(vcpu, regs->r8); in __set_regs()
9381 kvm_r9_write(vcpu, regs->r9); in __set_regs()
9382 kvm_r10_write(vcpu, regs->r10); in __set_regs()
9383 kvm_r11_write(vcpu, regs->r11); in __set_regs()
9384 kvm_r12_write(vcpu, regs->r12); in __set_regs()
9385 kvm_r13_write(vcpu, regs->r13); in __set_regs()
9386 kvm_r14_write(vcpu, regs->r14); in __set_regs()
9387 kvm_r15_write(vcpu, regs->r15); in __set_regs()
9390 kvm_rip_write(vcpu, regs->rip); in __set_regs()
9391 kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED); in __set_regs()
9393 vcpu->arch.exception.pending = false; in __set_regs()
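
These two helpers back the KVM_GET_REGS and KVM_SET_REGS ioctls. For reference, the user-space side of the round trip (standard KVM API; vcpu_fd is an open vCPU file descriptor, error handling minimal):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	static int bump_rip(int vcpu_fd, unsigned long delta)
	{
		struct kvm_regs regs;

		if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)
			return -1;
		regs.rip += delta;		/* e.g. skip an emulated insn */
		return ioctl(vcpu_fd, KVM_SET_REGS, &regs);
	}
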
9420 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in __get_sregs()
9421 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in __get_sregs()
9422 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); in __get_sregs()
9423 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in __get_sregs()
9424 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in __get_sregs()
9425 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in __get_sregs()
9427 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in __get_sregs()
9428 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in __get_sregs()
9431 sregs->idt.limit = dt.size; in __get_sregs()
9432 sregs->idt.base = dt.address; in __get_sregs()
9434 sregs->gdt.limit = dt.size; in __get_sregs()
9435 sregs->gdt.base = dt.address; in __get_sregs()
9437 sregs->cr0 = kvm_read_cr0(vcpu); in __get_sregs()
9438 sregs->cr2 = vcpu->arch.cr2; in __get_sregs()
9439 sregs->cr3 = kvm_read_cr3(vcpu); in __get_sregs()
9440 sregs->cr4 = kvm_read_cr4(vcpu); in __get_sregs()
9441 sregs->cr8 = kvm_get_cr8(vcpu); in __get_sregs()
9442 sregs->efer = vcpu->arch.efer; in __get_sregs()
9443 sregs->apic_base = kvm_get_apic_base(vcpu); in __get_sregs()
9445 memset(sregs->interrupt_bitmap, 0, sizeof(sregs->interrupt_bitmap)); in __get_sregs()
9447 if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft) in __get_sregs()
9448 set_bit(vcpu->arch.interrupt.nr, in __get_sregs()
9449 (unsigned long *)sregs->interrupt_bitmap); in __get_sregs()
9469 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED && in kvm_arch_vcpu_ioctl_get_mpstate()
9470 vcpu->arch.pv.pv_unhalted) in kvm_arch_vcpu_ioctl_get_mpstate()
9471 mp_state->mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_ioctl_get_mpstate()
9473 mp_state->mp_state = vcpu->arch.mp_state; in kvm_arch_vcpu_ioctl_get_mpstate()
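/*
 * Editor's note: note the fold above -- a HALTED vCPU with a pending
 * pv-unhalt is reported to userspace as RUNNABLE, so the kick is not
 * lost across save/restore. Querying the state (vcpu_fd illustrative):
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int vcpu_is_runnable(int vcpu_fd)
{
	struct kvm_mp_state mp;

	if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp) < 0)
		return -1;
	return mp.mp_state == KVM_MP_STATE_RUNNABLE;
}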
9484 int ret = -EINVAL; in kvm_arch_vcpu_ioctl_set_mpstate()
9489 mp_state->mp_state != KVM_MP_STATE_RUNNABLE) in kvm_arch_vcpu_ioctl_set_mpstate()
9497 if ((kvm_vcpu_latch_init(vcpu) || vcpu->arch.smi_pending) && in kvm_arch_vcpu_ioctl_set_mpstate()
9498 (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED || in kvm_arch_vcpu_ioctl_set_mpstate()
9499 mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED)) in kvm_arch_vcpu_ioctl_set_mpstate()
9502 if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { in kvm_arch_vcpu_ioctl_set_mpstate()
9503 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; in kvm_arch_vcpu_ioctl_set_mpstate()
9504 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); in kvm_arch_vcpu_ioctl_set_mpstate()
9506 vcpu->arch.mp_state = mp_state->mp_state; in kvm_arch_vcpu_ioctl_set_mpstate()
9518 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in kvm_task_switch()
9526 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_task_switch()
9527 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in kvm_task_switch()
9528 vcpu->run->internal.ndata = 0; in kvm_task_switch()
9532 kvm_rip_write(vcpu, ctxt->eip); in kvm_task_switch()
9533 kvm_set_rflags(vcpu, ctxt->eflags); in kvm_task_switch()
9540 if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) { in kvm_valid_sregs()
9543 * 64-bit mode (though maybe in a 32-bit code segment). in kvm_valid_sregs()
9546 if (!(sregs->cr4 & X86_CR4_PAE) in kvm_valid_sregs()
9547 || !(sregs->efer & EFER_LMA)) in kvm_valid_sregs()
9548 return -EINVAL; in kvm_valid_sregs()
9551 * Not in 64-bit mode: EFER.LMA is clear and the code in kvm_valid_sregs()
9552 * segment cannot be 64-bit. in kvm_valid_sregs()
9554 if (sregs->efer & EFER_LMA || sregs->cs.l) in kvm_valid_sregs()
9555 return -EINVAL; in kvm_valid_sregs()
9558 return kvm_valid_cr4(vcpu, sregs->cr4); in kvm_valid_sregs()
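/*
 * Editor's note: kvm_valid_sregs() encodes the architectural rule that
 * paged long mode (EFER.LME with CR0.PG) requires CR4.PAE and a
 * matching EFER.LMA, while outside long mode LMA and CS.L must be
 * clear. Restated as a standalone predicate (a sketch; constants taken
 * from the SDM, not the kernel's headers):
 */
#include <stdbool.h>
#include <stdint.h>

#define X86_CR0_PG	(1ULL << 31)
#define X86_CR4_PAE	(1ULL << 5)
#define EFER_LME	(1ULL << 8)
#define EFER_LMA	(1ULL << 10)

static bool long_mode_sregs_consistent(uint64_t cr0, uint64_t cr4,
				       uint64_t efer, bool cs_l)
{
	if ((efer & EFER_LME) && (cr0 & X86_CR0_PG))
		return (cr4 & X86_CR4_PAE) && (efer & EFER_LMA);
	/* Not in long mode: LMA must be clear, CS cannot be 64-bit. */
	return !(efer & EFER_LMA) && !cs_l;
}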
9568 int ret = -EINVAL; in __set_sregs()
9573 apic_base_msr.data = sregs->apic_base; in __set_sregs()
9578 dt.size = sregs->idt.limit; in __set_sregs()
9579 dt.address = sregs->idt.base; in __set_sregs()
9581 dt.size = sregs->gdt.limit; in __set_sregs()
9582 dt.address = sregs->gdt.base; in __set_sregs()
9585 vcpu->arch.cr2 = sregs->cr2; in __set_sregs()
9586 mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; in __set_sregs()
9587 vcpu->arch.cr3 = sregs->cr3; in __set_sregs()
9590 kvm_set_cr8(vcpu, sregs->cr8); in __set_sregs()
9592 mmu_reset_needed |= vcpu->arch.efer != sregs->efer; in __set_sregs()
9593 kvm_x86_ops.set_efer(vcpu, sregs->efer); in __set_sregs()
9595 mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; in __set_sregs()
9596 kvm_x86_ops.set_cr0(vcpu, sregs->cr0); in __set_sregs()
9597 vcpu->arch.cr0 = sregs->cr0; in __set_sregs()
9599 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; in __set_sregs()
9600 cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) & in __set_sregs()
9602 kvm_x86_ops.set_cr4(vcpu, sregs->cr4); in __set_sregs()
9606 idx = srcu_read_lock(&vcpu->kvm->srcu); in __set_sregs()
9608 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); in __set_sregs()
9611 srcu_read_unlock(&vcpu->kvm->srcu, idx); in __set_sregs()
9618 (const unsigned long *)sregs->interrupt_bitmap, max_bits); in __set_sregs()
9624 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in __set_sregs()
9625 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in __set_sregs()
9626 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); in __set_sregs()
9627 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in __set_sregs()
9628 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in __set_sregs()
9629 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in __set_sregs()
9631 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in __set_sregs()
9632 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in __set_sregs()
9638 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && in __set_sregs()
9640 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in __set_sregs()
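/*
 * Editor's note: the 0xf000/0xffff0000 comparison just above
 * recognizes the architectural reset vector (real-mode CS aliasing the
 * top of 4GiB) and un-parks an INIT-wait AP once userspace restores
 * that state. The predicate in isolation, over the UAPI struct (a
 * sketch; the helper name is illustrative):
 */
#include <stdbool.h>
#include <linux/kvm.h>

static bool looks_like_reset_vector(const struct kvm_sregs *s)
{
	return s->cs.selector == 0xf000 && s->cs.base == 0xffff0000;
}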
9668 if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) { in kvm_arch_vcpu_ioctl_set_guest_debug()
9669 r = -EBUSY; in kvm_arch_vcpu_ioctl_set_guest_debug()
9670 if (vcpu->arch.exception.pending) in kvm_arch_vcpu_ioctl_set_guest_debug()
9672 if (dbg->control & KVM_GUESTDBG_INJECT_DB) in kvm_arch_vcpu_ioctl_set_guest_debug()
9684 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
9685 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) in kvm_arch_vcpu_ioctl_set_guest_debug()
9686 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
9688 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { in kvm_arch_vcpu_ioctl_set_guest_debug()
9690 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
9691 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; in kvm_arch_vcpu_ioctl_set_guest_debug()
9694 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
9698 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in kvm_arch_vcpu_ioctl_set_guest_debug()
9699 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) + in kvm_arch_vcpu_ioctl_set_guest_debug()
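/*
 * Editor's note: kvm_arch_vcpu_ioctl_set_guest_debug() is reached via
 * KVM_SET_GUEST_DEBUG. Arming single-step from userspace (vcpu_fd
 * illustrative; each subsequent KVM_RUN then exits with KVM_EXIT_DEBUG
 * after one guest instruction):
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_single_step(int vcpu_fd)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}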
9723 unsigned long vaddr = tr->linear_address; in kvm_arch_vcpu_ioctl_translate()
9729 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl_translate()
9731 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl_translate()
9732 tr->physical_address = gpa; in kvm_arch_vcpu_ioctl_translate()
9733 tr->valid = gpa != UNMAPPED_GVA; in kvm_arch_vcpu_ioctl_translate()
9734 tr->writeable = 1; in kvm_arch_vcpu_ioctl_translate()
9735 tr->usermode = 0; in kvm_arch_vcpu_ioctl_translate()
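/*
 * Editor's note: kvm_arch_vcpu_ioctl_translate() walks the guest page
 * tables for KVM_TRANSLATE; tr->valid is cleared when the walk hits an
 * unmapped GVA. The userspace side (vcpu_fd illustrative):
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void translate_gva(int vcpu_fd, unsigned long long gva)
{
	struct kvm_translation tr = { .linear_address = gva };

	if (ioctl(vcpu_fd, KVM_TRANSLATE, &tr) < 0) {
		perror("KVM_TRANSLATE");
		return;
	}
	printf("gva=%llx -> gpa=%llx valid=%u\n",
	       gva, tr.physical_address, tr.valid);
}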
9747 fxsave = &vcpu->arch.guest_fpu->state.fxsave; in kvm_arch_vcpu_ioctl_get_fpu()
9748 memcpy(fpu->fpr, fxsave->st_space, 128); in kvm_arch_vcpu_ioctl_get_fpu()
9749 fpu->fcw = fxsave->cwd; in kvm_arch_vcpu_ioctl_get_fpu()
9750 fpu->fsw = fxsave->swd; in kvm_arch_vcpu_ioctl_get_fpu()
9751 fpu->ftwx = fxsave->twd; in kvm_arch_vcpu_ioctl_get_fpu()
9752 fpu->last_opcode = fxsave->fop; in kvm_arch_vcpu_ioctl_get_fpu()
9753 fpu->last_ip = fxsave->rip; in kvm_arch_vcpu_ioctl_get_fpu()
9754 fpu->last_dp = fxsave->rdp; in kvm_arch_vcpu_ioctl_get_fpu()
9755 memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space)); in kvm_arch_vcpu_ioctl_get_fpu()
9767 fxsave = &vcpu->arch.guest_fpu->state.fxsave; in kvm_arch_vcpu_ioctl_set_fpu()
9769 memcpy(fxsave->st_space, fpu->fpr, 128); in kvm_arch_vcpu_ioctl_set_fpu()
9770 fxsave->cwd = fpu->fcw; in kvm_arch_vcpu_ioctl_set_fpu()
9771 fxsave->swd = fpu->fsw; in kvm_arch_vcpu_ioctl_set_fpu()
9772 fxsave->twd = fpu->ftwx; in kvm_arch_vcpu_ioctl_set_fpu()
9773 fxsave->fop = fpu->last_opcode; in kvm_arch_vcpu_ioctl_set_fpu()
9774 fxsave->rip = fpu->last_ip; in kvm_arch_vcpu_ioctl_set_fpu()
9775 fxsave->rdp = fpu->last_dp; in kvm_arch_vcpu_ioctl_set_fpu()
9776 memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space)); in kvm_arch_vcpu_ioctl_set_fpu()
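/*
 * Editor's note: the two memcpy sizes above mirror the FXSAVE layout:
 * st_space holds 8 x87/MMX registers padded to 16 bytes each (128
 * bytes), xmm_space holds 16 XMM slots of 16 bytes (256 bytes).
 * Reading the image from userspace (vcpu_fd illustrative):
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void dump_fpu_control(int vcpu_fd)
{
	struct kvm_fpu fpu;

	if (ioctl(vcpu_fd, KVM_GET_FPU, &fpu) < 0) {
		perror("KVM_GET_FPU");
		return;
	}
	printf("fcw=%04x fsw=%04x ftwx=%02x\n", fpu.fcw, fpu.fsw, fpu.ftwx);
}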
9786 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS) in store_regs()
9787 __get_regs(vcpu, &vcpu->run->s.regs.regs); in store_regs()
9789 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS) in store_regs()
9790 __get_sregs(vcpu, &vcpu->run->s.regs.sregs); in store_regs()
9792 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS) in store_regs()
9794 vcpu, &vcpu->run->s.regs.events); in store_regs()
9799 if (vcpu->run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS) in sync_regs()
9800 return -EINVAL; in sync_regs()
9802 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) { in sync_regs()
9803 __set_regs(vcpu, &vcpu->run->s.regs.regs); in sync_regs()
9804 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS; in sync_regs()
9806 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) { in sync_regs()
9807 if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs)) in sync_regs()
9808 return -EINVAL; in sync_regs()
9809 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS; in sync_regs()
9811 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) { in sync_regs()
9813 vcpu, &vcpu->run->s.regs.events)) in sync_regs()
9814 return -EINVAL; in sync_regs()
9815 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS; in sync_regs()
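/*
 * Editor's note: store_regs()/sync_regs() back the KVM_CAP_SYNC_REGS
 * fast path: register state travels through the mmap'ed kvm_run page
 * instead of separate GET/SET ioctls around every exit. A hedged
 * sketch (run is the mapped struct kvm_run, vcpu_fd illustrative):
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int run_with_synced_regs(int vcpu_fd, struct kvm_run *run)
{
	run->kvm_valid_regs = KVM_SYNC_X86_REGS;   /* fill s.regs.regs on exit */
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
		return -1;

	run->s.regs.regs.rax = 0;                  /* mutate in place ... */
	run->kvm_dirty_regs = KVM_SYNC_X86_REGS;   /* ... sync_regs() loads it */
	return ioctl(vcpu_fd, KVM_RUN, 0);
}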
9823 fpstate_init(&vcpu->arch.guest_fpu->state); in fx_init()
9825 vcpu->arch.guest_fpu->state.xsave.header.xcomp_bv = in fx_init()
9831 vcpu->arch.xcr0 = XFEATURE_MASK_FP; in fx_init()
9833 vcpu->arch.cr0 |= X86_CR0_ET; in fx_init()
9838 if (kvm_check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0) in kvm_arch_vcpu_precreate()
9850 if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu)) in kvm_arch_vcpu_create()
9851 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_create()
9853 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; in kvm_arch_vcpu_create()
9861 if (irqchip_in_kernel(vcpu->kvm)) { in kvm_arch_vcpu_create()
9865 if (kvm_apicv_activated(vcpu->kvm)) in kvm_arch_vcpu_create()
9866 vcpu->arch.apicv_active = true; in kvm_arch_vcpu_create()
9870 r = -ENOMEM; in kvm_arch_vcpu_create()
9875 vcpu->arch.pio_data = page_address(page); in kvm_arch_vcpu_create()
9877 vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4, in kvm_arch_vcpu_create()
9879 if (!vcpu->arch.mce_banks) in kvm_arch_vcpu_create()
9881 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; in kvm_arch_vcpu_create()
9883 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, in kvm_arch_vcpu_create()
9890 vcpu->arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache, in kvm_arch_vcpu_create()
9892 if (!vcpu->arch.user_fpu) { in kvm_arch_vcpu_create()
9897 vcpu->arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, in kvm_arch_vcpu_create()
9899 if (!vcpu->arch.guest_fpu) { in kvm_arch_vcpu_create()
9905 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); in kvm_arch_vcpu_create()
9907 vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; in kvm_arch_vcpu_create()
9912 vcpu->arch.pending_external_vector = -1; in kvm_arch_vcpu_create()
9913 vcpu->arch.preempted_in_kernel = false; in kvm_arch_vcpu_create()
9921 vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); in kvm_arch_vcpu_create()
9922 vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; in kvm_arch_vcpu_create()
9931 kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu); in kvm_arch_vcpu_create()
9933 kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu); in kvm_arch_vcpu_create()
9935 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); in kvm_arch_vcpu_create()
9937 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_create()
9939 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_create()
9941 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_create()
9951 struct kvm *kvm = vcpu->kvm; in kvm_arch_vcpu_postcreate()
9955 if (mutex_lock_killable(&vcpu->mutex)) in kvm_arch_vcpu_postcreate()
9962 vcpu->arch.msr_kvm_poll_control = 1; in kvm_arch_vcpu_postcreate()
9964 mutex_unlock(&vcpu->mutex); in kvm_arch_vcpu_postcreate()
9966 if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0) in kvm_arch_vcpu_postcreate()
9967 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, in kvm_arch_vcpu_postcreate()
9973 struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache; in kvm_arch_vcpu_destroy()
9976 kvm_release_pfn(cache->pfn, cache->dirty, cache); in kvm_arch_vcpu_destroy()
9982 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); in kvm_arch_vcpu_destroy()
9983 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_destroy()
9984 kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu); in kvm_arch_vcpu_destroy()
9985 kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu); in kvm_arch_vcpu_destroy()
9989 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_destroy()
9991 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_destroy()
9993 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_destroy()
9994 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_destroy()
9995 kvfree(vcpu->arch.cpuid_entries); in kvm_arch_vcpu_destroy()
10004 vcpu->arch.hflags = 0; in kvm_vcpu_reset()
10006 vcpu->arch.smi_pending = 0; in kvm_vcpu_reset()
10007 vcpu->arch.smi_count = 0; in kvm_vcpu_reset()
10008 atomic_set(&vcpu->arch.nmi_queued, 0); in kvm_vcpu_reset()
10009 vcpu->arch.nmi_pending = 0; in kvm_vcpu_reset()
10010 vcpu->arch.nmi_injected = false; in kvm_vcpu_reset()
10014 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); in kvm_vcpu_reset()
10016 vcpu->arch.dr6 = DR6_INIT; in kvm_vcpu_reset()
10017 vcpu->arch.dr7 = DR7_FIXED_1; in kvm_vcpu_reset()
10020 vcpu->arch.cr2 = 0; in kvm_vcpu_reset()
10023 vcpu->arch.apf.msr_en_val = 0; in kvm_vcpu_reset()
10024 vcpu->arch.apf.msr_int_val = 0; in kvm_vcpu_reset()
10025 vcpu->arch.st.msr_val = 0; in kvm_vcpu_reset()
10031 vcpu->arch.apf.halted = false; in kvm_vcpu_reset()
10042 mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave, in kvm_vcpu_reset()
10046 mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave, in kvm_vcpu_reset()
10056 vcpu->arch.smbase = 0x30000; in kvm_vcpu_reset()
10058 vcpu->arch.msr_misc_features_enables = 0; in kvm_vcpu_reset()
10060 vcpu->arch.xcr0 = XFEATURE_MASK_FP; in kvm_vcpu_reset()
10063 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); in kvm_vcpu_reset()
10064 vcpu->arch.regs_avail = ~0; in kvm_vcpu_reset()
10065 vcpu->arch.regs_dirty = ~0; in kvm_vcpu_reset()
10067 vcpu->arch.ia32_xss = 0; in kvm_vcpu_reset()
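/*
 * Editor's note: regs_avail/regs_dirty = ~0 just above marks every
 * lazily-cached GPR as valid in the software cache and as pending
 * write-back, so the zeroed reset values reach the VMCS/VMCB on the
 * next entry. The read half of that caching pattern, in isolation (a
 * sketch; names are illustrative, not the kernel's):
 */
#include <stdint.h>

struct gpr_cache {
	uint64_t val[16];
	uint32_t avail;		/* bit n set: val[n] mirrors hardware */
	uint32_t dirty;		/* bit n set: val[n] awaits write-back */
};

static uint64_t cached_read(struct gpr_cache *c, unsigned int n,
			    uint64_t (*hw_read)(unsigned int))
{
	if (!(c->avail & (1U << n))) {
		c->val[n] = hw_read(n);	/* slow path: fetch from hardware */
		c->avail |= 1U << n;
	}
	return c->val[n];
}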
10102 if (!stable && vcpu->cpu == smp_processor_id()) in kvm_arch_hardware_enable()
10104 if (stable && vcpu->arch.last_host_tsc > local_tsc) { in kvm_arch_hardware_enable()
10106 if (vcpu->arch.last_host_tsc > max_tsc) in kvm_arch_hardware_enable()
10107 max_tsc = vcpu->arch.last_host_tsc; in kvm_arch_hardware_enable()
10137 * N.B. - this code below runs only on platforms with reliable TSC, in kvm_arch_hardware_enable()
10151 u64 delta_cyc = max_tsc - local_tsc; in kvm_arch_hardware_enable()
10153 kvm->arch.backwards_tsc_observed = true; in kvm_arch_hardware_enable()
10155 vcpu->arch.tsc_offset_adjustment += delta_cyc; in kvm_arch_hardware_enable()
10156 vcpu->arch.last_host_tsc = local_tsc; in kvm_arch_hardware_enable()
10166 kvm->arch.last_tsc_nsec = 0; in kvm_arch_hardware_enable()
10167 kvm->arch.last_tsc_write = 0; in kvm_arch_hardware_enable()
10190 r = ops->hardware_setup(); in kvm_arch_hardware_setup()
10194 memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops)); in kvm_arch_hardware_setup()
10235 return -EIO; in kvm_arch_check_processor_compat()
10237 return ops->check_processor_compatibility(); in kvm_arch_check_processor_compat()
10242 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id; in kvm_vcpu_is_reset_bsp()
10248 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; in kvm_vcpu_is_bsp()
10258 vcpu->arch.l1tf_flush_l1d = true; in kvm_arch_sched_in()
10259 if (pmu->version && unlikely(pmu->event_count)) { in kvm_arch_sched_in()
10260 pmu->need_cleanup = true; in kvm_arch_sched_in()
10268 kfree(kvm->arch.hyperv.hv_pa_pg); in kvm_arch_free_vm()
10276 return -EINVAL; in kvm_arch_init_vm()
10278 INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list); in kvm_arch_init_vm()
10279 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); in kvm_arch_init_vm()
10280 INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages); in kvm_arch_init_vm()
10281 INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages); in kvm_arch_init_vm()
10282 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); in kvm_arch_init_vm()
10283 atomic_set(&kvm->arch.noncoherent_dma_count, 0); in kvm_arch_init_vm()
10286 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); in kvm_arch_init_vm()
10287 /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */ in kvm_arch_init_vm()
10289 &kvm->arch.irq_sources_bitmap); in kvm_arch_init_vm()
10291 raw_spin_lock_init(&kvm->arch.tsc_write_lock); in kvm_arch_init_vm()
10292 mutex_init(&kvm->arch.apic_map_lock); in kvm_arch_init_vm()
10293 spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock); in kvm_arch_init_vm()
10295 kvm->arch.kvmclock_offset = -get_kvmclock_base_ns(); in kvm_arch_init_vm()
10298 kvm->arch.guest_can_read_msr_platform_info = true; in kvm_arch_init_vm()
10300 INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); in kvm_arch_init_vm()
10301 INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); in kvm_arch_init_vm()
10337 mutex_lock(&kvm->lock); in kvm_free_vcpus()
10338 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) in kvm_free_vcpus()
10339 kvm->vcpus[i] = NULL; in kvm_free_vcpus()
10341 atomic_set(&kvm->online_vcpus, 0); in kvm_free_vcpus()
10342 mutex_unlock(&kvm->lock); in kvm_free_vcpus()
10347 cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work); in kvm_arch_sync_events()
10348 cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work); in kvm_arch_sync_events()
10359 /* Called with kvm->slots_lock held. */ in __x86_set_memory_region()
10361 return -EINVAL; in __x86_set_memory_region()
10365 if (slot && slot->npages) in __x86_set_memory_region()
10366 return -EEXIST; in __x86_set_memory_region()
10377 if (!slot || !slot->npages) in __x86_set_memory_region()
10380 old_npages = slot->npages; in __x86_set_memory_region()
10413 if (current->mm == kvm->mm) { in kvm_arch_destroy_vm()
10419 mutex_lock(&kvm->slots_lock); in kvm_arch_destroy_vm()
10425 mutex_unlock(&kvm->slots_lock); in kvm_arch_destroy_vm()
10429 for (i = 0; i < kvm->arch.msr_filter.count; i++) in kvm_arch_destroy_vm()
10430 kfree(kvm->arch.msr_filter.ranges[i].bitmap); in kvm_arch_destroy_vm()
10434 kvfree(rcu_dereference_check(kvm->arch.apic_map, 1)); in kvm_arch_destroy_vm()
10435 kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1)); in kvm_arch_destroy_vm()
10446 kvfree(slot->arch.rmap[i]); in kvm_arch_free_memslot()
10447 slot->arch.rmap[i] = NULL; in kvm_arch_free_memslot()
10452 kvfree(slot->arch.lpage_info[i - 1]); in kvm_arch_free_memslot()
10453 slot->arch.lpage_info[i - 1] = NULL; in kvm_arch_free_memslot()
10469 memset(&slot->arch, 0, sizeof(slot->arch)); in kvm_alloc_memslot_metadata()
10477 lpages = gfn_to_index(slot->base_gfn + npages - 1, in kvm_alloc_memslot_metadata()
10478 slot->base_gfn, level) + 1; in kvm_alloc_memslot_metadata()
10480 slot->arch.rmap[i] = in kvm_alloc_memslot_metadata()
10481 kvcalloc(lpages, sizeof(*slot->arch.rmap[i]), in kvm_alloc_memslot_metadata()
10483 if (!slot->arch.rmap[i]) in kvm_alloc_memslot_metadata()
10492 slot->arch.lpage_info[i - 1] = linfo; in kvm_alloc_memslot_metadata()
10494 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) in kvm_alloc_memslot_metadata()
10496 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) in kvm_alloc_memslot_metadata()
10497 linfo[lpages - 1].disallow_lpage = 1; in kvm_alloc_memslot_metadata()
10498 ugfn = slot->userspace_addr >> PAGE_SHIFT; in kvm_alloc_memslot_metadata()
10503 if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1)) { in kvm_alloc_memslot_metadata()
10518 kvfree(slot->arch.rmap[i]); in kvm_alloc_memslot_metadata()
10519 slot->arch.rmap[i] = NULL; in kvm_alloc_memslot_metadata()
10523 kvfree(slot->arch.lpage_info[i - 1]); in kvm_alloc_memslot_metadata()
10524 slot->arch.lpage_info[i - 1] = NULL; in kvm_alloc_memslot_metadata()
10526 return -ENOMEM; in kvm_alloc_memslot_metadata()
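/*
 * Editor's note: the alignment tests in kvm_alloc_memslot_metadata()
 * above disallow huge mappings at a slot's unaligned head and tail,
 * and for the whole slot when guest-physical and host-virtual
 * addresses are not congruent modulo the huge page size. The
 * congruence test alone, for the 2MiB level (a sketch with locally
 * defined constants):
 */
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT_4K	12
#define PAGES_PER_2M	(1ULL << 9)	/* 512 base pages per 2MiB page */

static bool gfn_hva_congruent_2m(uint64_t base_gfn, uint64_t userspace_addr)
{
	uint64_t ugfn = userspace_addr >> PAGE_SHIFT_4K;

	return ((base_gfn ^ ugfn) & (PAGES_PER_2M - 1)) == 0;
}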
10535 * memslots->generation has been incremented. in kvm_arch_memslots_updated()
10540 /* Force re-initialization of steal_time cache */ in kvm_arch_memslots_updated()
10552 mem->memory_size >> PAGE_SHIFT); in kvm_arch_prepare_memory_region()
10565 if ((change != KVM_MR_FLAGS_ONLY) || (new->flags & KVM_MEM_READONLY)) in kvm_mmu_slot_apply_flags()
10577 * which can be collapsed into a single large-page spte. Later in kvm_mmu_slot_apply_flags()
10578 * page faults will create the large-page sptes. in kvm_mmu_slot_apply_flags()
10585 if ((old->flags & KVM_MEM_LOG_DIRTY_PAGES) && in kvm_mmu_slot_apply_flags()
10586 !(new->flags & KVM_MEM_LOG_DIRTY_PAGES)) in kvm_mmu_slot_apply_flags()
10602 * When disabling dirty logging with PML enabled, the D-bit is set in kvm_mmu_slot_apply_flags()
10609 * When enabling dirty logging, large sptes are write-protected in kvm_mmu_slot_apply_flags()
10614 * initial-all-set state. Otherwise, depending on whether pml in kvm_mmu_slot_apply_flags()
10615 * is enabled the D-bit or the W-bit will be cleared. in kvm_mmu_slot_apply_flags()
10617 if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) { in kvm_mmu_slot_apply_flags()
10626			 * If we start out in the initial-all-set mode, we don't need in kvm_mmu_slot_apply_flags()
10629 * we still need to write-protect huge pages in kvm_mmu_slot_apply_flags()
10647 if (!kvm->arch.n_requested_mmu_pages) in kvm_arch_commit_memory_region()
10652 * FIXME: const-ify all uses of struct kvm_memory_slot. in kvm_arch_commit_memory_region()
10681 if (!list_empty_careful(&vcpu->async_pf.done)) in kvm_vcpu_has_events()
10687 if (vcpu->arch.pv.pv_unhalted) in kvm_vcpu_has_events()
10690 if (vcpu->arch.exception.pending) in kvm_vcpu_has_events()
10694 (vcpu->arch.nmi_pending && in kvm_vcpu_has_events()
10699 (vcpu->arch.smi_pending && in kvm_vcpu_has_events()
10712 kvm_x86_ops.nested_ops->hv_timer_pending && in kvm_vcpu_has_events()
10713 kvm_x86_ops.nested_ops->hv_timer_pending(vcpu)) in kvm_vcpu_has_events()
10726 if (READ_ONCE(vcpu->arch.pv.pv_unhalted)) in kvm_arch_dy_runnable()
10734 if (vcpu->arch.apicv_active && kvm_x86_ops.dy_apicv_has_pending_interrupt(vcpu)) in kvm_arch_dy_runnable()
10742 return vcpu->arch.preempted_in_kernel; in kvm_arch_vcpu_in_kernel()
10775 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in kvm_get_rflags()
10783 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && in __kvm_set_rflags()
10784 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) in __kvm_set_rflags()
10800 if ((vcpu->arch.mmu->direct_map != work->arch.direct_map) || in kvm_arch_async_page_ready()
10801 work->wakeup_all) in kvm_arch_async_page_ready()
10808 if (!vcpu->arch.mmu->direct_map && in kvm_arch_async_page_ready()
10809 work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu)) in kvm_arch_async_page_ready()
10812 kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true); in kvm_arch_async_page_ready()
10824 return (key + 1) & (ASYNC_PF_PER_VCPU - 1); in kvm_async_pf_next_probe()
10831 while (vcpu->arch.apf.gfns[key] != ~0) in kvm_add_async_pf_gfn()
10834 vcpu->arch.apf.gfns[key] = gfn; in kvm_add_async_pf_gfn()
10843 (vcpu->arch.apf.gfns[key] != gfn && in kvm_async_pf_gfn_slot()
10844 vcpu->arch.apf.gfns[key] != ~0); i++) in kvm_async_pf_gfn_slot()
10852 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; in kvm_find_async_pf_gfn()
10861 if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn)) in kvm_del_async_pf_gfn()
10865 vcpu->arch.apf.gfns[i] = ~0; in kvm_del_async_pf_gfn()
10868 if (vcpu->arch.apf.gfns[j] == ~0) in kvm_del_async_pf_gfn()
10870 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); in kvm_del_async_pf_gfn()
10877 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; in kvm_del_async_pf_gfn()
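/*
 * Editor's note: kvm_del_async_pf_gfn() above is the classic
 * "backward shift" deletion for a linear-probing hash table: after
 * clearing slot i, entries later in the same probe cluster are pulled
 * back across the hole whenever their home bucket no longer reaches
 * them. The same algorithm standalone (a sketch; hash_key is
 * illustrative, TABLE_SIZE must be a power of two like
 * ASYNC_PF_PER_VCPU):
 */
#include <stdint.h>

#define TABLE_SIZE	64U
#define EMPTY_SLOT	(~(uint64_t)0)

static uint32_t hash_key(uint64_t key)
{
	return (uint32_t)(key * 0x9E3779B97F4A7C15ULL) & (TABLE_SIZE - 1);
}

static void probe_delete(uint64_t table[], uint32_t i)
{
	uint32_t j = i, k;

	table[i] = EMPTY_SLOT;
	for (;;) {
		j = (j + 1) & (TABLE_SIZE - 1);
		if (table[j] == EMPTY_SLOT)
			return;
		k = hash_key(table[j]);
		/*
		 * Leave entries whose home bucket k lies in the cyclic
		 * interval (i, j]; everything else shifts into the hole.
		 */
		if (i <= j ? (i < k && k <= j) : (i < k || k <= j))
			continue;
		table[i] = table[j];
		table[j] = EMPTY_SLOT;
		i = j;
	}
}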
10886 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason, in apf_put_user_notpresent()
10894 return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, in apf_put_user_ready()
10903 if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, in apf_pageready_slot_free()
10912 if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu)) in kvm_can_deliver_async_pf()
10916 (vcpu->arch.apf.send_user_only && kvm_x86_ops.get_cpl(vcpu) == 0)) in kvm_can_deliver_async_pf()
10926 vcpu->arch.exception.pending)) in kvm_can_do_async_pf()
10929 if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu)) in kvm_can_do_async_pf()
10944 trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa); in kvm_arch_async_page_not_present()
10945 kvm_add_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_not_present()
10953 fault.address = work->arch.token; in kvm_arch_async_page_not_present()
10976 .vector = vcpu->arch.apf.vec in kvm_arch_async_page_present()
10979 if (work->wakeup_all) in kvm_arch_async_page_present()
10980 work->arch.token = ~0; /* broadcast wakeup */ in kvm_arch_async_page_present()
10982 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_present()
10983 trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa); in kvm_arch_async_page_present()
10985 if ((work->wakeup_all || work->notpresent_injected) && in kvm_arch_async_page_present()
10987 !apf_put_user_ready(vcpu, work->arch.token)) { in kvm_arch_async_page_present()
10988 vcpu->arch.apf.pageready_pending = true; in kvm_arch_async_page_present()
10992 vcpu->arch.apf.halted = false; in kvm_arch_async_page_present()
10993 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_async_page_present()
10999 if (!vcpu->arch.apf.pageready_pending) in kvm_arch_async_page_present_queued()
11013 atomic_inc(&kvm->arch.assigned_device_count); in kvm_arch_start_assignment()
11019 atomic_dec(&kvm->arch.assigned_device_count); in kvm_arch_end_assignment()
11025 return atomic_read(&kvm->arch.assigned_device_count); in kvm_arch_has_assigned_device()
11031 atomic_inc(&kvm->arch.noncoherent_dma_count); in kvm_arch_register_noncoherent_dma()
11037 atomic_dec(&kvm->arch.noncoherent_dma_count); in kvm_arch_unregister_noncoherent_dma()
11043 return atomic_read(&kvm->arch.noncoherent_dma_count); in kvm_arch_has_noncoherent_dma()
11059 irqfd->producer = prod; in kvm_arch_irq_bypass_add_producer()
11060 kvm_arch_start_assignment(irqfd->kvm); in kvm_arch_irq_bypass_add_producer()
11061 ret = kvm_x86_ops.update_pi_irte(irqfd->kvm, in kvm_arch_irq_bypass_add_producer()
11062 prod->irq, irqfd->gsi, 1); in kvm_arch_irq_bypass_add_producer()
11065 kvm_arch_end_assignment(irqfd->kvm); in kvm_arch_irq_bypass_add_producer()
11077 WARN_ON(irqfd->producer != prod); in kvm_arch_irq_bypass_del_producer()
11078 irqfd->producer = NULL; in kvm_arch_irq_bypass_del_producer()
11082 * remapped mode, so we can re-use the current implementation in kvm_arch_irq_bypass_del_producer()
11086 ret = kvm_x86_ops.update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0); in kvm_arch_irq_bypass_del_producer()
11089 " fails: %d\n", irqfd->consumer.token, ret); in kvm_arch_irq_bypass_del_producer()
11091 kvm_arch_end_assignment(irqfd->kvm); in kvm_arch_irq_bypass_del_producer()
11107 return (vcpu->arch.msr_kvm_poll_control & 1) == 0; in kvm_arch_no_poll()
11145 vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, &fault) != UNMAPPED_GVA) { in kvm_fixup_and_inject_pf_error()
11147 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page in kvm_fixup_and_inject_pf_error()
11157 vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault); in kvm_fixup_and_inject_pf_error()
11178 * doesn't seem to be a real use-case behind such requests, just return in kvm_handle_memory_failure()
11181 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_handle_memory_failure()
11182 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in kvm_handle_memory_failure()
11183 vcpu->run->internal.ndata = 0; in kvm_handle_memory_failure()
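/*
 * Editor's note: the KVM_EXIT_INTERNAL_ERROR raised above reaches
 * userspace through kvm_run->exit_reason once KVM_RUN returns.
 * Typical dispatch (run is the mmap'ed struct kvm_run; a sketch, with
 * other exit reasons elided):
 */
#include <stdio.h>
#include <stdlib.h>
#include <linux/kvm.h>

static void handle_exit(struct kvm_run *run)
{
	switch (run->exit_reason) {
	case KVM_EXIT_INTERNAL_ERROR:
		fprintf(stderr, "internal error, suberror=%u\n",
			run->internal.suberror);
		abort();
	default:
		break;	/* other exit reasons handled elsewhere */
	}
}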
11234 if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].pgd) in kvm_handle_invpcid()
11238 kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free); in kvm_handle_invpcid()
11250 * page tables, so a non-global flush just degenerates to a in kvm_handle_invpcid()