| /Linux-v5.4/arch/x86/kvm/ |
| D | pmu.c |
    186   config = kvm_x86_ops->pmu_ops->find_arch_event(pmc_to_pmu(pmc),   in reprogram_gp_counter()
    228   kvm_x86_ops->pmu_ops->find_fixed_event(idx),   in reprogram_fixed_counter()
    237   struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);   in reprogram_counter()
    262   struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, bit);   in kvm_pmu_handle_event()
    276   return kvm_x86_ops->pmu_ops->is_valid_msr_idx(vcpu, idx);   in kvm_pmu_is_valid_msr_idx()
    326   pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx, &mask);   in kvm_pmu_rdpmc()
    342   return kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, msr);   in kvm_pmu_is_valid_msr()
    347   return kvm_x86_ops->pmu_ops->get_msr(vcpu, msr, data);   in kvm_pmu_get_msr()
    352   return kvm_x86_ops->pmu_ops->set_msr(vcpu, msr_info);   in kvm_pmu_set_msr()
    361   kvm_x86_ops->pmu_ops->refresh(vcpu);   in kvm_pmu_refresh()
    [all …]
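Every pmu.c hit goes through a second level of indirection, `kvm_x86_ops->pmu_ops`, so the generic PMU code stays vendor-neutral: one dispatch picks the vendor module (vmx/svm), the next picks its PMU flavour (intel/amd). A minimal sketch of that double dispatch, with the ops struct trimmed from v5.4's arch/x86/kvm/pmu.h to only the hooks visible above:

```c
/* Abridged from v5.4 arch/x86/kvm/pmu.h: vendor PMU hook table */
struct kvm_pmu_ops {
	unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
				    u8 unit_mask);
	unsigned (*find_fixed_event)(int idx);
	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	/* ... remaining hooks elided ... */
};

/* A generic call site then dispatches through both tables */
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	return kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, msr);
}
```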
|
| D | x86.c |
    108   struct kvm_x86_ops *kvm_x86_ops __read_mostly;
    109   EXPORT_SYMBOL_GPL(kvm_x86_ops);
    630   if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)   in kvm_require_cpl()
    777   kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);   in kvm_set_cr0()
    790   kvm_x86_ops->set_cr0(vcpu, cr0);   in kvm_set_cr0()
    879   if (kvm_x86_ops->get_cpl(vcpu) != 0 ||   in kvm_set_xcr()
    944   if (kvm_x86_ops->set_cr4(vcpu, cr4))   in kvm_set_cr4()
    1028  kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6);   in kvm_update_dr6()
    1039  kvm_x86_ops->set_dr7(vcpu, dr7);   in kvm_update_dr7()
    1105  *val = kvm_x86_ops->get_dr6(vcpu);   in kvm_get_dr()
    [all …]
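Hits 108-109 are the single definition behind every call site in this listing: one global pointer, exported so the rest of KVM can reach the loaded vendor module. How the pointer gets filled in does not show up in the search, so here is a simplified sketch of the v5.4 flow: kvm-intel or kvm-amd passes its table through `kvm_init()` (e.g. `kvm_init(&vmx_x86_ops, ...)`), and `kvm_arch_init()` latches it. Feature checks and error paths are elided.

```c
/* x86.c: the one global the whole listing dereferences */
struct kvm_x86_ops *kvm_x86_ops __read_mostly;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

/* Simplified sketch of how it is populated at module load */
int kvm_arch_init(void *opaque)
{
	struct kvm_x86_ops *ops = opaque;

	/* kvm-intel and kvm-amd are mutually exclusive */
	if (kvm_x86_ops) {
		printk(KERN_ERR "kvm: already loaded the other module\n");
		return -EEXIST;
	}
	/* ... hardware/feature checks elided ... */
	kvm_x86_ops = ops;
	return 0;
}
```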
|
| D | kvm_cache_regs.h |
    44    kvm_x86_ops->cache_reg(vcpu, reg);   in BUILD_KVM_GPR_ACCESSORS()
    84    kvm_x86_ops->cache_reg(vcpu, (enum kvm_reg)VCPU_EXREG_PDPTR);   in kvm_pdptr_read()
    93    kvm_x86_ops->decache_cr0_guest_bits(vcpu);   in kvm_read_cr0_bits()
    106   kvm_x86_ops->decache_cr4_guest_bits(vcpu);   in kvm_read_cr4_bits()
    113   kvm_x86_ops->decache_cr3(vcpu);   in kvm_read_cr3()
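The `cache_reg` hits implement lazy register reads: after a VM exit, a guest register is pulled out of hardware state (the VMCS, on Intel) only the first time something actually wants it. The v5.4 accessor is short enough to quote nearly verbatim:

```c
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
					      enum kvm_reg reg)
{
	/* regs_avail tracks which registers were already fetched since
	 * the last VM exit; on a miss, ask the vendor module for it */
	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, reg);

	return vcpu->arch.regs[reg];
}
```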
|
| D | cpuid.c |
    51    && kvm_x86_ops->mpx_supported());   in kvm_mpx_supported()
    235   kvm_x86_ops->cpuid_update(vcpu);   in kvm_vcpu_ioctl_set_cpuid()
    258   kvm_x86_ops->cpuid_update(vcpu);   in kvm_vcpu_ioctl_set_cpuid2()
    350   unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;   in do_cpuid_7_mask()
    352   unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;   in do_cpuid_7_mask()
    353   unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;   in do_cpuid_7_mask()
    435   unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)   in __do_cpuid_func()
    442   unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;   in __do_cpuid_func()
    443   unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;   in __do_cpuid_func()
    444   unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;   in __do_cpuid_func()
    [all …]
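The cpuid.c hits all follow one pattern: probe the vendor module for a capability, then advertise the matching CPUID bit to the guest only if KVM can actually virtualize it on this hardware. Abridged from v5.4's `__do_cpuid_func()`, where `F()` is cpuid.c's feature-bit macro:

```c
/* advertise a feature only when the vendor module can back it */
unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
			? F(GBPAGES) : 0;
```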
|
| D | mmu.h |
    160   int cpl = kvm_x86_ops->get_cpl(vcpu);   in permission_fault()
    161   unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);   in permission_fault()
|
| D | lapic.c |
    454   kvm_x86_ops->hwapic_irr_update(vcpu,   in apic_clear_irr()
    479   kvm_x86_ops->hwapic_isr_update(vcpu, vec);   in apic_set_isr()
    527   kvm_x86_ops->hwapic_isr_update(vcpu,   in apic_clear_isr()
    670   highest_irr = kvm_x86_ops->sync_pir_to_irr(apic->vcpu);   in apic_has_interrupt_for_ppr()
    1060  kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);   in __apic_accept_irq()
    1704  kvm_x86_ops->cancel_hv_timer(apic->vcpu);   in cancel_hv_timer()
    1715  if (!kvm_x86_ops->set_hv_timer)   in start_hv_timer()
    1721  if (kvm_x86_ops->set_hv_timer(vcpu, ktimer->tscdeadline, &expired))   in start_hv_timer()
    2141  kvm_x86_ops->set_virtual_apic_mode(vcpu);   in kvm_lapic_set_base()
    2204  kvm_x86_ops->apicv_post_state_restore(vcpu);   in kvm_lapic_reset()
    [all …]
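Hits 1715 and 1721 show the "optional hook" side of the design: `set_hv_timer` exists only when VMX's preemption timer is usable (SVM leaves it NULL), so the APIC timer code probes for the hook and falls back to a software hrtimer when it is absent or fails. Abridged from v5.4's `start_hv_timer()`:

```c
static bool start_hv_timer(struct kvm_lapic *apic)
{
	struct kvm_timer *ktimer = &apic->lapic_timer;
	struct kvm_vcpu *vcpu = apic->vcpu;
	bool expired;

	/* hook is NULL on SVM; returning false makes the caller fall
	 * back to the software timer */
	if (!kvm_x86_ops->set_hv_timer)
		return false;

	if (kvm_x86_ops->set_hv_timer(vcpu, ktimer->tscdeadline, &expired))
		return false;

	ktimer->hv_timer_in_use = true;
	/* ... immediate-expiry handling elided ... */
	return true;
}
```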
|
| D | pmu.h |
    79    return kvm_x86_ops->pmu_ops->pmc_is_enabled(pmc);   in pmc_is_enabled()
|
| D | mmu.c |
    316   return kvm_x86_ops->tlb_remote_flush_with_range;   in kvm_available_flush_tlb_with_range()
    324   if (range && kvm_x86_ops->tlb_remote_flush_with_range)   in kvm_flush_remote_tlbs_with_range()
    325   ret = kvm_x86_ops->tlb_remote_flush_with_range(kvm, range);   in kvm_flush_remote_tlbs_with_range()
    1345  max_level = min(kvm_x86_ops->get_lpage_level(), host_level);   in mapping_level()
    1803  if (kvm_x86_ops->enable_log_dirty_pt_masked)   in kvm_arch_mmu_enable_log_dirty_pt_masked()
    1804  kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, gfn_offset,   in kvm_arch_mmu_enable_log_dirty_pt_masked()
    1819  if (kvm_x86_ops->write_log_dirty)   in kvm_arch_write_log_dirty()
    1820  return kvm_x86_ops->write_log_dirty(vcpu);   in kvm_arch_write_log_dirty()
    3088  spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,   in set_spte()
    4398  kvm_x86_ops->tlb_flush(vcpu, true);   in fast_cr3_switch()
    [all …]
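Hits 316-325 form a complete probe-and-fallback idiom: a ranged remote TLB flush (a Hyper-V enlightenment wired up by vmx.c further down this listing) is used when the hook exists and succeeds, and a full flush covers every other case. This is essentially the v5.4 helper:

```c
static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
					     struct kvm_tlb_range *range)
{
	int ret = -ENOTSUPP;

	if (range && kvm_x86_ops->tlb_remote_flush_with_range)
		ret = kvm_x86_ops->tlb_remote_flush_with_range(kvm, range);

	/* hook absent or failed: flush everything instead */
	if (ret)
		kvm_flush_remote_tlbs(kvm);
}
```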
|
| D | x86.h |
    99    kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);   in is_64_bit_mode()
|
| D | hyperv.c |
    1021  kvm_x86_ops->patch_hypercall(vcpu, instructions);   in kvm_hv_set_msr_pw()
    1606  if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {   in kvm_hv_hypercall()
    1799  if (kvm_x86_ops->nested_get_evmcs_version)   in kvm_vcpu_ioctl_get_hv_cpuid()
    1800  evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu);   in kvm_vcpu_ioctl_get_hv_cpuid()
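Hit 1606 is a small, self-contained use of `get_cpl`: Hyper-V hypercalls are honored only from ring 0 in protected mode, and the current privilege level has to come from the vendor module because it lives in VMCS/VMCB state. From v5.4's `kvm_hv_hypercall()`:

```c
/* hypercalls from ring 3 or real mode get #UD instead of service */
if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
	kvm_queue_exception(vcpu, UD_VECTOR);
	return 1;
}
```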
|
| D | trace.h |
    243   kvm_x86_ops->get_exit_info(vcpu, &__entry->info1,
    747   __entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
|
| D | svm.c |
    7181  static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
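Hit 7181 is where AMD's side of the interface is defined: one designated-initializer table, sealed read-only after init by `__ro_after_init`, and later handed to `kvm_init()` from `svm_init()`. Abridged; the real v5.4 table fills well over a hundred hooks:

```c
static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
	.cpu_has_kvm_support = has_svm,
	.hardware_setup = svm_hardware_setup,
	.vcpu_create = svm_create_vcpu,
	.run = svm_vcpu_run,
	.get_cpl = svm_get_cpl,
	.set_cr0 = svm_set_cr0,
	/* ... remaining hooks elided ... */
};
```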
|
| /Linux-v5.4/arch/x86/include/asm/ |
| D | kvm_host.h |
    1007  struct kvm_x86_ops {   (struct definition)
    1228  extern struct kvm_x86_ops *kvm_x86_ops;   (declaration)
    1234  return kvm_x86_ops->vm_alloc();   in kvm_arch_alloc_vm()
    1239  return kvm_x86_ops->vm_free(kvm);   in kvm_arch_free_vm()
    1245  if (kvm_x86_ops->tlb_remote_flush &&   in kvm_arch_flush_remote_tlb()
    1246  !kvm_x86_ops->tlb_remote_flush(kvm))   in kvm_arch_flush_remote_tlb()
    1616  if (kvm_x86_ops->vcpu_blocking)   in kvm_arch_vcpu_blocking()
    1617  kvm_x86_ops->vcpu_blocking(vcpu);   in kvm_arch_vcpu_blocking()
    1622  if (kvm_x86_ops->vcpu_unblocking)   in kvm_arch_vcpu_unblocking()
    1623  kvm_x86_ops->vcpu_unblocking(vcpu);   in kvm_arch_vcpu_unblocking()
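kvm_host.h both declares the interface (hits 1007 and 1228) and shows its two calling conventions: mandatory hooks such as `vm_alloc` are called unconditionally, while optional ones such as `vcpu_blocking` are NULL-checked in inline wrappers. The wrappers are short enough to quote nearly verbatim from v5.4:

```c
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	/* optional hook: only some vendor modules provide it */
	if (kvm_x86_ops->vcpu_blocking)
		kvm_x86_ops->vcpu_blocking(vcpu);
}

static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->vcpu_unblocking)
		kvm_x86_ops->vcpu_unblocking(vcpu);
}
```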
|
| /Linux-v5.4/arch/x86/kvm/vmx/ |
| D | vmx.c |
    3006  if (kvm_x86_ops->tlb_remote_flush) {   in vmx_set_cr3()
    7409  if (kvm_x86_ops->set_hv_timer)   in vmx_post_block()
    7632  kvm_x86_ops->set_apic_access_page_addr = NULL;   in hardware_setup()
    7635  kvm_x86_ops->update_cr8_intercept = NULL;   in hardware_setup()
    7643  kvm_x86_ops->tlb_remote_flush = hv_remote_flush_tlb;   in hardware_setup()
    7644  kvm_x86_ops->tlb_remote_flush_with_range =   in hardware_setup()
    7659  kvm_x86_ops->sync_pir_to_irr = NULL;   in hardware_setup()
    7683  kvm_x86_ops->slot_enable_log_dirty = NULL;   in hardware_setup()
    7684  kvm_x86_ops->slot_disable_log_dirty = NULL;   in hardware_setup()
    7685  kvm_x86_ops->flush_log_dirty = NULL;   in hardware_setup()
    [all …]
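The `hardware_setup()` hits show the table being edited at init time: when a CPU feature is missing or disabled by a module parameter, vmx.c simply NULLs the related hooks, and the NULL checks at the call sites elsewhere in this listing do the rest; conversely, the Hyper-V enlightened flush is swapped in when available. Abridged from v5.4 (the enlightened-flush branch sits under CONFIG_HYPERV in the real source):

```c
/* prune hooks for unavailable or disabled features */
if (!enable_apicv)
	kvm_x86_ops->sync_pir_to_irr = NULL;

if (!enable_pml) {
	kvm_x86_ops->slot_enable_log_dirty = NULL;
	kvm_x86_ops->slot_disable_log_dirty = NULL;
	kvm_x86_ops->flush_log_dirty = NULL;
	kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
}

/* swap in a better implementation when the host offers one */
kvm_x86_ops->tlb_remote_flush = hv_remote_flush_tlb;
kvm_x86_ops->tlb_remote_flush_with_range = hv_remote_flush_tlb_with_range;
```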
|
| D | pmu_intel.c |
    311   if (kvm_x86_ops->pt_supported())   in intel_pmu_refresh()
|
| D | nested.c |
    6063  kvm_x86_ops->check_nested_events = vmx_check_nested_events;   in nested_vmx_hardware_setup()
    6064  kvm_x86_ops->get_nested_state = vmx_get_nested_state;   in nested_vmx_hardware_setup()
    6065  kvm_x86_ops->set_nested_state = vmx_set_nested_state;   in nested_vmx_hardware_setup()
    6066  kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages,   in nested_vmx_hardware_setup()
    6067  kvm_x86_ops->nested_enable_evmcs = nested_enable_evmcs;   in nested_vmx_hardware_setup()
    6068  kvm_x86_ops->nested_get_evmcs_version = nested_get_evmcs_version;   in nested_vmx_hardware_setup()
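nested.c shows the last variation on the pattern: nested-VMX hooks are absent from the static `vmx_x86_ops` table and are patched into the live table only when nesting is enabled. (The trailing comma at hit 6066 is in the v5.4 source itself; the comma operator chains it onto the next assignment, so it compiles cleanly.) Abridged:

```c
__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
{
	/* ... VMX MSR and control setup elided ... */
	kvm_x86_ops->check_nested_events = vmx_check_nested_events;
	kvm_x86_ops->get_nested_state = vmx_get_nested_state;
	kvm_x86_ops->set_nested_state = vmx_set_nested_state;
	kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages;
	kvm_x86_ops->nested_enable_evmcs = nested_enable_evmcs;
	kvm_x86_ops->nested_get_evmcs_version = nested_get_evmcs_version;

	return 0;
}
```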
|