/Linux-v5.10/arch/x86/kvm/
pmu.c
    214   config = kvm_x86_ops.pmu_ops->find_arch_event(pmc_to_pmu(pmc),  in reprogram_gp_counter()
    268   kvm_x86_ops.pmu_ops->find_fixed_event(idx),  in reprogram_fixed_counter()
    277   struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);  in reprogram_counter()
    299   struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, bit);  in kvm_pmu_handle_event()
    321   return kvm_x86_ops.pmu_ops->is_valid_rdpmc_ecx(vcpu, idx);  in kvm_pmu_is_valid_rdpmc_ecx()
    371   pmc = kvm_x86_ops.pmu_ops->rdpmc_ecx_to_pmc(vcpu, idx, &mask);  in kvm_pmu_rdpmc()
    376   (kvm_x86_ops.get_cpl(vcpu) != 0) &&  in kvm_pmu_rdpmc()
    392   return kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr) ||  in kvm_pmu_is_valid_msr()
    393   kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, msr);  in kvm_pmu_is_valid_msr()
    399   struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr);  in kvm_pmu_mark_pmc_in_use()
    [all …]
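The pmu.c hits all dispatch through a second level of indirection: the global kvm_x86_ops table holds a pmu_ops pointer, and generic PMU code calls through it so the Intel and AMD PMU back ends stay interchangeable. Below is a minimal user-space sketch of that two-level ops-table shape; the struct layouts, callback names, and values are simplified stand-ins for illustration, not the kernel's definitions.

/*
 * Two-level dispatch: a global ops table that itself points at a vendor
 * sub-table, mirroring kvm_x86_ops.pmu_ops->... in the hits above.
 */
#include <stdio.h>

struct pmu_ops {
	int (*is_valid_msr)(int msr);          /* vendor-specific MSR check */
};

struct x86_ops {
	int (*get_cpl)(void);                  /* common callback */
	const struct pmu_ops *pmu_ops;         /* nested sub-table for PMU code */
};

/* A stand-in vendor implementation (think intel_pmu_ops in the kernel). */
static int demo_pmu_is_valid_msr(int msr) { return msr == 0x38f; }
static int demo_get_cpl(void) { return 0; }

static const struct pmu_ops demo_pmu_ops = { .is_valid_msr = demo_pmu_is_valid_msr };
static struct x86_ops x86_ops = { .get_cpl = demo_get_cpl, .pmu_ops = &demo_pmu_ops };

/* Generic PMU code dispatches through both levels, as pmu.c does above. */
static int pmu_is_valid_msr(int msr)
{
	return x86_ops.pmu_ops->is_valid_msr(msr);
}

int main(void)
{
	printf("msr 0x38f valid: %d, cpl: %d\n", pmu_is_valid_msr(0x38f), x86_ops.get_cpl());
	return 0;
}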
x86.c
    113   struct kvm_x86_ops kvm_x86_ops __read_mostly;
    114   EXPORT_SYMBOL_GPL(kvm_x86_ops);
    693   if (kvm_x86_ops.get_cpl(vcpu) <= required_cpl)  in kvm_require_cpl()
    835   kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l);  in kvm_set_cr0()
    848   kvm_x86_ops.set_cr0(vcpu, cr0);  in kvm_set_cr0()
    958   if (kvm_x86_ops.get_cpl(vcpu) != 0 ||  in kvm_set_xcr()
    1009  if (kvm_x86_ops.set_cr4(vcpu, cr4))  in kvm_set_cr4()
    1098  kvm_x86_ops.set_dr7(vcpu, dr7);  in kvm_update_dr7()
    1402  return kvm_x86_ops.get_msr_feature(msr);  in kvm_get_msr_feature()
    1478  r = kvm_x86_ops.set_efer(vcpu, efer);  in set_efer()
    [all …]
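x86.c owns the single writable instance of the table (x86.c:113-114 above) and exports it so the rest of the KVM module can dispatch through it; common helpers such as kvm_require_cpl() read the current vendor callback from that one struct. The sketch below models that "one shared table, many generic callers" shape; the vcpu type, the callback bodies, and the require_cpl() helper are simplified placeholders, not the kernel code.

/*
 * One shared ops instance, filled in by a back end and used by generic code.
 */
#include <stdbool.h>
#include <stdio.h>

struct vcpu { int id; };

struct x86_ops {
	int  (*get_cpl)(struct vcpu *vcpu);
	void (*set_cr0)(struct vcpu *vcpu, unsigned long cr0);
};

/* The single shared instance (cf. x86.c:113-114, where it is exported). */
static struct x86_ops x86_ops;

/* Generic helper: allow the operation only from a privileged-enough CPL. */
static bool require_cpl(struct vcpu *vcpu, int required_cpl)
{
	return x86_ops.get_cpl(vcpu) <= required_cpl;
}

/* A trivial vendor back end for the demo. */
static int  demo_get_cpl(struct vcpu *vcpu) { (void)vcpu; return 0; }
static void demo_set_cr0(struct vcpu *vcpu, unsigned long cr0)
{
	printf("vcpu %d: cr0 <- %#lx\n", vcpu->id, cr0);
}

int main(void)
{
	struct vcpu v = { .id = 0 };

	x86_ops.get_cpl = demo_get_cpl;
	x86_ops.set_cr0 = demo_set_cr0;

	if (require_cpl(&v, 0))
		x86_ops.set_cr0(&v, 0x80000001UL);
	return 0;
}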
kvm_cache_regs.h
    71    kvm_x86_ops.cache_reg(vcpu, reg);  in kvm_register_read()
    111   kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_PDPTR);  in kvm_pdptr_read()
    121   kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR0);  in kvm_read_cr0_bits()
    135   kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR4);  in kvm_read_cr4_bits()
    142   kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR3);  in kvm_read_cr3()
mmu.h
    98    kvm_x86_ops.load_mmu_pgd(vcpu, root_hpa | kvm_get_active_pcid(vcpu),  in kvm_mmu_load_pgd()
    170   int cpl = kvm_x86_ops.get_cpl(vcpu);  in permission_fault()
    171   unsigned long rflags = kvm_x86_ops.get_rflags(vcpu);  in permission_fault()
irq.c
    146   if (kvm_x86_ops.migrate_timers)  in __kvm_migrate_timers()
    147   kvm_x86_ops.migrate_timers(vcpu);  in __kvm_migrate_timers()
x86.h
    100   kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l);  in is_64_bit_mode()
    131   kvm_x86_ops.tlb_flush_current(vcpu);  in kvm_vcpu_flush_tlb_current()
    246   return is_smm(vcpu) || kvm_x86_ops.apic_init_signal_blocked(vcpu);  in kvm_vcpu_latch_init()
lapic.c
    121   return kvm_x86_ops.set_hv_timer  in kvm_can_use_hv_timer()
    487   kvm_x86_ops.hwapic_irr_update(vcpu,  in apic_clear_irr()
    518   kvm_x86_ops.hwapic_isr_update(vcpu, vec);  in apic_set_isr()
    566   kvm_x86_ops.hwapic_isr_update(vcpu,  in apic_clear_isr()
    704   highest_irr = kvm_x86_ops.sync_pir_to_irr(apic->vcpu);  in apic_has_interrupt_for_ppr()
    1093  if (kvm_x86_ops.deliver_posted_interrupt(vcpu, vector)) {  in __apic_accept_irq()
    1817  kvm_x86_ops.cancel_hv_timer(apic->vcpu);  in cancel_hv_timer()
    1834  if (kvm_x86_ops.set_hv_timer(vcpu, ktimer->tscdeadline, &expired))  in start_hv_timer()
    2264  kvm_x86_ops.set_virtual_apic_mode(vcpu);  in kvm_lapic_set_base()
    2341  kvm_x86_ops.apicv_post_state_restore(vcpu);  in kvm_lapic_reset()
    [all …]
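In the lapic.c hits, the presence of an optional callback doubles as the capability test: kvm_can_use_hv_timer() at line 121 simply returns whether kvm_x86_ops.set_hv_timer is installed, and the timer paths either use the hook or take another route. A small sketch of that idiom follows; the types, names, and the software-timer fallback are illustrative assumptions only.

/*
 * "Hook installed?" used as a feature query before calling the hook.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct vcpu { unsigned long long tsc_deadline; };

struct x86_ops {
	int  (*set_hv_timer)(struct vcpu *vcpu, unsigned long long deadline, bool *expired);
	void (*cancel_hv_timer)(struct vcpu *vcpu);
};

static struct x86_ops x86_ops;   /* hooks left NULL when the vendor lacks the feature */

static bool can_use_hv_timer(void)
{
	/* Capability check is just "did the vendor install the callback?" */
	return x86_ops.set_hv_timer != NULL;
}

static void start_timer(struct vcpu *vcpu)
{
	bool expired = false;

	if (!can_use_hv_timer()) {
		printf("falling back to a software timer\n");
		return;
	}
	if (x86_ops.set_hv_timer(vcpu, vcpu->tsc_deadline, &expired))
		x86_ops.cancel_hv_timer(vcpu);
}

int main(void)
{
	struct vcpu v = { .tsc_deadline = 1000 };

	start_timer(&v);   /* no hook installed here, so the fallback runs */
	return 0;
}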
pmu.h
    93    return kvm_x86_ops.pmu_ops->pmc_is_enabled(pmc);  in pmc_is_enabled()
hyperv.c
    1157  kvm_x86_ops.patch_hypercall(vcpu, instructions);  in kvm_hv_set_msr_pw()
    1748  if (kvm_x86_ops.get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {  in kvm_hv_hypercall()
    1972  if (kvm_x86_ops.nested_ops->get_evmcs_version)  in kvm_vcpu_ioctl_get_hv_cpuid()
    1973  evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu);  in kvm_vcpu_ioctl_get_hv_cpuid()
trace.h
    259   kvm_x86_ops.get_exit_info(vcpu, &__entry->info1, \
    741   __entry->csbase = kvm_x86_ops.get_segment_base(vcpu, VCPU_SREG_CS);
cpuid.c
    184   kvm_x86_ops.vcpu_after_set_cpuid(vcpu);  in kvm_vcpu_after_set_cpuid()
/Linux-v5.10/arch/x86/include/asm/
kvm_host.h
    1086  struct kvm_x86_ops {
    1308  struct kvm_x86_ops *runtime_ops;
    1320  extern struct kvm_x86_ops kvm_x86_ops;
    1325  return __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);  in kvm_arch_alloc_vm()
    1332  if (kvm_x86_ops.tlb_remote_flush &&  in kvm_arch_flush_remote_tlb()
    1333  !kvm_x86_ops.tlb_remote_flush(kvm))  in kvm_arch_flush_remote_tlb()
    1718  if (kvm_x86_ops.vcpu_blocking)  in kvm_arch_vcpu_blocking()
    1719  kvm_x86_ops.vcpu_blocking(vcpu);  in kvm_arch_vcpu_blocking()
    1724  if (kvm_x86_ops.vcpu_unblocking)  in kvm_arch_vcpu_unblocking()
    1725  kvm_x86_ops.vcpu_unblocking(vcpu);  in kvm_arch_vcpu_unblocking()
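kvm_host.h shows the other half of the contract: the struct kvm_x86_ops definition itself (line 1086), the extern declaration of the shared instance (line 1320), and inline wrappers that treat some callbacks as optional, either skipping them when absent (kvm_arch_vcpu_blocking) or trying them first and reporting failure so the caller can fall back to a generic path (kvm_arch_flush_remote_tlb). The vm_size member used at line 1325 lets generic code size the VM allocation from a value the vendor module fills in. The sketch below imitates the two optional-callback idioms with simplified stand-in types; it is not the kernel's inline code.

/*
 * Optional callbacks: skip when absent, or try first and report failure so
 * the caller can fall back to a generic implementation.
 */
#include <stdio.h>

struct kvm  { int id; };
struct vcpu { int id; };

struct x86_ops {
	int  (*tlb_remote_flush)(struct kvm *kvm);   /* optional fast path */
	void (*vcpu_blocking)(struct vcpu *vcpu);    /* optional notification */
};

static struct x86_ops x86_ops;

/* Returns 0 if the vendor fast path handled the flush, nonzero otherwise. */
static int arch_flush_remote_tlb(struct kvm *kvm)
{
	if (x86_ops.tlb_remote_flush && !x86_ops.tlb_remote_flush(kvm))
		return 0;
	return -1;   /* caller falls back to the generic flush */
}

static void arch_vcpu_blocking(struct vcpu *vcpu)
{
	if (x86_ops.vcpu_blocking)
		x86_ops.vcpu_blocking(vcpu);
}

int main(void)
{
	struct kvm  k = { .id = 0 };
	struct vcpu v = { .id = 0 };

	arch_vcpu_blocking(&v);                       /* silently skipped */
	printf("fast flush used: %s\n", arch_flush_remote_tlb(&k) ? "no" : "yes");
	return 0;
}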
/Linux-v5.10/arch/x86/kvm/mmu/
spte.c
    123   spte |= kvm_x86_ops.get_mt_mask(vcpu, gfn,  in make_spte()
mmu.c
    184   return kvm_x86_ops.tlb_remote_flush_with_range;  in kvm_available_flush_tlb_with_range()
    192   if (range && kvm_x86_ops.tlb_remote_flush_with_range)  in kvm_flush_remote_tlbs_with_range()
    193   ret = kvm_x86_ops.tlb_remote_flush_with_range(kvm, range);  in kvm_flush_remote_tlbs_with_range()
    1285  if (kvm_x86_ops.enable_log_dirty_pt_masked)  in kvm_arch_mmu_enable_log_dirty_pt_masked()
    1286  kvm_x86_ops.enable_log_dirty_pt_masked(kvm, slot, gfn_offset,  in kvm_arch_mmu_enable_log_dirty_pt_masked()
    4799  kvm_x86_ops.tlb_flush_current(vcpu);  in kvm_mmu_load()
    5113  kvm_x86_ops.tlb_flush_gva(vcpu, gva);  in kvm_mmu_invalidate_gva()
    5170  kvm_x86_ops.tlb_flush_gva(vcpu, gva);  in kvm_mmu_invpcid_gva()
paging_tmpl.h
    263   if (kvm_x86_ops.nested_ops->write_log_dirty(vcpu, addr))  in FNAME()
/Linux-v5.10/arch/x86/kvm/vmx/
vmx.c
    3075  if (kvm_x86_ops.tlb_remote_flush) {  in vmx_load_mmu_pgd()
    7496  if (kvm_x86_ops.set_hv_timer)  in vmx_post_block()
    7590  static struct kvm_x86_ops vmx_x86_ops __initdata = {
nested.c
    4686  if (kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL)) {  in nested_vmx_pmu_entry_exit_ctls_update()
/Linux-v5.10/arch/x86/kvm/svm/
svm.c
    4189  static struct kvm_x86_ops svm_x86_ops __initdata = {
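Both vendor modules define their own table (vmx_x86_ops at vmx.c:7590, svm_x86_ops at svm.c:4189), marked __initdata because once the chosen table has been installed as the runtime ops the per-vendor original is no longer needed; the runtime_ops pointer seen at kvm_host.h:1308 is how the selected table is handed to the setup code. The sketch below models that install step as a plain copy into the shared instance; the hardware_setup() name and the copy mechanism are simplified assumptions, not the exact v5.10 init path.

/*
 * Vendor-specific ops tables, one of which is copied into the shared
 * instance at setup time and used for all later dispatch.
 */
#include <string.h>
#include <stdio.h>

struct vcpu { int id; };

struct x86_ops {
	const char *name;
	int (*get_cpl)(struct vcpu *vcpu);
};

/* The shared table every caller dispatches through. */
static struct x86_ops x86_ops;

/* One table per vendor back end. */
static int svm_get_cpl(struct vcpu *vcpu) { (void)vcpu; return 0; }
static const struct x86_ops svm_ops = { .name = "svm", .get_cpl = svm_get_cpl };

static int vmx_get_cpl(struct vcpu *vcpu) { (void)vcpu; return 3; }
static const struct x86_ops vmx_ops = { .name = "vmx", .get_cpl = vmx_get_cpl };

/* Setup picks one back end and copies its table into the shared instance. */
static void hardware_setup(const struct x86_ops *runtime_ops)
{
	memcpy(&x86_ops, runtime_ops, sizeof(x86_ops));
}

int main(void)
{
	struct vcpu v = { .id = 0 };

	hardware_setup(&svm_ops);    /* or &vmx_ops, depending on the CPU */
	printf("%s: cpl=%d\n", x86_ops.name, x86_ops.get_cpl(&v));
	return 0;
}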