Lines matching refs: kvm_x86_ops
113 struct kvm_x86_ops kvm_x86_ops __read_mostly;
114 EXPORT_SYMBOL_GPL(kvm_x86_ops);
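
Lines 113-114 define the single, by-value ops table that every call below goes through; the vendor module's template is copied into it at hardware setup (see line 10194 near the end of this listing). Below is a trimmed sketch of the struct's shape, limited to members that appear in this listing; the real definition in arch/x86/include/asm/kvm_host.h has far more hooks, and exact signatures may differ by kernel version:

	struct kvm_x86_ops {
		int (*hardware_enable)(void);
		void (*hardware_disable)(void);

		void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
		void (*vcpu_put)(struct kvm_vcpu *vcpu);

		int (*get_cpl)(struct kvm_vcpu *vcpu);
		void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);

		/* Optional hooks stay NULL when a vendor doesn't implement them. */
		int (*mem_enc_op)(struct kvm *kvm, void __user *argp);

		struct kvm_x86_nested_ops *nested_ops;
	};
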
693 if (kvm_x86_ops.get_cpl(vcpu) <= required_cpl) in kvm_require_cpl()
835 kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l); in kvm_set_cr0()
848 kvm_x86_ops.set_cr0(vcpu, cr0); in kvm_set_cr0()
958 if (kvm_x86_ops.get_cpl(vcpu) != 0 || in kvm_set_xcr()
1009 if (kvm_x86_ops.set_cr4(vcpu, cr4)) in kvm_set_cr4()
1098 kvm_x86_ops.set_dr7(vcpu, dr7); in kvm_update_dr7()
1402 return kvm_x86_ops.get_msr_feature(msr); in kvm_get_msr_feature()
1478 r = kvm_x86_ops.set_efer(vcpu, efer); in set_efer()
1575 return kvm_x86_ops.set_msr(vcpu, &msr); in __kvm_set_msr()
1608 ret = kvm_x86_ops.get_msr(vcpu, &msr); in __kvm_get_msr()
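
The pattern behind lines 1575 and 1608: x86.c packs the MSR index, value, and origin into a struct msr_data and lets the vendor hook accept or reject the access. A minimal paraphrase, assuming the same era's struct layout (example_set_msr is a made-up name; the real caller is __kvm_set_msr):

	static int example_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
				   bool host_initiated)
	{
		struct msr_data msr = {
			.index = index,
			.data = data,
			/* Host-initiated writes bypass some guest-visible checks. */
			.host_initiated = host_initiated,
		};

		return kvm_x86_ops.set_msr(vcpu, &msr);
	}
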
2196 vcpu->arch.tsc_offset = kvm_x86_ops.write_l1_tsc_offset(vcpu, offset); in kvm_vcpu_write_tsc_offset()
2942 kvm_x86_ops.tlb_flush_all(vcpu); in kvm_vcpu_flush_tlb_all()
2948 kvm_x86_ops.tlb_flush_guest(vcpu); in kvm_vcpu_flush_tlb_guest()
3765 r = kvm_x86_ops.has_emulated_msr(MSR_IA32_SMBASE); in kvm_vm_ioctl_check_extension()
3768 r = !kvm_x86_ops.cpu_has_accelerated_tpr(); in kvm_vm_ioctl_check_extension()
3795 r = kvm_x86_ops.nested_ops->get_state ? in kvm_vm_ioctl_check_extension()
3796 kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0; in kvm_vm_ioctl_check_extension()
3799 r = kvm_x86_ops.enable_direct_tlbflush != NULL; in kvm_vm_ioctl_check_extension()
3802 r = kvm_x86_ops.nested_ops->enable_evmcs != NULL; in kvm_vm_ioctl_check_extension()
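
Lines 3765-3802 show how KVM_CHECK_EXTENSION answers are derived: a capability is reported as available only when the vendor module installed the corresponding optional hook. A hedged sketch of that shape (KVM_CAP_EXAMPLE and example_check_extension are hypothetical names for illustration):

	/* Hypothetical capability number, for illustration only. */
	#define KVM_CAP_EXAMPLE 9999

	static int example_check_extension(struct kvm *kvm, long ext)
	{
		int r = 0;

		switch (ext) {
		case KVM_CAP_EXAMPLE:
			/* Present iff the optional hook was filled in. */
			r = kvm_x86_ops.enable_direct_tlbflush != NULL;
			break;
		}
		return r;
	}
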
3924 if (kvm_x86_ops.has_wbinvd_exit()) in kvm_arch_vcpu_load()
3931 kvm_x86_ops.vcpu_load(vcpu, cpu); in kvm_arch_vcpu_load()
4001 vcpu->arch.preempted_in_kernel = !kvm_x86_ops.get_cpl(vcpu); in kvm_arch_vcpu_put()
4020 kvm_x86_ops.vcpu_put(vcpu); in kvm_arch_vcpu_put()
4034 kvm_x86_ops.sync_pir_to_irr(vcpu); in kvm_vcpu_ioctl_get_lapic()
4144 kvm_x86_ops.setup_mce(vcpu); in kvm_vcpu_ioctl_x86_setup_mce()
4248 events->interrupt.shadow = kvm_x86_ops.get_interrupt_shadow(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4252 events->nmi.masked = kvm_x86_ops.get_nmi_mask(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4319 kvm_x86_ops.set_interrupt_shadow(vcpu, in kvm_vcpu_ioctl_x86_set_vcpu_events()
4325 kvm_x86_ops.set_nmi_mask(vcpu, events->nmi.masked); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4598 if (!kvm_x86_ops.nested_ops->enable_evmcs) in kvm_vcpu_ioctl_enable_cap()
4600 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version); in kvm_vcpu_ioctl_enable_cap()
4609 if (!kvm_x86_ops.enable_direct_tlbflush) in kvm_vcpu_ioctl_enable_cap()
4612 return kvm_x86_ops.enable_direct_tlbflush(vcpu); in kvm_vcpu_ioctl_enable_cap()
4923 if (!kvm_x86_ops.nested_ops->get_state) in kvm_arch_vcpu_ioctl()
4931 r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state, in kvm_arch_vcpu_ioctl()
4953 if (!kvm_x86_ops.nested_ops->set_state) in kvm_arch_vcpu_ioctl()
4976 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state); in kvm_arch_vcpu_ioctl()
5020 ret = kvm_x86_ops.set_tss_addr(kvm, addr); in kvm_vm_ioctl_set_tss_addr()
5027 return kvm_x86_ops.set_identity_map_addr(kvm, ident_addr); in kvm_vm_ioctl_set_identity_map_addr()
5184 if (kvm_x86_ops.flush_log_dirty) in kvm_arch_sync_dirty_log()
5185 kvm_x86_ops.flush_log_dirty(kvm); in kvm_arch_sync_dirty_log()
5652 if (kvm_x86_ops.mem_enc_op) in kvm_arch_vm_ioctl()
5653 r = kvm_x86_ops.mem_enc_op(kvm, argp); in kvm_arch_vm_ioctl()
5664 if (kvm_x86_ops.mem_enc_reg_region) in kvm_arch_vm_ioctl()
5665 r = kvm_x86_ops.mem_enc_reg_region(kvm, &region); in kvm_arch_vm_ioctl()
5676 if (kvm_x86_ops.mem_enc_unreg_region) in kvm_arch_vm_ioctl()
5677 r = kvm_x86_ops.mem_enc_unreg_region(kvm, &region); in kvm_arch_vm_ioctl()
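
Lines 5652-5677 are the standard guard for optional hooks: pick a "not supported" errno up front, then call through only if the vendor (here SVM, which backs SEV memory encryption) provided an implementation. A simplified paraphrase of the KVM_MEMORY_ENCRYPT_OP arm:

	static int example_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
	{
		int r = -ENOTTY;	/* default when the hook is absent */

		if (kvm_x86_ops.mem_enc_op)
			r = kvm_x86_ops.mem_enc_op(kvm, argp);
		return r;
	}
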
5779 if (!kvm_x86_ops.has_emulated_msr(emulated_msrs_all[i])) in kvm_init_msr_list()
5842 kvm_x86_ops.set_segment(vcpu, var, seg); in kvm_set_segment()
5848 kvm_x86_ops.get_segment(vcpu, var, seg); in kvm_get_segment()
5868 u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_read()
5875 u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_fetch()
5883 u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_write()
5932 u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_fetch_guest_virt()
5957 u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_read_guest_virt()
5978 if (!system && kvm_x86_ops.get_cpl(vcpu) == 3) in emulator_read_std()
6031 if (!system && kvm_x86_ops.get_cpl(vcpu) == 3) in emulator_write_std()
6056 if (unlikely(!kvm_x86_ops.can_emulate_instruction(vcpu, NULL, 0))) in handle_ud()
6090 u32 access = ((kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0) in vcpu_mmio_gva_to_gpa()
6498 return kvm_x86_ops.get_segment_base(vcpu, seg); in get_segment_base()
6511 if (kvm_x86_ops.has_wbinvd_exit()) { in kvm_emulate_wbinvd_noskip()
6616 return kvm_x86_ops.get_cpl(emul_to_vcpu(ctxt)); in emulator_get_cpl()
6621 kvm_x86_ops.get_gdt(emul_to_vcpu(ctxt), dt); in emulator_get_gdt()
6626 kvm_x86_ops.get_idt(emul_to_vcpu(ctxt), dt); in emulator_get_idt()
6631 kvm_x86_ops.set_gdt(emul_to_vcpu(ctxt), dt); in emulator_set_gdt()
6636 kvm_x86_ops.set_idt(emul_to_vcpu(ctxt), dt); in emulator_set_idt()
6778 return kvm_x86_ops.check_intercept(emul_to_vcpu(ctxt), info, stage, in emulator_intercept()
6816 kvm_x86_ops.set_nmi_mask(emul_to_vcpu(ctxt), masked); in emulator_set_nmi_mask()
6832 return kvm_x86_ops.pre_leave_smm(emul_to_vcpu(ctxt), smstate); in emulator_pre_leave_smm()
6894 u32 int_shadow = kvm_x86_ops.get_interrupt_shadow(vcpu); in toggle_interruptibility()
6905 kvm_x86_ops.set_interrupt_shadow(vcpu, mask); in toggle_interruptibility()
6947 kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l); in init_emulate_ctxt()
7008 if (!is_guest_mode(vcpu) && kvm_x86_ops.get_cpl(vcpu) == 0) { in handle_emulation_failure()
7189 unsigned long rflags = kvm_x86_ops.get_rflags(vcpu); in kvm_skip_emulated_instruction()
7192 r = kvm_x86_ops.skip_emulated_instruction(vcpu); in kvm_skip_emulated_instruction()
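
Lines 7189-7192 pair two hooks deliberately: RFLAGS is read before the vendor advances RIP, so a guest-requested single-step (EFLAGS.TF) still traps after the skip. An abridged paraphrase of kvm_skip_emulated_instruction (kvm_vcpu_do_singlestep is a helper internal to x86.c):

	static int example_skip_emulated_instruction(struct kvm_vcpu *vcpu)
	{
		unsigned long rflags = kvm_x86_ops.get_rflags(vcpu);
		int r;

		r = kvm_x86_ops.skip_emulated_instruction(vcpu);
		if (r && unlikely(rflags & X86_EFLAGS_TF))
			/* Deliver the pending single-step trap to the guest. */
			r = kvm_vcpu_do_singlestep(vcpu);
		return r;
	}
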
7286 if (unlikely(!kvm_x86_ops.can_emulate_instruction(vcpu, insn, insn_len))) in x86_emulate_instruction()
7429 unsigned long rflags = kvm_x86_ops.get_rflags(vcpu); in x86_emulate_instruction()
7437 if (kvm_x86_ops.update_emulated_instruction) in x86_emulate_instruction()
7438 kvm_x86_ops.update_emulated_instruction(vcpu); in x86_emulate_instruction()
7767 user_mode = kvm_x86_ops.get_cpl(__this_cpu_read(current_vcpu)); in kvm_is_user_mode()
7847 if (kvm_x86_ops.hardware_enable) { in kvm_arch_init()
7950 kvm_x86_ops.hardware_enable = NULL; in kvm_arch_exit()
8088 if (kvm_x86_ops.get_cpl(vcpu) != 0) { in kvm_emulate_hypercall()
8145 kvm_x86_ops.patch_hypercall(vcpu, instruction); in emulator_fix_hypercall()
8174 if (!kvm_x86_ops.update_cr8_intercept) in update_cr8_intercept()
8193 kvm_x86_ops.update_cr8_intercept(vcpu, tpr, max_irr); in update_cr8_intercept()
8204 kvm_x86_ops.queue_exception(vcpu); in inject_pending_event()
8223 kvm_x86_ops.set_nmi(vcpu); in inject_pending_event()
8226 kvm_x86_ops.set_irq(vcpu); in inject_pending_event()
8241 r = kvm_x86_ops.nested_ops->check_events(vcpu); in inject_pending_event()
8267 kvm_x86_ops.queue_exception(vcpu); in inject_pending_event()
8283 r = can_inject ? kvm_x86_ops.smi_allowed(vcpu, true) : -EBUSY; in inject_pending_event()
8292 kvm_x86_ops.enable_smi_window(vcpu); in inject_pending_event()
8296 r = can_inject ? kvm_x86_ops.nmi_allowed(vcpu, true) : -EBUSY; in inject_pending_event()
8302 kvm_x86_ops.set_nmi(vcpu); in inject_pending_event()
8304 WARN_ON(kvm_x86_ops.nmi_allowed(vcpu, true) < 0); in inject_pending_event()
8307 kvm_x86_ops.enable_nmi_window(vcpu); in inject_pending_event()
8311 r = can_inject ? kvm_x86_ops.interrupt_allowed(vcpu, true) : -EBUSY; in inject_pending_event()
8316 kvm_x86_ops.set_irq(vcpu); in inject_pending_event()
8317 WARN_ON(kvm_x86_ops.interrupt_allowed(vcpu, true) < 0); in inject_pending_event()
8320 kvm_x86_ops.enable_irq_window(vcpu); in inject_pending_event()
8324 kvm_x86_ops.nested_ops->hv_timer_pending && in inject_pending_event()
8325 kvm_x86_ops.nested_ops->hv_timer_pending(vcpu)) in inject_pending_event()
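
Lines 8204-8325 implement an inject-or-wait scheme: an event is injected only if the vendor says it is architecturally deliverable right now; otherwise KVM requests a window VM-exit and retries from there. A simplified paraphrase of the interrupt leg (the real code also distinguishes "busy, retry later" from "blocked by guest state"):

	static void example_inject_interrupt(struct kvm_vcpu *vcpu, bool can_inject)
	{
		int r = can_inject ? kvm_x86_ops.interrupt_allowed(vcpu, true)
				   : -EBUSY;

		if (r > 0)
			kvm_x86_ops.set_irq(vcpu);	/* deliverable: inject now */
		else
			/* Not now: request a VM-exit as soon as it becomes legal. */
			kvm_x86_ops.enable_irq_window(vcpu);
	}
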
8345 if (kvm_x86_ops.get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) in process_nmi()
8435 kvm_x86_ops.get_gdt(vcpu, &dt); in enter_smm_save_state_32()
8439 kvm_x86_ops.get_idt(vcpu, &dt); in enter_smm_save_state_32()
8489 kvm_x86_ops.get_idt(vcpu, &dt); in enter_smm_save_state_64()
8499 kvm_x86_ops.get_gdt(vcpu, &dt); in enter_smm_save_state_64()
8529 kvm_x86_ops.pre_enter_smm(vcpu, buf); in enter_smm()
8534 if (kvm_x86_ops.get_nmi_mask(vcpu)) in enter_smm()
8537 kvm_x86_ops.set_nmi_mask(vcpu, true); in enter_smm()
8543 kvm_x86_ops.set_cr0(vcpu, cr0); in enter_smm()
8546 kvm_x86_ops.set_cr4(vcpu, 0); in enter_smm()
8550 kvm_x86_ops.set_idt(vcpu, &dt); in enter_smm()
8581 kvm_x86_ops.set_efer(vcpu, 0); in enter_smm()
8619 kvm_x86_ops.refresh_apicv_exec_ctrl(vcpu); in kvm_vcpu_update_apicv()
8635 if (!kvm_x86_ops.check_apicv_inhibit_reasons || in kvm_request_apicv_update()
8636 !kvm_x86_ops.check_apicv_inhibit_reasons(bit)) in kvm_request_apicv_update()
8655 if (kvm_x86_ops.pre_update_apicv_exec_ctrl) in kvm_request_apicv_update()
8656 kvm_x86_ops.pre_update_apicv_exec_ctrl(kvm, activate); in kvm_request_apicv_update()
8682 kvm_x86_ops.sync_pir_to_irr(vcpu); in vcpu_scan_ioapic()
8702 kvm_x86_ops.load_eoi_exitmap(vcpu, eoi_exit_bitmap); in vcpu_load_eoi_exitmap()
8724 if (!kvm_x86_ops.set_apic_access_page_addr) in kvm_vcpu_reload_apic_access_page()
8727 kvm_x86_ops.set_apic_access_page_addr(vcpu); in kvm_vcpu_reload_apic_access_page()
8753 if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) { in vcpu_enter_guest()
8861 kvm_x86_ops.msr_filter_changed(vcpu); in vcpu_enter_guest()
8874 kvm_x86_ops.enable_irq_window(vcpu); in vcpu_enter_guest()
8889 kvm_x86_ops.prepare_guest_switch(vcpu); in vcpu_enter_guest()
8920 kvm_x86_ops.sync_pir_to_irr(vcpu); in vcpu_enter_guest()
8934 kvm_x86_ops.request_immediate_exit(vcpu); in vcpu_enter_guest()
8953 exit_fastpath = kvm_x86_ops.run(vcpu); in vcpu_enter_guest()
8963 kvm_x86_ops.sync_dirty_debug_regs(vcpu); in vcpu_enter_guest()
8985 kvm_x86_ops.handle_exit_irqoff(vcpu); in vcpu_enter_guest()
9027 r = kvm_x86_ops.handle_exit(vcpu, exit_fastpath); in vcpu_enter_guest()
9033 kvm_x86_ops.cancel_injection(vcpu); in vcpu_enter_guest()
9043 (!kvm_x86_ops.pre_block || kvm_x86_ops.pre_block(vcpu) == 0)) { in vcpu_block()
9048 if (kvm_x86_ops.post_block) in vcpu_block()
9049 kvm_x86_ops.post_block(vcpu); in vcpu_block()
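
Lines 9043-9049 bracket the halt with a pair of optional hooks: pre_block may veto blocking or re-arm state (VMX uses it for the posted-interrupt wakeup vector and the preemption timer), and post_block undoes whatever pre_block set up. A hedged paraphrase:

	static void example_vcpu_block(struct kvm_vcpu *vcpu)
	{
		/* Absent hook means "always allowed"; nonzero return vetoes. */
		if (kvm_x86_ops.pre_block && kvm_x86_ops.pre_block(vcpu))
			return;

		kvm_vcpu_block(vcpu);	/* sleep until a wake event */

		if (kvm_x86_ops.post_block)
			kvm_x86_ops.post_block(vcpu);
	}
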
9076 kvm_x86_ops.nested_ops->check_events(vcpu); in kvm_vcpu_running()
9430 kvm_x86_ops.get_idt(vcpu, &dt); in __get_sregs()
9433 kvm_x86_ops.get_gdt(vcpu, &dt); in __get_sregs()
9580 kvm_x86_ops.set_idt(vcpu, &dt); in __set_sregs()
9583 kvm_x86_ops.set_gdt(vcpu, &dt); in __set_sregs()
9593 kvm_x86_ops.set_efer(vcpu, sregs->efer); in __set_sregs()
9596 kvm_x86_ops.set_cr0(vcpu, sregs->cr0); in __set_sregs()
9602 kvm_x86_ops.set_cr4(vcpu, sregs->cr4); in __set_sregs()
9708 kvm_x86_ops.update_exception_bitmap(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
9917 r = kvm_x86_ops.vcpu_create(vcpu); in kvm_arch_vcpu_create()
9980 kvm_x86_ops.vcpu_free(vcpu); in kvm_arch_vcpu_destroy()
10069 kvm_x86_ops.vcpu_reset(vcpu, init_event); in kvm_vcpu_reset()
10094 ret = kvm_x86_ops.hardware_enable(); in kvm_arch_hardware_enable()
10176 kvm_x86_ops.hardware_disable(); in kvm_arch_hardware_disable()
10194 memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops)); in kvm_arch_hardware_setup()
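
Line 10194 is where the table gets its contents: the vendor module hands kvm_arch_hardware_setup() a struct kvm_x86_init_ops whose runtime_ops points at a fully populated template, and the memcpy turns every later kvm_x86_ops.foo(...) in this listing into a direct struct-member call with no extra pointer hop. A sketch modeled on the VMX side of the same kernel era (heavily abridged):

	static struct kvm_x86_ops vmx_x86_ops __initdata = {
		.hardware_enable = hardware_enable,
		.vcpu_load = vmx_vcpu_load,
		.get_cpl = vmx_get_cpl,
		/* ...dozens more hooks... */
	};

	static struct kvm_x86_init_ops vmx_init_ops __initdata = {
		.hardware_setup = hardware_setup,
		.runtime_ops = &vmx_x86_ops,	/* copied by line 10194 */
	};

The template can live in __initdata precisely because it is copied by value rather than referenced at runtime.
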
10223 kvm_x86_ops.hardware_unsetup(); in kvm_arch_hardware_unsetup()
10263 kvm_x86_ops.sched_in(vcpu, cpu); in kvm_arch_sched_in()
10307 return kvm_x86_ops.vm_init(kvm); in kvm_arch_init_vm()
10427 if (kvm_x86_ops.vm_destroy) in kvm_arch_destroy_vm()
10428 kvm_x86_ops.vm_destroy(kvm); in kvm_arch_destroy_vm()
10618 if (kvm_x86_ops.slot_enable_log_dirty) { in kvm_mmu_slot_apply_flags()
10619 kvm_x86_ops.slot_enable_log_dirty(kvm, new); in kvm_mmu_slot_apply_flags()
10636 if (kvm_x86_ops.slot_disable_log_dirty) in kvm_mmu_slot_apply_flags()
10637 kvm_x86_ops.slot_disable_log_dirty(kvm, new); in kvm_mmu_slot_apply_flags()
10675 kvm_x86_ops.guest_apic_has_interrupt && in kvm_guest_apic_has_interrupt()
10676 kvm_x86_ops.guest_apic_has_interrupt(vcpu)); in kvm_guest_apic_has_interrupt()
10695 kvm_x86_ops.nmi_allowed(vcpu, false))) in kvm_vcpu_has_events()
10700 kvm_x86_ops.smi_allowed(vcpu, false))) in kvm_vcpu_has_events()
10712 kvm_x86_ops.nested_ops->hv_timer_pending && in kvm_vcpu_has_events()
10713 kvm_x86_ops.nested_ops->hv_timer_pending(vcpu)) in kvm_vcpu_has_events()
10734 if (vcpu->arch.apicv_active && kvm_x86_ops.dy_apicv_has_pending_interrupt(vcpu)) in kvm_arch_dy_runnable()
10752 return kvm_x86_ops.interrupt_allowed(vcpu, false); in kvm_arch_interrupt_allowed()
10774 rflags = kvm_x86_ops.get_rflags(vcpu); in kvm_get_rflags()
10786 kvm_x86_ops.set_rflags(vcpu, rflags); in __kvm_set_rflags()
10916 (vcpu->arch.apf.send_user_only && kvm_x86_ops.get_cpl(vcpu) == 0)) in kvm_can_deliver_async_pf()
11061 ret = kvm_x86_ops.update_pi_irte(irqfd->kvm, in kvm_arch_irq_bypass_add_producer()
11086 ret = kvm_x86_ops.update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0); in kvm_arch_irq_bypass_del_producer()
11097 return kvm_x86_ops.update_pi_irte(kvm, host_irq, guest_irq, set); in kvm_arch_update_irqfd_routing()
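
The last three lines are the IRQ-bypass path: when a VFIO (or similar) producer attaches to an irqfd, update_pi_irte rewrites the interrupt-remapping entry so device interrupts post directly to the guest, bypassing the host handler. A simplified paraphrase of the add-producer callback at line 11061 (the real one also records the producer and starts device-assignment tracking):

	static int example_add_producer(struct irq_bypass_consumer *cons,
					struct irq_bypass_producer *prod)
	{
		struct kvm_kernel_irqfd *irqfd =
			container_of(cons, struct kvm_kernel_irqfd, consumer);

		/* Final argument 1 = route this IRQ to the guest via posted
		 * interrupts; 0 (as at line 11086) tears the route down. */
		return kvm_x86_ops.update_pi_irte(irqfd->kvm, prod->irq,
						  irqfd->gsi, 1);
	}
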