Lines Matching refs:kvm_x86_ops

108 struct kvm_x86_ops *kvm_x86_ops __read_mostly;
109 EXPORT_SYMBOL_GPL(kvm_x86_ops);
630 if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl) in kvm_require_cpl()
777 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); in kvm_set_cr0()
790 kvm_x86_ops->set_cr0(vcpu, cr0); in kvm_set_cr0()
879 if (kvm_x86_ops->get_cpl(vcpu) != 0 || in kvm_set_xcr()
944 if (kvm_x86_ops->set_cr4(vcpu, cr4)) in kvm_set_cr4()
1028 kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6); in kvm_update_dr6()
1039 kvm_x86_ops->set_dr7(vcpu, dr7); in kvm_update_dr7()
1105 *val = kvm_x86_ops->get_dr6(vcpu); in kvm_get_dr()
1347 if (kvm_x86_ops->get_msr_feature(msr)) in kvm_get_msr_feature()
1415 kvm_x86_ops->set_efer(vcpu, efer); in set_efer()
1471 return kvm_x86_ops->set_msr(vcpu, &msr); in __kvm_set_msr()
1489 ret = kvm_x86_ops->get_msr(vcpu, &msr); in __kvm_get_msr()
1810 u64 curr_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu); in update_ia32_tsc_adjust_msr()
1852 u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu); in kvm_read_l1_tsc()
1860 vcpu->arch.tsc_offset = kvm_x86_ops->write_l1_tsc_offset(vcpu, offset); in kvm_vcpu_write_tsc_offset()
1984 u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu); in adjust_tsc_offset_guest()
2578 kvm_x86_ops->tlb_flush(vcpu, invalidate_gpa); in kvm_vcpu_flush_tlb()
3278 r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE); in kvm_vm_ioctl_check_extension()
3281 r = !kvm_x86_ops->cpu_has_accelerated_tpr(); in kvm_vm_ioctl_check_extension()
3308 r = kvm_x86_ops->get_nested_state ? in kvm_vm_ioctl_check_extension()
3309 kvm_x86_ops->get_nested_state(NULL, NULL, 0) : 0; in kvm_vm_ioctl_check_extension()
3312 r = kvm_x86_ops->enable_direct_tlbflush != NULL; in kvm_vm_ioctl_check_extension()
3315 r = kvm_x86_ops->nested_enable_evmcs != NULL; in kvm_vm_ioctl_check_extension()
3431 if (kvm_x86_ops->has_wbinvd_exit()) in kvm_arch_vcpu_load()
3438 kvm_x86_ops->vcpu_load(vcpu, cpu); in kvm_arch_vcpu_load()
3499 vcpu->arch.preempted_in_kernel = !kvm_x86_ops->get_cpl(vcpu); in kvm_arch_vcpu_put()
3518 kvm_x86_ops->vcpu_put(vcpu); in kvm_arch_vcpu_put()
3532 kvm_x86_ops->sync_pir_to_irr(vcpu); in kvm_vcpu_ioctl_get_lapic()
3640 kvm_x86_ops->setup_mce(vcpu); in kvm_vcpu_ioctl_x86_setup_mce()
3729 events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
3733 events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
3800 kvm_x86_ops->set_interrupt_shadow(vcpu, in kvm_vcpu_ioctl_x86_set_vcpu_events()
3806 kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4080 if (!kvm_x86_ops->nested_enable_evmcs) in kvm_vcpu_ioctl_enable_cap()
4082 r = kvm_x86_ops->nested_enable_evmcs(vcpu, &vmcs_version); in kvm_vcpu_ioctl_enable_cap()
4091 if (!kvm_x86_ops->enable_direct_tlbflush) in kvm_vcpu_ioctl_enable_cap()
4094 return kvm_x86_ops->enable_direct_tlbflush(vcpu); in kvm_vcpu_ioctl_enable_cap()
4397 if (!kvm_x86_ops->get_nested_state) in kvm_arch_vcpu_ioctl()
4405 r = kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state, in kvm_arch_vcpu_ioctl()
4426 if (!kvm_x86_ops->set_nested_state) in kvm_arch_vcpu_ioctl()
4447 r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state); in kvm_arch_vcpu_ioctl()
4490 ret = kvm_x86_ops->set_tss_addr(kvm, addr); in kvm_vm_ioctl_set_tss_addr()
4497 return kvm_x86_ops->set_identity_map_addr(kvm, ident_addr); in kvm_vm_ioctl_set_identity_map_addr()
4681 if (kvm_x86_ops->flush_log_dirty) in kvm_vm_ioctl_get_dirty_log()
4682 kvm_x86_ops->flush_log_dirty(kvm); in kvm_vm_ioctl_get_dirty_log()
4708 if (kvm_x86_ops->flush_log_dirty) in kvm_vm_ioctl_clear_dirty_log()
4709 kvm_x86_ops->flush_log_dirty(kvm); in kvm_vm_ioctl_clear_dirty_log()
5075 if (kvm_x86_ops->mem_enc_op) in kvm_arch_vm_ioctl()
5076 r = kvm_x86_ops->mem_enc_op(kvm, argp); in kvm_arch_vm_ioctl()
5087 if (kvm_x86_ops->mem_enc_reg_region) in kvm_arch_vm_ioctl()
5088 r = kvm_x86_ops->mem_enc_reg_region(kvm, &region); in kvm_arch_vm_ioctl()
5099 if (kvm_x86_ops->mem_enc_unreg_region) in kvm_arch_vm_ioctl()
5100 r = kvm_x86_ops->mem_enc_unreg_region(kvm, &region); in kvm_arch_vm_ioctl()
5151 if (!kvm_x86_ops->rdtscp_supported()) in kvm_init_msr_list()
5156 if (!kvm_x86_ops->pt_supported()) in kvm_init_msr_list()
5160 if (!kvm_x86_ops->pt_supported() || in kvm_init_msr_list()
5166 if (!kvm_x86_ops->pt_supported() || in kvm_init_msr_list()
5172 if (!kvm_x86_ops->pt_supported() || in kvm_init_msr_list()
5195 if (!kvm_x86_ops->has_emulated_msr(emulated_msrs_all[i])) in kvm_init_msr_list()
5258 kvm_x86_ops->set_segment(vcpu, var, seg); in kvm_set_segment()
5264 kvm_x86_ops->get_segment(vcpu, var, seg); in kvm_get_segment()
5284 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_read()
5291 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_fetch()
5299 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_write()
5348 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_fetch_guest_virt()
5373 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_read_guest_virt()
5394 if (!system && kvm_x86_ops->get_cpl(vcpu) == 3) in emulator_read_std()
5447 if (!system && kvm_x86_ops->get_cpl(vcpu) == 3) in emulator_write_std()
5509 u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0) in vcpu_mmio_gva_to_gpa()
5897 return kvm_x86_ops->get_segment_base(vcpu, seg); in get_segment_base()
5910 if (kvm_x86_ops->has_wbinvd_exit()) { in kvm_emulate_wbinvd_noskip()
6015 return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt)); in emulator_get_cpl()
6020 kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt); in emulator_get_gdt()
6025 kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt); in emulator_get_idt()
6030 kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt); in emulator_set_gdt()
6035 kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt); in emulator_set_idt()
6157 return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage); in emulator_intercept()
6178 kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked); in emulator_set_nmi_mask()
6194 return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smstate); in emulator_pre_leave_smm()
6253 u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu); in toggle_interruptibility()
6264 kvm_x86_ops->set_interrupt_shadow(vcpu, mask); in toggle_interruptibility()
6289 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); in init_emulate_ctxt()
6349 if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) { in handle_emulation_failure()
6528 unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); in kvm_skip_emulated_instruction()
6531 r = kvm_x86_ops->skip_emulated_instruction(vcpu); in kvm_skip_emulated_instruction()
6757 unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); in x86_emulate_instruction()
7093 user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu)); in kvm_is_user_mode()
7171 struct kvm_x86_ops *ops = opaque; in kvm_arch_init()
7173 if (kvm_x86_ops) { in kvm_arch_init()
7220 kvm_x86_ops = ops; in kvm_arch_init()
7268 kvm_x86_ops = NULL; in kvm_arch_exit()
7357 kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu); in kvm_vcpu_deactivate_apicv()
7402 if (kvm_x86_ops->get_cpl(vcpu) != 0) { in kvm_emulate_hypercall()
7448 kvm_x86_ops->patch_hypercall(vcpu, instruction); in emulator_fix_hypercall()
7477 if (!kvm_x86_ops->update_cr8_intercept) in update_cr8_intercept()
7496 kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr); in update_cr8_intercept()
7506 kvm_x86_ops->queue_exception(vcpu); in inject_pending_event()
7523 kvm_x86_ops->set_nmi(vcpu); in inject_pending_event()
7525 kvm_x86_ops->set_irq(vcpu); in inject_pending_event()
7534 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { in inject_pending_event()
7535 r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); in inject_pending_event()
7572 kvm_x86_ops->queue_exception(vcpu); in inject_pending_event()
7580 kvm_x86_ops->smi_allowed(vcpu)) { in inject_pending_event()
7584 } else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) { in inject_pending_event()
7587 kvm_x86_ops->set_nmi(vcpu); in inject_pending_event()
7596 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { in inject_pending_event()
7597 r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); in inject_pending_event()
7601 if (kvm_x86_ops->interrupt_allowed(vcpu)) { in inject_pending_event()
7604 kvm_x86_ops->set_irq(vcpu); in inject_pending_event()
7620 if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) in process_nmi()
7710 kvm_x86_ops->get_gdt(vcpu, &dt); in enter_smm_save_state_32()
7714 kvm_x86_ops->get_idt(vcpu, &dt); in enter_smm_save_state_32()
7764 kvm_x86_ops->get_idt(vcpu, &dt); in enter_smm_save_state_64()
7774 kvm_x86_ops->get_gdt(vcpu, &dt); in enter_smm_save_state_64()
7804 kvm_x86_ops->pre_enter_smm(vcpu, buf); in enter_smm()
7809 if (kvm_x86_ops->get_nmi_mask(vcpu)) in enter_smm()
7812 kvm_x86_ops->set_nmi_mask(vcpu, true); in enter_smm()
7818 kvm_x86_ops->set_cr0(vcpu, cr0); in enter_smm()
7821 kvm_x86_ops->set_cr4(vcpu, 0); in enter_smm()
7825 kvm_x86_ops->set_idt(vcpu, &dt); in enter_smm()
7856 kvm_x86_ops->set_efer(vcpu, 0); in enter_smm()
7885 kvm_x86_ops->sync_pir_to_irr(vcpu); in vcpu_scan_ioapic()
7905 kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap); in vcpu_load_eoi_exitmap()
7932 if (!kvm_x86_ops->set_apic_access_page_addr) in kvm_vcpu_reload_apic_access_page()
7938 kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page)); in kvm_vcpu_reload_apic_access_page()
7970 if (unlikely(!kvm_x86_ops->get_vmcs12_pages(vcpu))) { in vcpu_enter_guest()
8092 if (!kvm_x86_ops->enable_smi_window(vcpu)) in vcpu_enter_guest()
8095 kvm_x86_ops->enable_nmi_window(vcpu); in vcpu_enter_guest()
8097 kvm_x86_ops->enable_irq_window(vcpu); in vcpu_enter_guest()
8114 kvm_x86_ops->prepare_guest_switch(vcpu); in vcpu_enter_guest()
8145 kvm_x86_ops->sync_pir_to_irr(vcpu); in vcpu_enter_guest()
8160 kvm_x86_ops->request_immediate_exit(vcpu); in vcpu_enter_guest()
8179 kvm_x86_ops->run(vcpu); in vcpu_enter_guest()
8189 kvm_x86_ops->sync_dirty_debug_regs(vcpu); in vcpu_enter_guest()
8211 kvm_x86_ops->handle_exit_irqoff(vcpu); in vcpu_enter_guest()
8255 r = kvm_x86_ops->handle_exit(vcpu); in vcpu_enter_guest()
8259 kvm_x86_ops->cancel_injection(vcpu); in vcpu_enter_guest()
8269 (!kvm_x86_ops->pre_block || kvm_x86_ops->pre_block(vcpu) == 0)) { in vcpu_block()
8274 if (kvm_x86_ops->post_block) in vcpu_block()
8275 kvm_x86_ops->post_block(vcpu); in vcpu_block()
8302 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) in kvm_vcpu_running()
8303 kvm_x86_ops->check_nested_events(vcpu, false); in kvm_vcpu_running()
8647 kvm_x86_ops->get_idt(vcpu, &dt); in __get_sregs()
8650 kvm_x86_ops->get_gdt(vcpu, &dt); in __get_sregs()
8790 kvm_x86_ops->set_idt(vcpu, &dt); in __set_sregs()
8793 kvm_x86_ops->set_gdt(vcpu, &dt); in __set_sregs()
8803 kvm_x86_ops->set_efer(vcpu, sregs->efer); in __set_sregs()
8806 kvm_x86_ops->set_cr0(vcpu, sregs->cr0); in __set_sregs()
8812 kvm_x86_ops->set_cr4(vcpu, sregs->cr4); in __set_sregs()
8918 kvm_x86_ops->update_bp_intercept(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
9052 kvm_x86_ops->vcpu_free(vcpu); in kvm_arch_vcpu_free()
9066 vcpu = kvm_x86_ops->vcpu_create(kvm, id); in kvm_arch_vcpu_create()
9119 kvm_x86_ops->vcpu_free(vcpu); in kvm_arch_vcpu_destroy()
9192 kvm_x86_ops->vcpu_reset(vcpu, init_event); in kvm_vcpu_reset()
9217 ret = kvm_x86_ops->hardware_enable(); in kvm_arch_hardware_enable()
9299 kvm_x86_ops->hardware_disable(); in kvm_arch_hardware_disable()
9307 r = kvm_x86_ops->hardware_setup(); in kvm_arch_hardware_setup()
9331 kvm_x86_ops->hardware_unsetup(); in kvm_arch_hardware_unsetup()
9336 return kvm_x86_ops->check_processor_compatibility(); in kvm_arch_check_processor_compat()
9378 vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu); in kvm_arch_vcpu_init()
9448 kvm_x86_ops->sched_in(vcpu, cpu); in kvm_arch_sched_in()
9485 return kvm_x86_ops->vm_init(kvm); in kvm_arch_init_vm()
9611 if (kvm_x86_ops->vm_destroy) in kvm_arch_destroy_vm()
9612 kvm_x86_ops->vm_destroy(kvm); in kvm_arch_destroy_vm()
9768 if (kvm_x86_ops->slot_enable_log_dirty) in kvm_mmu_slot_apply_flags()
9769 kvm_x86_ops->slot_enable_log_dirty(kvm, new); in kvm_mmu_slot_apply_flags()
9773 if (kvm_x86_ops->slot_disable_log_dirty) in kvm_mmu_slot_apply_flags()
9774 kvm_x86_ops->slot_disable_log_dirty(kvm, new); in kvm_mmu_slot_apply_flags()
9838 kvm_x86_ops->guest_apic_has_interrupt && in kvm_guest_apic_has_interrupt()
9839 kvm_x86_ops->guest_apic_has_interrupt(vcpu)); in kvm_guest_apic_has_interrupt()
9858 kvm_x86_ops->nmi_allowed(vcpu))) in kvm_vcpu_has_events()
9891 if (vcpu->arch.apicv_active && kvm_x86_ops->dy_apicv_has_pending_interrupt(vcpu)) in kvm_arch_dy_runnable()
9909 return kvm_x86_ops->interrupt_allowed(vcpu); in kvm_arch_interrupt_allowed()
9931 rflags = kvm_x86_ops->get_rflags(vcpu); in kvm_get_rflags()
9943 kvm_x86_ops->set_rflags(vcpu, rflags); in __kvm_set_rflags()
10054 kvm_x86_ops->get_cpl(vcpu) == 0)) in kvm_can_deliver_async_pf()
10074 return kvm_x86_ops->interrupt_allowed(vcpu); in kvm_can_do_async_pf()
10203 return kvm_x86_ops->update_pi_irte(irqfd->kvm, in kvm_arch_irq_bypass_add_producer()
10223 ret = kvm_x86_ops->update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0); in kvm_arch_irq_bypass_del_producer()
10232 return kvm_x86_ops->update_pi_irte(kvm, host_irq, guest_irq, set); in kvm_arch_update_irqfd_routing()
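
Every hit above follows one pattern: the common x86 code (these functions, kvm_arch_init(), vcpu_enter_guest(), kvm_set_cr0() and so on, live in arch/x86/kvm/x86.c in kernels of this vintage) never calls VMX or SVM code directly. It dereferences the global kvm_x86_ops pointer, which kvm_arch_init() latches to the callback table handed in by the vendor module and kvm_arch_exit() clears again (lines 7173, 7220, 7268 above). Mandatory hooks such as get_cpl or run are invoked unconditionally; optional hooks such as flush_log_dirty, mem_enc_op or get_nested_state are NULL-checked first, which is also how capability queries like KVM_CAP_NESTED_STATE are answered. The following is a minimal, self-contained C sketch of that dispatch pattern, assuming invented names (demo_x86_ops, vendor_get_cpl, ...) purely for illustration; it is not the real KVM structure layout.

	/*
	 * Minimal sketch of the kvm_x86_ops dispatch pattern listed above.
	 * All names here (demo_*, vendor_*) are illustrative, not KVM's.
	 */
	#include <stddef.h>
	#include <stdio.h>

	struct demo_vcpu {
		int cpl;
	};

	/* Vendor callback table: some hooks are mandatory, some optional. */
	struct demo_x86_ops {
		int  (*get_cpl)(struct demo_vcpu *vcpu);         /* mandatory */
		void (*flush_log_dirty)(struct demo_vcpu *vcpu);  /* optional, may be NULL */
	};

	/* Common code keeps one global pointer, filled in by the vendor module. */
	static struct demo_x86_ops *demo_x86_ops;

	/* "Vendor" implementation, analogous to a vmx/svm ops table. */
	static int vendor_get_cpl(struct demo_vcpu *vcpu)
	{
		return vcpu->cpl;
	}

	static struct demo_x86_ops vendor_ops = {
		.get_cpl         = vendor_get_cpl,
		.flush_log_dirty = NULL,	/* this vendor skips the optional hook */
	};

	/* Analogous to kvm_arch_init(): refuse double registration, latch ops. */
	static int demo_arch_init(struct demo_x86_ops *ops)
	{
		if (demo_x86_ops)
			return -1;
		demo_x86_ops = ops;
		return 0;
	}

	/* Analogous to kvm_arch_exit(). */
	static void demo_arch_exit(void)
	{
		demo_x86_ops = NULL;
	}

	/* Mandatory hook: bare dereference, like kvm_x86_ops->get_cpl(). */
	static int demo_require_cpl(struct demo_vcpu *vcpu, int required_cpl)
	{
		return demo_x86_ops->get_cpl(vcpu) <= required_cpl;
	}

	/* Optional hook: NULL-checked, like kvm_x86_ops->flush_log_dirty. */
	static void demo_get_dirty_log(struct demo_vcpu *vcpu)
	{
		if (demo_x86_ops->flush_log_dirty)
			demo_x86_ops->flush_log_dirty(vcpu);
	}

	int main(void)
	{
		struct demo_vcpu vcpu = { .cpl = 0 };

		demo_arch_init(&vendor_ops);
		printf("cpl check: %d\n", demo_require_cpl(&vcpu, 0));
		demo_get_dirty_log(&vcpu);
		demo_arch_exit();
		return 0;
	}

The mandatory/optional split is why some call sites in the listing are bare dereferences while others are guarded by an if, and why several capability checks simply test whether the corresponding function pointer is non-NULL.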