Lines matching refs: kvm_x86_ops (all hits below are in arch/x86/kvm/x86.c; each entry gives the line number in that file, the matching source line, and the enclosing function).

108 struct kvm_x86_ops *kvm_x86_ops __read_mostly;
109 EXPORT_SYMBOL_GPL(kvm_x86_ops);
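
The two lines above (108-109) are the whole mechanism the rest of this listing exercises: one global pointer to a vendor-supplied table of function pointers, filled in once at module load, read on every hot path (hence __read_mostly), and exported so the vmx/svm modules can reach it. A minimal standalone sketch of the same ops-table pattern, with invented names (demo_ops, intel_ops) standing in for kvm_x86_ops and the vendor tables:

#include <stdio.h>

/* Hypothetical stand-in for struct kvm_x86_ops: a table of callbacks,
 * some mandatory, some optional (optional ones may be left NULL). */
struct demo_ops {
	int (*get_cpl)(void);			/* mandatory */
	int (*mem_enc_op)(void *argp);		/* optional  */
};

/* Single global pointer, installed once at "module init". */
static struct demo_ops *demo_x86_ops;

static int intel_get_cpl(void) { return 0; }

static struct demo_ops intel_ops = {
	.get_cpl	= intel_get_cpl,
	.mem_enc_op	= NULL,		/* this vendor does not implement it */
};

int main(void)
{
	demo_x86_ops = &intel_ops;	/* what kvm_arch_init() does, in effect */

	/* Mandatory hooks are called unconditionally, exactly as the
	 * get_cpl() call sites below do ... */
	printf("cpl = %d\n", demo_x86_ops->get_cpl());

	/* ... optional hooks are NULL-checked first, as in the
	 * mem_enc_op call sites further down. */
	if (demo_x86_ops->mem_enc_op)
		demo_x86_ops->mem_enc_op(NULL);
	else
		printf("mem_enc_op unsupported\n");
	return 0;
}
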
536 if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl) in kvm_require_cpl()
678 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); in kvm_set_cr0()
691 kvm_x86_ops->set_cr0(vcpu, cr0); in kvm_set_cr0()
778 if (kvm_x86_ops->get_cpl(vcpu) != 0 || in kvm_set_xcr()
835 if (kvm_x86_ops->set_cr4(vcpu, cr4)) in kvm_set_cr4()
919 kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6); in kvm_update_dr6()
930 kvm_x86_ops->set_dr7(vcpu, dr7); in kvm_update_dr7()
996 *val = kvm_x86_ops->get_dr6(vcpu); in kvm_get_dr()
1144 if (kvm_x86_ops->get_msr_feature(msr)) in kvm_get_msr_feature()
1194 kvm_x86_ops->set_efer(vcpu, efer); in set_efer()
1241 return kvm_x86_ops->set_msr(vcpu, msr); in kvm_set_msr()
1535 u64 curr_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu); in update_ia32_tsc_adjust_msr()
1577 u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu); in kvm_read_l1_tsc()
1585 kvm_x86_ops->write_tsc_offset(vcpu, offset); in kvm_vcpu_write_tsc_offset()
2289 kvm_x86_ops->tlb_flush(vcpu, invalidate_gpa); in kvm_vcpu_flush_tlb()
2581 return kvm_x86_ops->get_msr(vcpu, msr); in kvm_get_msr()
2955 r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE); in kvm_vm_ioctl_check_extension()
2958 r = !kvm_x86_ops->cpu_has_accelerated_tpr(); in kvm_vm_ioctl_check_extension()
2985 r = kvm_x86_ops->get_nested_state ? in kvm_vm_ioctl_check_extension()
2986 kvm_x86_ops->get_nested_state(NULL, 0, 0) : 0; in kvm_vm_ioctl_check_extension()
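
The KVM_CHECK_EXTENSION hits above (2955-2986) show capabilities being derived straight from the ops table: a feature is advertised exactly when the vendor wired up the hook. For nested state, the NULL ternary doubles as a size query. A sketch of that switch arm, reconstructed around the two lines above rather than quoted:

	case KVM_CAP_NESTED_STATE:
		/* Hook present means capability present; the probe call
		 * (no vcpu, zero-sized buffer) is assumed to report the
		 * buffer size userspace must supply. */
		r = kvm_x86_ops->get_nested_state ?
			kvm_x86_ops->get_nested_state(NULL, 0, 0) : 0;
		break;
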
3102 if (kvm_x86_ops->has_wbinvd_exit()) in kvm_arch_vcpu_load()
3109 kvm_x86_ops->vcpu_load(vcpu, cpu); in kvm_arch_vcpu_load()
3166 vcpu->arch.preempted_in_kernel = !kvm_x86_ops->get_cpl(vcpu); in kvm_arch_vcpu_put()
3185 kvm_x86_ops->vcpu_put(vcpu); in kvm_arch_vcpu_put()
3199 kvm_x86_ops->sync_pir_to_irr(vcpu); in kvm_vcpu_ioctl_get_lapic()
3307 if (kvm_x86_ops->setup_mce) in kvm_vcpu_ioctl_x86_setup_mce()
3308 kvm_x86_ops->setup_mce(vcpu); in kvm_vcpu_ioctl_x86_setup_mce()
3383 events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
3387 events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
3437 kvm_x86_ops->set_interrupt_shadow(vcpu, in kvm_vcpu_ioctl_x86_set_vcpu_events()
3443 kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4009 if (!kvm_x86_ops->get_nested_state) in kvm_arch_vcpu_ioctl()
4017 r = kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state, in kvm_arch_vcpu_ioctl()
4038 if (!kvm_x86_ops->set_nested_state) in kvm_arch_vcpu_ioctl()
4057 r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state); in kvm_arch_vcpu_ioctl()
4081 ret = kvm_x86_ops->set_tss_addr(kvm, addr); in kvm_vm_ioctl_set_tss_addr()
4088 return kvm_x86_ops->set_identity_map_addr(kvm, ident_addr); in kvm_vm_ioctl_set_identity_map_addr()
4272 if (kvm_x86_ops->flush_log_dirty) in kvm_vm_ioctl_get_dirty_log()
4273 kvm_x86_ops->flush_log_dirty(kvm); in kvm_vm_ioctl_get_dirty_log()
4642 if (kvm_x86_ops->mem_enc_op) in kvm_arch_vm_ioctl()
4643 r = kvm_x86_ops->mem_enc_op(kvm, argp); in kvm_arch_vm_ioctl()
4654 if (kvm_x86_ops->mem_enc_reg_region) in kvm_arch_vm_ioctl()
4655 r = kvm_x86_ops->mem_enc_reg_region(kvm, &region); in kvm_arch_vm_ioctl()
4666 if (kvm_x86_ops->mem_enc_unreg_region) in kvm_arch_vm_ioctl()
4667 r = kvm_x86_ops->mem_enc_unreg_region(kvm, &region); in kvm_arch_vm_ioctl()
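
The three memory-encryption ioctls above (4642-4667) use the other half of the optional-hook idiom: r is preseeded with an error and only overwritten when the vendor implements the hook, so AMD's SEV ops work while their absence elsewhere fails cleanly. A sketch of one arm; the preseeded -ENOTTY follows the usual convention for unhandled ioctls but is reconstructed here, not quoted:

	case KVM_MEMORY_ENCRYPT_OP:
		r = -ENOTTY;		/* assumed default for a missing hook */
		if (kvm_x86_ops->mem_enc_op)
			r = kvm_x86_ops->mem_enc_op(kvm, argp);
		break;
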
4705 if (!kvm_x86_ops->rdtscp_supported()) in kvm_init_msr_list()
4719 if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i])) in kvm_init_msr_list()
4788 kvm_x86_ops->set_segment(vcpu, var, seg); in kvm_set_segment()
4794 kvm_x86_ops->get_segment(vcpu, var, seg); in kvm_get_segment()
4814 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_read()
4821 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_fetch()
4829 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_write()
4878 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_fetch_guest_virt()
4903 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_read_guest_virt()
4917 if (!system && kvm_x86_ops->get_cpl(vcpu) == 3) in emulator_read_std()
4970 if (!system && kvm_x86_ops->get_cpl(vcpu) == 3) in emulator_write_std()
5031 u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0) in vcpu_mmio_gva_to_gpa()
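
Eight hits in a row (4814-5031) are the same one-liner: if the vCPU is at CPL 3 the access is a user-mode access, so the U/S bit is set in the page-fault error code used for the guest virtual-to-physical walk. A standalone sketch; PFERR_USER_MASK really is bit 2 of the x86 #PF error code, while the helper name is invented:

#include <stdio.h>

#define PFERR_USER_MASK (1U << 2)	/* U/S bit of the #PF error code */

/* Invented helper mirroring the repeated expression in the listing. */
static unsigned int gva_access_flags(int cpl)
{
	return (cpl == 3) ? PFERR_USER_MASK : 0;
}

int main(void)
{
	printf("CPL0 -> %#x, CPL3 -> %#x\n",
	       gva_access_flags(0), gva_access_flags(3));
	return 0;
}
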
5421 return kvm_x86_ops->get_segment_base(vcpu, seg); in get_segment_base()
5434 if (kvm_x86_ops->has_wbinvd_exit()) { in kvm_emulate_wbinvd_noskip()
5539 return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt)); in emulator_get_cpl()
5544 kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt); in emulator_get_gdt()
5549 kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt); in emulator_get_idt()
5554 kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt); in emulator_set_gdt()
5559 kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt); in emulator_set_idt()
5696 return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage); in emulator_intercept()
5717 kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked); in emulator_set_nmi_mask()
5732 return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smbase); in emulator_pre_leave_smm()
5779 u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu); in toggle_interruptibility()
5790 kvm_x86_ops->set_interrupt_shadow(vcpu, mask); in toggle_interruptibility()
5815 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); in init_emulate_ctxt()
5867 if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) { in handle_emulation_failure()
6065 unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); in kvm_skip_emulated_instruction()
6068 kvm_x86_ops->skip_emulated_instruction(vcpu); in kvm_skip_emulated_instruction()
6274 unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); in x86_emulate_instruction()
6570 user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu)); in kvm_is_user_mode()
6667 struct kvm_x86_ops *ops = opaque; in kvm_arch_init()
6669 if (kvm_x86_ops) { in kvm_arch_init()
6699 kvm_x86_ops = ops; in kvm_arch_init()
6743 kvm_x86_ops = NULL; in kvm_arch_exit()
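
Lines 6667-6743 are where the pointer gets its value: the vendor module passes its ops table as the opaque argument of kvm_init(), kvm_arch_init() rejects double registration and latches it, and kvm_arch_exit() clears it on unload. A compressed sketch of that flow, assuming the kvm_init() plumbing of this kernel generation; feature checks and error paths are elided:

/* Vendor side (vmx.c-style, simplified): hand the table to common code. */
static int __init vmx_init(void)
{
	return kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
			__alignof__(struct vcpu_vmx), THIS_MODULE);
}

/* Common side (abridged): refuse a second vendor, then latch the table. */
int kvm_arch_init(void *opaque)
{
	struct kvm_x86_ops *ops = opaque;

	if (kvm_x86_ops) {
		printk(KERN_ERR "kvm: already loaded the other module\n");
		return -EEXIST;
	}
	/* ... hardware/feature checks elided ... */
	kvm_x86_ops = ops;
	return 0;
}

void kvm_arch_exit(void)
{
	/* ... teardown elided ... */
	kvm_x86_ops = NULL;
}
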
6823 kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu); in kvm_vcpu_deactivate_apicv()
6851 if (kvm_x86_ops->get_cpl(vcpu) != 0) { in kvm_emulate_hypercall()
6892 kvm_x86_ops->patch_hypercall(vcpu, instruction); in emulator_fix_hypercall()
6921 if (!kvm_x86_ops->update_cr8_intercept) in update_cr8_intercept()
6940 kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr); in update_cr8_intercept()
6950 kvm_x86_ops->queue_exception(vcpu); in inject_pending_event()
6967 kvm_x86_ops->set_nmi(vcpu); in inject_pending_event()
6969 kvm_x86_ops->set_irq(vcpu); in inject_pending_event()
6978 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { in inject_pending_event()
6979 r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); in inject_pending_event()
7004 kvm_x86_ops->queue_exception(vcpu); in inject_pending_event()
7012 kvm_x86_ops->smi_allowed(vcpu)) { in inject_pending_event()
7016 } else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) { in inject_pending_event()
7019 kvm_x86_ops->set_nmi(vcpu); in inject_pending_event()
7028 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { in inject_pending_event()
7029 r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); in inject_pending_event()
7033 if (kvm_x86_ops->interrupt_allowed(vcpu)) { in inject_pending_event()
7036 kvm_x86_ops->set_irq(vcpu); in inject_pending_event()
7052 if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) in process_nmi()
7142 kvm_x86_ops->get_gdt(vcpu, &dt); in enter_smm_save_state_32()
7146 kvm_x86_ops->get_idt(vcpu, &dt); in enter_smm_save_state_32()
7196 kvm_x86_ops->get_idt(vcpu, &dt); in enter_smm_save_state_64()
7206 kvm_x86_ops->get_gdt(vcpu, &dt); in enter_smm_save_state_64()
7236 kvm_x86_ops->pre_enter_smm(vcpu, buf); in enter_smm()
7241 if (kvm_x86_ops->get_nmi_mask(vcpu)) in enter_smm()
7244 kvm_x86_ops->set_nmi_mask(vcpu, true); in enter_smm()
7250 kvm_x86_ops->set_cr0(vcpu, cr0); in enter_smm()
7253 kvm_x86_ops->set_cr4(vcpu, 0); in enter_smm()
7257 kvm_x86_ops->set_idt(vcpu, &dt); in enter_smm()
7287 kvm_x86_ops->set_efer(vcpu, 0); in enter_smm()
7315 kvm_x86_ops->sync_pir_to_irr(vcpu); in vcpu_scan_ioapic()
7334 kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap); in vcpu_load_eoi_exitmap()
7361 if (!kvm_x86_ops->set_apic_access_page_addr) in kvm_vcpu_reload_apic_access_page()
7367 kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page)); in kvm_vcpu_reload_apic_access_page()
7399 kvm_x86_ops->get_vmcs12_pages(vcpu); in vcpu_enter_guest()
7517 if (!kvm_x86_ops->enable_smi_window(vcpu)) in vcpu_enter_guest()
7520 kvm_x86_ops->enable_nmi_window(vcpu); in vcpu_enter_guest()
7522 kvm_x86_ops->enable_irq_window(vcpu); in vcpu_enter_guest()
7539 kvm_x86_ops->prepare_guest_switch(vcpu); in vcpu_enter_guest()
7570 kvm_x86_ops->sync_pir_to_irr(vcpu); in vcpu_enter_guest()
7587 kvm_x86_ops->request_immediate_exit(vcpu); in vcpu_enter_guest()
7605 kvm_x86_ops->run(vcpu); in vcpu_enter_guest()
7615 kvm_x86_ops->sync_dirty_debug_regs(vcpu); in vcpu_enter_guest()
7640 kvm_x86_ops->handle_external_intr(vcpu); in vcpu_enter_guest()
7667 r = kvm_x86_ops->handle_exit(vcpu); in vcpu_enter_guest()
7671 kvm_x86_ops->cancel_injection(vcpu); in vcpu_enter_guest()
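
The densest cluster (7399-7671) is vcpu_enter_guest(), where the callback order is the contract: the vendor prepares its state, gets a last chance to sync posted interrupts, runs the guest, acknowledges the interrupt that caused the exit, and only then hands the exit to common dispatch; a bail-out before entry must undo the queued event via cancel_injection(). A skeleton of that ordering only (should_bail_out() is hypothetical, and requests, debug-register and timer handling are elided):

static int vcpu_enter_guest_sketch(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->prepare_guest_switch(vcpu);	/* load vendor state    */
	kvm_x86_ops->sync_pir_to_irr(vcpu);		/* final IRQ sync       */

	if (should_bail_out(vcpu)) {			/* hypothetical check   */
		kvm_x86_ops->cancel_injection(vcpu);	/* undo queued event    */
		return 1;
	}

	kvm_x86_ops->run(vcpu);				/* VMLAUNCH / VMRUN     */
	kvm_x86_ops->handle_external_intr(vcpu);	/* ack host interrupt   */

	return kvm_x86_ops->handle_exit(vcpu);		/* dispatch exit reason */
}
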
7681 (!kvm_x86_ops->pre_block || kvm_x86_ops->pre_block(vcpu) == 0)) { in vcpu_block()
7686 if (kvm_x86_ops->post_block) in vcpu_block()
7687 kvm_x86_ops->post_block(vcpu); in vcpu_block()
7713 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) in kvm_vcpu_running()
7714 kvm_x86_ops->check_nested_events(vcpu, false); in kvm_vcpu_running()
8051 kvm_x86_ops->get_idt(vcpu, &dt); in __get_sregs()
8054 kvm_x86_ops->get_gdt(vcpu, &dt); in __get_sregs()
8195 kvm_x86_ops->set_idt(vcpu, &dt); in __set_sregs()
8198 kvm_x86_ops->set_gdt(vcpu, &dt); in __set_sregs()
8208 kvm_x86_ops->set_efer(vcpu, sregs->efer); in __set_sregs()
8211 kvm_x86_ops->set_cr0(vcpu, sregs->cr0); in __set_sregs()
8217 kvm_x86_ops->set_cr4(vcpu, sregs->cr4); in __set_sregs()
8323 kvm_x86_ops->update_bp_intercept(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
8457 kvm_x86_ops->vcpu_free(vcpu); in kvm_arch_vcpu_free()
8471 vcpu = kvm_x86_ops->vcpu_create(kvm, id); in kvm_arch_vcpu_create()
8518 kvm_x86_ops->vcpu_free(vcpu); in kvm_arch_vcpu_destroy()
8592 kvm_x86_ops->vcpu_reset(vcpu, init_event); in kvm_vcpu_reset()
8617 ret = kvm_x86_ops->hardware_enable(); in kvm_arch_hardware_enable()
8699 kvm_x86_ops->hardware_disable(); in kvm_arch_hardware_disable()
8707 r = kvm_x86_ops->hardware_setup(); in kvm_arch_hardware_setup()
8731 kvm_x86_ops->hardware_unsetup(); in kvm_arch_hardware_unsetup()
8736 kvm_x86_ops->check_processor_compatibility(rtn); in kvm_arch_check_processor_compat()
8758 vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu); in kvm_arch_vcpu_init()
8847 kvm_x86_ops->sched_in(vcpu, cpu); in kvm_arch_sched_in()
8883 if (kvm_x86_ops->vm_init) in kvm_arch_init_vm()
8884 return kvm_x86_ops->vm_init(kvm); in kvm_arch_init_vm()
9002 if (kvm_x86_ops->vm_destroy) in kvm_arch_destroy_vm()
9003 kvm_x86_ops->vm_destroy(kvm); in kvm_arch_destroy_vm()
9158 if (kvm_x86_ops->slot_enable_log_dirty) in kvm_mmu_slot_apply_flags()
9159 kvm_x86_ops->slot_enable_log_dirty(kvm, new); in kvm_mmu_slot_apply_flags()
9163 if (kvm_x86_ops->slot_disable_log_dirty) in kvm_mmu_slot_apply_flags()
9164 kvm_x86_ops->slot_disable_log_dirty(kvm, new); in kvm_mmu_slot_apply_flags()
9227 kvm_x86_ops->guest_apic_has_interrupt && in kvm_guest_apic_has_interrupt()
9228 kvm_x86_ops->guest_apic_has_interrupt(vcpu)); in kvm_guest_apic_has_interrupt()
9247 kvm_x86_ops->nmi_allowed(vcpu))) in kvm_vcpu_has_events()
9282 return kvm_x86_ops->interrupt_allowed(vcpu); in kvm_arch_interrupt_allowed()
9304 rflags = kvm_x86_ops->get_rflags(vcpu); in kvm_get_rflags()
9316 kvm_x86_ops->set_rflags(vcpu, rflags); in __kvm_set_rflags()
9430 kvm_x86_ops->get_cpl(vcpu) == 0)) in kvm_arch_async_page_not_present()
9526 return kvm_x86_ops->update_pi_irte != NULL; in kvm_arch_has_irq_bypass()
9537 return kvm_x86_ops->update_pi_irte(irqfd->kvm, in kvm_arch_irq_bypass_add_producer()
9557 ret = kvm_x86_ops->update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0); in kvm_arch_irq_bypass_del_producer()
9566 if (!kvm_x86_ops->update_pi_irte) in kvm_arch_update_irqfd_routing()
9569 return kvm_x86_ops->update_pi_irte(kvm, host_irq, guest_irq, set); in kvm_arch_update_irqfd_routing()
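
The final hits are the same presence-as-capability trick at a subsystem boundary: line 9526 reports posted-interrupt IRQ bypass support purely from whether update_pi_irte exists, and the add/del/update paths call it with set=1 or set=0 to wire or unwire the IRTE. A sketch of both halves, with the bypass-producer plumbing elided and the -EINVAL an assumption:

bool demo_arch_has_irq_bypass(void)
{
	return kvm_x86_ops->update_pi_irte != NULL;	/* hook == capability */
}

int demo_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
			      u32 guest_irq, bool set)
{
	if (!kvm_x86_ops->update_pi_irte)
		return -EINVAL;				/* assumed error code */
	return kvm_x86_ops->update_pi_irte(kvm, host_irq, guest_irq, set);
}
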