Lines matching +full:0 +full:x8000000a (a search for "0x8000000a", split into tokens by the indexer)
125 * emulated by KVM. When setting APIC LVTT (0x832) register bit 18,
127 * intercept the MSR 0x832, and do not set up direct_access_msr.
259 static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
270 for (i = 0; i < NUM_MSR_MAPS; i++) { in svm_msrpm_offset()
346 return 0; in svm_set_efer()
352 u32 ret = 0; in svm_get_interrupt_shadow()
363 if (mask == 0) in svm_set_interrupt_shadow()
383 if (nrips && svm->vmcb->control.next_rip != 0) { in __svm_skip_emulated_instruction()
393 return 0; in __svm_skip_emulated_instruction()
403 svm_set_interrupt_shadow(vcpu, 0); in __svm_skip_emulated_instruction()
455 return 0; in svm_update_soft_interrupt_rip()
471 | (ex->has_error_code ? SVM_EVTINJ_VALID_ERR : 0) in svm_inject_exception()
511 * all osvw.status bits inside that length, including bit 0 (which is in svm_init_osvw()
513 * osvw_len is 0 then osvw_status[0] carries no information. We need to in svm_init_osvw()
517 if (osvw_len == 0 && boot_cpu_data.x86 == 0x10) in svm_init_osvw()
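Note: the osvw hits here (svm_init_osvw(), plus the rdmsr fragments in svm_hardware_enable() further down) implement AMD's OS Visible Workaround scheme: OSVW_ID_LENGTH says how many status bits are architecturally valid, and a valid status bit of 1 means the erratum is present. A minimal sketch of that contract follows; erratum_present() is an illustrative name, not a kernel helper.

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Hypothetical helper for the OSVW contract described in the comment
     * above: status bit n is meaningful only for n < osvw_len, and a set
     * bit means the erratum is unfixed in hardware.  With length 0 nothing
     * can be concluded, which is why svm_init_osvw() conservatively
     * reports erratum 298 (bit 0) on family 0x10 parts.
     */
    static bool erratum_present(uint64_t osvw_len, uint64_t osvw_status,
                                unsigned int erratum_bit)
    {
        if (erratum_bit >= osvw_len)
            return true; /* outside the valid length: assume present */
        return (osvw_status >> erratum_bit) & 1;
    }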
527 return 0; in has_svm()
532 return 0; in has_svm()
610 uint64_t len, status = 0; in svm_hardware_enable()
619 osvw_status = osvw_len = 0; in svm_hardware_enable()
627 osvw_status = osvw_len = 0; in svm_hardware_enable()
633 return 0; in svm_hardware_enable()
645 sd->save_area_pa = 0; in svm_cpu_uninit()
654 memset(sd, 0, sizeof(struct svm_cpu_data)); in svm_cpu_init()
664 return 0; in svm_cpu_init()
677 for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) in direct_access_msr_slot()
730 bit_write = 2 * (msr & 0x0f) + 1; in msr_write_intercepted()
754 read = 0; in set_msr_interception_bitmap()
757 write = 0; in set_msr_interception_bitmap()
760 bit_read = 2 * (msr & 0x0f); in set_msr_interception_bitmap()
761 bit_write = 2 * (msr & 0x0f) + 1; in set_msr_interception_bitmap()
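Note: the msrpm_ranges table (svm_msrpm_offset(), above) and the bit arithmetic in msr_write_intercepted() / set_msr_interception_bitmap() together define the MSR permission bitmap layout: three architectural MSR ranges, 2KB of bitmap each, two bits per MSR (read, then write), with a set bit meaning "intercept". A self-contained sketch of that addressing, not the kernel's code; it assumes the kernel's MSRS_RANGE_SIZE of 2048 bytes, a constant that is not itself among the hits.

    #include <stdbool.h>
    #include <stdint.h>

    #define MSRS_RANGE_SIZE 2048                      /* bytes of bitmap per range */
    #define MSRS_IN_RANGE   (MSRS_RANGE_SIZE * 8 / 2) /* 8192 MSRs per range */

    static const uint32_t ranges[] = { 0, 0xc0000000, 0xc0010000 };

    /* u32 index into the bitmap for @msr, or -1 if no range covers it */
    static int msrpm_u32_offset(uint32_t msr)
    {
        for (int i = 0; i < 3; i++) {
            if (msr < ranges[i] || msr >= ranges[i] + MSRS_IN_RANGE)
                continue;
            /* 4 MSRs per byte (2 bits each), 2KB per range, then
             * convert the byte offset to a u32 offset. */
            return (int)(((msr - ranges[i]) / 4 + i * MSRS_RANGE_SIZE) / 4);
        }
        return -1; /* uncovered MSRs are always intercepted */
    }

    /* Each u32 covers 16 MSRs; MSR (msr & 0xf) owns bits {2n, 2n+1}. */
    static bool write_intercepted(const uint32_t *msrpm, uint32_t msr)
    {
        int off = msrpm_u32_offset(msr);

        return off < 0 || ((msrpm[off] >> (2 * (msr & 0x0f) + 1)) & 1);
    }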
792 memset(msrpm, 0xff, PAGE_SIZE * (1 << order)); in svm_vcpu_alloc_msrpm()
801 for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) { in svm_vcpu_init_msrpm()
819 for (i = 0; i < MAX_DIRECT_ACCESS_MSRS; i++) { in svm_set_x2apic_msr_interception()
823 (index > APIC_BASE_MSR + 0xff)) in svm_set_x2apic_msr_interception()
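Note: the range check in svm_set_x2apic_msr_interception() keeps only the x2APIC block, APIC_BASE_MSR through APIC_BASE_MSR + 0xff. x2APIC register MSRs are the xAPIC MMIO offsets shifted right by 4 and rebased at 0x800, which is how the LVTT register from the comment at the top of this listing ends up as MSR 0x832:

    #define APIC_BASE_MSR 0x800 /* first x2APIC register MSR */
    #define APIC_LVTT     0x320 /* LVT Timer register, xAPIC MMIO offset */

    /* x2APIC MSR index for an xAPIC MMIO register offset */
    static unsigned int x2apic_msr(unsigned int mmio_offset)
    {
        return APIC_BASE_MSR + (mmio_offset >> 4);
    }

    /* x2apic_msr(APIC_LVTT) == 0x832: the LVTT MSR that the comment at
     * the top of this listing says must always stay intercepted. */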
847 for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) { in svm_msr_filter_changed()
860 for (i = 0; i < MSRPM_OFFSETS; ++i) { in add_msr_offset()
887 memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets)); in init_msrpm_offsets()
889 for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) { in init_msrpm_offsets()
930 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0); in svm_disable_lbrv()
931 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0); in svm_disable_lbrv()
932 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 0, 0); in svm_disable_lbrv()
933 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 0, 0); in svm_disable_lbrv()
969 "%s: Unknown MSR 0x%x", __func__, index); in svm_get_lbr_msr()
970 return 0; in svm_get_lbr_msr()
1063 iopm_base = 0; in svm_hardware_unsetup()
1068 seg->selector = 0; in init_seg()
1071 seg->limit = 0xffff; in init_seg()
1072 seg->base = 0; in init_seg()
1077 seg->selector = 0; in init_sys_seg()
1079 seg->limit = 0xffff; in init_sys_seg()
1080 seg->base = 0; in init_sys_seg()
1150 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 0, 0); in init_vmcb_after_set_cpuid()
1151 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 0, 0); in init_vmcb_after_set_cpuid()
1249 save->cs.selector = 0xf000; in init_vmcb()
1250 save->cs.base = 0xffff0000; in init_vmcb()
1254 save->cs.limit = 0xffff; in init_vmcb()
1256 save->gdtr.base = 0; in init_vmcb()
1257 save->gdtr.limit = 0xffff; in init_vmcb()
1258 save->idtr.base = 0; in init_vmcb()
1259 save->idtr.limit = 0xffff; in init_vmcb()
1272 save->cr3 = 0; in init_vmcb()
1274 svm->current_vmcb->asid_generation = 0; in init_vmcb()
1275 svm->asid = 0; in init_vmcb()
1325 vcpu->arch.microcode_version = 0x01000065; in __svm_vcpu_reset()
1336 svm->spec_ctrl = 0; in svm_vcpu_reset()
1337 svm->virt_spec_ctrl = 0; in svm_vcpu_reset()
1358 BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0); in svm_vcpu_create()
1405 return 0; in svm_vcpu_create()
1462 hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400); in svm_prepare_switch_to_guest()
1470 if (likely(tsc_aux_uret_slot >= 0)) in svm_prepare_switch_to_guest()
1575 control->int_vector = 0x0; in svm_set_vintr()
1578 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT); in svm_set_vintr()
1653 var->g = s->limit > 0xfffff; in svm_get_segment()
1667 var->type |= 0x2; in svm_get_segment()
1681 var->type |= 0x1; in svm_get_segment()
1691 var->db = 0; in svm_get_segment()
1925 get_debugreg(vcpu->arch.db[0], 0); in svm_sync_dirty_debug_regs()
2004 return 0; in db_interception()
2018 return 0; in bp_interception()
2028 kvm_queue_exception_e(vcpu, AC_VECTOR, 0); in ac_interception()
2047 if (value != 0xb600000000010015ULL) in is_erratum_383()
2051 for (i = 0; i < 6; ++i) in is_erratum_383()
2052 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0); in is_erratum_383()
2121 return 0; in shutdown_interception()
2132 string = (io_info & SVM_IOIO_STR_MASK) != 0; in io_interception()
2133 in = (io_info & SVM_IOIO_TYPE_MASK) != 0; in io_interception()
2141 return kvm_emulate_instruction(vcpu, 0); in io_interception()
2178 kvm_inject_gp(vcpu, 0); in vmload_vmsave_interception()
2188 svm->sysenter_eip_hi = 0; in vmload_vmsave_interception()
2189 svm->sysenter_esp_hi = 0; in vmload_vmsave_interception()
2229 if (ctxt->b != 0x1 || ctxt->opcode_len != 2) in svm_instr_opcode()
2233 case 0xd8: /* VMRUN */ in svm_instr_opcode()
2235 case 0xda: /* VMLOAD */ in svm_instr_opcode()
2237 case 0xdb: /* VMSAVE */ in svm_instr_opcode()
2262 /* Returns '1' or -errno on failure, '0' on success. */ in emulate_svm_instr()
2290 if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK) in gp_interception()
2414 u32 error_code = 0; in task_switch_interception()
2457 return 0; in task_switch_interception()
2484 return kvm_emulate_instruction(vcpu, 0); in invlpg_interception()
2492 return kvm_emulate_instruction(vcpu, 0); in emulate_on_interception()
2534 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0)) in cr_interception()
2543 err = 0; in cr_interception()
2549 case 0: in cr_interception()
2572 case 0: in cr_interception()
2603 int ret = 0; in cr_trap()
2609 case 0: in cr_trap()
2638 int err = 0; in dr_interception()
2640 if (vcpu->guest_debug == 0) { in dr_interception()
2680 return 0; in cr8_write_interception()
2704 msr->data = 0; in svm_get_msr_feature()
2712 return 0; in svm_get_msr_feature()
2717 return 0; in svm_get_msr_feature()
2800 if (family < 0 || model < 0) in svm_get_msr()
2803 msr_info->data = 0; in svm_get_msr()
2805 if (family == 0x15 && in svm_get_msr()
2806 (model >= 0x2 && model < 0x20)) in svm_get_msr()
2807 msr_info->data = 0x1E; in svm_get_msr()
2816 return 0; in svm_get_msr()
2855 return 0; in svm_set_vm_cr()
2876 * Due to a bug in qemu 6.2.0, it would try to set in svm_set_msr()
2877 * this msr to 0 if tsc scaling is not enabled. in svm_set_msr()
2880 if (data != 0 && data != svm->tsc_ratio_msr) in svm_set_msr()
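Note: assembled from the fragments above, the MSR_AMD64_TSC_RATIO write path tolerates exactly two values when TSC scaling is not exposed to the guest. A sketch of that acceptance rule with an illustrative name (the surrounding host-initiated guard in svm_set_msr() is an assumption, not one of these hits):

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Illustrative predicate, not the kernel's code: with TSC scaling not
     * exposed, only a write of 0 (the bogus value qemu 6.2.0 emits) or a
     * rewrite of the current ratio is accepted, so migration of such
     * machine types keeps working.
     */
    static bool tsc_ratio_write_ok(uint64_t data, uint64_t current_ratio)
    {
        return data == 0 || data == current_ratio;
    }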
2944 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0, 1); in svm_set_msr()
2985 svm->sysenter_eip_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0; in svm_set_msr()
2989 svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0; in svm_set_msr()
3007 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n", in svm_set_msr()
3037 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); in svm_set_msr()
3060 return 0; in svm_set_msr()
3102 in_kernel = !sev_es_guest(vcpu->kvm) && svm_get_cpl(vcpu) == 0; in pause_interception()
3221 pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff); in dump_vmcb()
3223 pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff); in dump_vmcb()
3337 vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%llx\n", exit_code); in svm_handle_invalid_exit()
3342 vcpu->run->internal.data[0] = exit_code; in svm_handle_invalid_exit()
3344 return 0; in svm_handle_invalid_exit()
3381 *error_code = 0; in svm_get_exit_info()
3420 return 0; in svm_handle_exit()
3448 svm->current_vmcb->asid_generation = 0; in pre_svm_run()
3596 return 0; in svm_nmi_allowed()
3658 return 0; in svm_interrupt_allowed()
3675 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes in svm_enable_irq_window()
3881 control->event_inj = 0; in svm_cancel_injection()
3989 vcpu->arch.regs_dirty = 0; in svm_vcpu_run()
4004 svm->next_rip = 0; in svm_vcpu_run()
4013 svm->nested.nested_run_pending = 0; in svm_vcpu_run()
4075 return 0; in is_disabled()
4084 hypercall[0] = 0x0f; in svm_patch_hypercall()
4085 hypercall[1] = 0x01; in svm_patch_hypercall()
4086 hypercall[2] = 0xd9; in svm_patch_hypercall()
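Note: the three bytes svm_patch_hypercall() writes are AMD's VMMCALL encoding (Intel's VMCALL is 0f 01 c1), so a guest that executed the other vendor's hypercall instruction can be fixed up to use the host's:

    /* Opcode bytes patched in by svm_patch_hypercall(): VMMCALL. */
    static const unsigned char vmmcall_insn[3] = { 0x0f, 0x01, 0xd9 };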
4091 return 0; in svm_check_processor_compat()
4146 best = kvm_find_cpuid_entry(vcpu, 0x8000001F); in svm_vcpu_after_set_cpuid()
4148 vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f)); in svm_vcpu_after_set_cpuid()
4264 cr0 &= 0xfUL; in svm_check_intercept()
4265 val &= 0xfUL; in svm_check_intercept()
4284 vmcb->control.exit_info_1 = 0; in svm_check_intercept()
4300 exit_info = ((info->src_val & 0xffff) << 16) | in svm_check_intercept()
4304 exit_info = (info->dst_val & 0xffff) << 16; in svm_check_intercept()
4358 vcpu->arch.mcg_cap &= 0x1ff; in svm_setup_mce()
4379 return 0; in svm_smi_allowed()
4395 return 0; in svm_enter_smm()
4398 put_smstate(u64, smstate, 0x7ed8, 1); in svm_enter_smm()
4400 put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa); in svm_enter_smm()
4416 * by 0x400 (matches the offset of 'struct vmcb_save_area' in svm_enter_smm()
4426 BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400); in svm_enter_smm()
4428 svm_copy_vmrun_state(map_save.hva + 0x400, in svm_enter_smm()
4432 return 0; in svm_enter_smm()
4444 return 0; in svm_leave_smm()
4447 if (!GET_SMSTATE(u64, smstate, 0x7ed8)) in svm_leave_smm()
4448 return 0; in svm_leave_smm()
4453 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0); in svm_leave_smm()
4457 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0); in svm_leave_smm()
4473 svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400); in svm_leave_smm()
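Note: svm_enter_smm() and svm_leave_smm() above agree on a small set of SMRAM state-save offsets; collected in one place below. The macro names are descriptive, not the kernel's.

    /* SMRAM state-save offsets used by the hits above (illustrative names): */
    #define SMRAM_EFER              0x7ed0 /* saved EFER, checked for EFER.SVME   */
    #define SMRAM_SVM_GUEST_FLAG    0x7ed8 /* nonzero: SMM entered from guest mode */
    #define SMRAM_SVM_VMCB12_GPA    0x7ee0 /* L1's vmcb12 GPA to re-enter on RSM  */
    #define VMCB_SAVE_AREA_OFFSET   0x400  /* offsetof(struct vmcb, save)         */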
4578 * be '0'. This happens because microcode reads CS:RIP using a _data_ in svm_can_emulate_instruction()
4579 * load uop with CPL=0 privileges. If the load hits a SMAP #PF, ucode in svm_can_emulate_instruction()
4584 * triggered emulation (e.g. for MMIO), and the CPU returned 0 in the in svm_can_emulate_instruction()
4594 * 2. CR4.SMEP=0 || CPL=3. If SMEP=1 and CPL<3, the erratum cannot in svm_can_emulate_instruction()
4625 kvm_inject_gp(vcpu, 0); in svm_can_emulate_instruction()
4634 * if the fault is at CPL=0, it's the lesser of all evils. Exiting to in svm_can_emulate_instruction()
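Note: the conditions spelled out in the comment fragments above (the microcode fetch of CS:RIP is a CPL0 data load, so it can take a SMAP fault only when SMAP is on and either SMEP is off or the guest was at CPL3) reduce to a small predicate. A sketch with illustrative names:

    #include <stdbool.h>

    /*
     * Illustrative predicate for the DecodeAssist erratum described
     * above: the CPL0 *data* load of CS:RIP can hit a SMAP #PF (and hence
     * return an empty instruction buffer) only if CR4.SMAP=1 and either
     * CR4.SMEP=0 or the guest was executing at CPL3.
     */
    static bool decode_assist_erratum_possible(bool smap, bool smep, int cpl)
    {
        return smap && (!smep || cpl == 3);
    }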
4680 return 0; in svm_vm_init()
4821 if (cpuid_eax(0x80000000) < 0x8000001f) in svm_adjust_mmio_mask()
4829 enc_bit = cpuid_ebx(0x8000001f) & 0x3f; in svm_adjust_mmio_mask()
4845 mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0; in svm_adjust_mmio_mask()
4854 kvm_caps.supported_xss = 0; in svm_set_cpu_caps()
4856 /* CPUID 0x80000001 and 0x8000000A (SVM features) */ in svm_set_cpu_caps()
4888 /* CPUID 0x80000008 */ in svm_set_cpu_caps()
4897 /* CPUID 0x8000001F (SME/SEV features) */ in svm_set_cpu_caps()
4925 memset(iopm_va, 0xff, PAGE_SIZE * (1 << order)); in svm_hardware_setup()
4951 pause_filter_count = 0; in svm_hardware_setup()
4952 pause_filter_thresh = 0; in svm_hardware_setup()
4954 pause_filter_thresh = 0; in svm_hardware_setup()
5057 return 0; in svm_hardware_setup()