Lines Matching +full:0 +full:x8ff
23 static bool __read_mostly nested_early_check = 0;
72 memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); in init_vmcs_shadow_fields()
73 memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); in init_vmcs_shadow_fields()
75 for (i = j = 0; i < max_shadow_read_only_fields; i++) { in init_vmcs_shadow_fields()
96 for (i = j = 0; i < max_shadow_read_write_fields; i++) { in init_vmcs_shadow_fields()
279 vcpu->arch.regs_dirty = 0; in vmx_switch_vmcs()
358 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { in nested_ept_invalidate_addr()
396 nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification); in nested_ept_inject_page_fault()
435 bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0; in nested_vmx_is_page_fault_vmexit()
464 return 0; in nested_vmx_check_io_bitmap_controls()
470 return 0; in nested_vmx_check_io_bitmap_controls()
477 return 0; in nested_vmx_check_msr_bitmap_controls()
482 return 0; in nested_vmx_check_msr_bitmap_controls()
489 return 0; in nested_vmx_check_tpr_shadow_controls()
494 return 0; in nested_vmx_check_tpr_shadow_controls()
517 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { in enable_x2apic_msr_intercepts()
520 msr_bitmap[word] = ~0; in enable_x2apic_msr_intercepts()
521 msr_bitmap[word + (0x800 / sizeof(long))] = ~0; in enable_x2apic_msr_intercepts()
600 * L0 need not intercept reads for MSRs between 0x800 in nested_vmx_prepare_msr_bitmap()
601 * and 0x8ff, it just lets the processor take the value in nested_vmx_prepare_msr_bitmap()
605 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { in nested_vmx_prepare_msr_bitmap()
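The two 0x800..0x8ff loops above lean on the MSR-bitmap layout from the SDM: the read bitmap for low MSRs occupies the first 1024 bytes of the page and the matching write bitmap starts at byte offset 0x800, so each 64-MSR word has a write-side twin 0x800/sizeof(long) words later. A standalone sketch of that word math (not kernel code; values are illustrative):

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

int main(void)
{
	unsigned long msr_bitmap[4096 / sizeof(unsigned long)] = { 0 };
	unsigned int msr;

	/* Intercept reads and writes for the whole x2APIC range 0x800..0x8ff. */
	for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
		unsigned int word = msr / BITS_PER_LONG;

		msr_bitmap[word] = ~0UL;                        /* read bitmap  */
		msr_bitmap[word + 0x800 / sizeof(long)] = ~0UL; /* write bitmap */
	}

	printf("MSR 0x808: read word %zu, write word %zu\n",
	       (size_t)(0x808 / BITS_PER_LONG),
	       (size_t)(0x808 / BITS_PER_LONG + 0x800 / sizeof(long)));
	return 0;
}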
711 return 0; in nested_vmx_check_apic_access_controls()
721 return 0; in nested_vmx_check_apicv_controls()
743 * bits 5:0 of posted_intr_desc_addr should be zero. in nested_vmx_check_apicv_controls()
748 CC((vmcs12->posted_intr_nv & 0xff00)) || in nested_vmx_check_apicv_controls()
756 return 0; in nested_vmx_check_apicv_controls()
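The two checks above encode the posted-interrupt format: the notification vector is an 8-bit interrupt vector (bits 15:8 of posted_intr_nv must be zero) and the descriptor must be 64-byte aligned (bits 5:0 of its address must be zero). A minimal sketch with made-up values, not kernel code:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

int main(void)
{
	uint16_t posted_intr_nv        = 0xf2;            /* illustrative */
	uint64_t posted_intr_desc_addr = 0x123456780ULL;  /* illustrative */
	bool bad = false;

	if (posted_intr_nv & 0xff00)        /* vector must fit in 8 bits  */
		bad = true;
	if (posted_intr_desc_addr & 0x3f)   /* descriptor 64-byte aligned */
		bad = true;

	printf("posted-interrupt fields: %s\n", bad ? "reject" : "accept");
	return 0;
}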
762 if (count == 0) in nested_vmx_check_msr_switch()
763 return 0; in nested_vmx_check_msr_switch()
769 return 0; in nested_vmx_check_msr_switch()
783 return 0; in nested_vmx_check_exit_msr_switch_controls()
794 return 0; in nested_vmx_check_entry_msr_switch_controls()
801 return 0; in nested_vmx_check_pml_controls()
807 return 0; in nested_vmx_check_pml_controls()
816 return 0; in nested_vmx_check_unrestricted_guest_controls()
825 return 0; in nested_vmx_check_mode_based_ept_exec_controls()
832 return 0; in nested_vmx_check_shadow_vmcs_controls()
838 return 0; in nested_vmx_check_shadow_vmcs_controls()
845 if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)) in nested_vmx_msr_check_common()
850 if (CC(e->reserved != 0)) in nested_vmx_msr_check_common()
852 return 0; in nested_vmx_msr_check_common()
863 return 0; in nested_vmx_load_msr_check()
872 return 0; in nested_vmx_store_msr_check()
886 * return 0 for success, entry index for failure.
899 for (i = 0; i < count; i++) { in nested_vmx_load_msr()
906 "%s cannot read MSR entry (%u, 0x%08llx)\n", in nested_vmx_load_msr()
912 "%s check failed (%u, 0x%x, 0x%x)\n", in nested_vmx_load_msr()
918 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", in nested_vmx_load_msr()
923 return 0; in nested_vmx_load_msr()
944 if (i >= 0) { in nested_vmx_get_vmexit_msr_value()
953 pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__, in nested_vmx_get_vmexit_msr_value()
967 "%s cannot read MSR entry (%u, 0x%08llx)\n", in read_and_check_msr_entry()
973 "%s check failed (%u, 0x%x, 0x%x)\n", in read_and_check_msr_entry()
987 for (i = 0; i < count; i++) { in nested_vmx_store_msr()
1002 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", in nested_vmx_store_msr()
1007 return 0; in nested_vmx_store_msr()
1018 for (i = 0; i < count; i++) { in nested_msr_store_list_has_msr()
1039 in_autostore_list = msr_autostore_slot >= 0; in prepare_vmx_msr_autostore_list()
1098 return 0; in nested_vmx_load_cr3()
1208 return 0; in vmx_restore_vmx_basic()
1251 if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0))) in vmx_restore_control_msr()
1254 /* Check must-be-0 bits are still 0. */ in vmx_restore_control_msr()
1261 return 0; in vmx_restore_control_msr()
1296 return 0; in vmx_restore_vmx_misc()
1310 return 0; in vmx_restore_vmx_ept_vpid_cap()
1337 return 0; in vmx_restore_fixed0_msr()
1343 * Returns 0 on success, non-0 otherwise.
1367 * If userspace wants to emulate VMX_BASIC[55]=0, userspace in vmx_set_vmx_msr()
1395 return 0; in vmx_set_vmx_msr()
1400 return 0; in vmx_set_vmx_msr()
1409 /* Returns 0 on success, non-0 otherwise. */
1484 return 0; in vmx_get_vmx_msr()
1510 for (i = 0; i < max_shadow_read_write_fields; i++) { in copy_shadow_to_vmcs12()
1543 for (q = 0; q < ARRAY_SIZE(fields); q++) { in copy_vmcs12_to_shadow()
1544 for (i = 0; i < max_fields[q]; i++) { in copy_vmcs12_to_shadow()
2003 * examining CPUID.0x4000000A.EAX[0:15]. Host userspace VMM is in nested_vmx_handle_enlightened_vmptrld()
2012 * CPUID.0x4000000A.EAX[0:15]. in nested_vmx_handle_enlightened_vmptrld()
2035 memset(vmcs12, 0, sizeof(*vmcs12)); in nested_vmx_handle_enlightened_vmptrld()
2104 if (preemption_timeout == 0) { in vmx_start_preemption_timer()
2109 if (vcpu->arch.virtual_tsc_khz == 0) in vmx_start_preemption_timer()
2152 construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL)); in prepare_vmcs02_constant_state()
2156 vmcs_write64(VM_FUNCTION_CONTROL, 0); in prepare_vmcs02_constant_state()
2170 vmcs_write64(PML_ADDRESS, 0); in prepare_vmcs02_constant_state()
2369 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); in prepare_vmcs02_early()
2416 vmx->segment_cache.bitmask = 0; in prepare_vmcs02_rare()
2453 * setting MASK=MATCH=0 and (see below) EB.PF=1. in prepare_vmcs02_rare()
2456 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when in prepare_vmcs02_rare()
2464 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); in prepare_vmcs02_rare()
2465 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); in prepare_vmcs02_rare()
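The MASK=MATCH=0 trick described above works because of the SDM's page-fault filtering rule: a #PF causes a VM exit exactly when ((error_code & PFEC_MASK) == PFEC_MATCH) agrees with the PF bit in the exception bitmap. With MASK=MATCH=0 the comparison is always true, so EB.PF=1 intercepts every page fault. A small sketch of the rule (assumed from the SDM, not taken from the file):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* #PF exits iff ((err & mask) == match) agrees with exception-bitmap bit 14. */
static bool pf_causes_vmexit(bool eb_pf, uint32_t err, uint32_t mask, uint32_t match)
{
	return ((err & mask) == match) == eb_pf;
}

int main(void)
{
	/* MASK=MATCH=0 and EB.PF=1: every error code matches, so always exit. */
	printf("err=0x0 -> %d, err=0x7 -> %d\n",
	       pf_causes_vmexit(true, 0x0, 0, 0),
	       pf_causes_vmexit(true, 0x7, 0, 0));
	return 0;
}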
2499 * Returns 0 on success, 1 on failure. Invalid state exit qualification code
2643 return 0; in prepare_vmcs02()
2656 return 0; in nested_vmx_check_nmi_controls()
2692 if (CC(kvm_vcpu_is_illegal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f))) in nested_vmx_check_eptp()
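The check above rejects an EPTP whose reserved bits 11:7 are set (the (new_eptp >> 7) & 0x1f term) on top of the GPA check. For reference, a standalone decode of the EPTP layout with an illustrative value (a sketch, not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t eptp = 0x123456000ULL | (3 << 3) | 6;   /* illustrative EPTP      */

	unsigned memtype  = eptp & 0x7;              /* bits 2:0: memory type      */
	unsigned levels   = ((eptp >> 3) & 0x7) + 1; /* bits 5:3: walk length - 1  */
	unsigned ad_bits  = (eptp >> 6) & 0x1;       /* bit 6: A/D enable          */
	unsigned reserved = (eptp >> 7) & 0x1f;      /* bits 11:7: must be 0       */
	uint64_t root_pa  = eptp & ~0xfffULL;        /* root table address         */

	printf("memtype=%u levels=%u ad=%u reserved=%#x root=%#llx\n",
	       memtype, levels, ad_bits, reserved, (unsigned long long)root_pa);
	return 0;
}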
2760 return 0; in nested_check_vm_execution_controls()
2777 return 0; in nested_check_vm_exit_controls()
2818 CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0)) in nested_check_vm_entry_controls()
2843 CC(vmcs12->vm_entry_instruction_len == 0 && in nested_check_vm_entry_controls()
2852 return 0; in nested_check_vm_entry_controls()
2866 return 0; in nested_vmx_check_controls()
2877 return 0; in nested_vmx_check_address_space_size()
2926 CC(vmcs12->host_cs_selector == 0) || in nested_vmx_check_host_state()
2927 CC(vmcs12->host_tr_selector == 0) || in nested_vmx_check_host_state()
2928 CC(vmcs12->host_ss_selector == 0 && !ia32e)) in nested_vmx_check_host_state()
2941 * IA32_EFER MSR must be 0 in the field for that register. In addition, in nested_vmx_check_host_state()
2952 return 0; in nested_vmx_check_host_state()
2963 return 0; in nested_vmx_check_vmcs_link_ptr()
2982 return 0; in nested_vmx_check_vmcs_link_ptr()
2995 return 0; in nested_check_guest_non_reg_state()
3031 * - Bits reserved in the IA32_EFER MSR must be 0. in nested_vmx_check_guest_state()
3039 ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0; in nested_vmx_check_guest_state()
3055 return 0; in nested_vmx_check_guest_state()
3065 return 0; in nested_vmx_check_vmentry_hw()
3068 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); in nested_vmx_check_vmentry_hw()
3070 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); in nested_vmx_check_vmentry_hw()
3082 vmcs_writel(GUEST_RFLAGS, 0); in nested_vmx_check_vmentry_hw()
3133 return 0; in nested_vmx_check_vmentry_hw()
3193 vcpu->run->internal.ndata = 0; in nested_get_vmcs12_pages()
3260 vcpu->run->internal.ndata = 0; in vmx_get_nested_state_pages()
3278 return 0; in nested_vmx_write_pml_buffer()
3289 return 0; in nested_vmx_write_pml_buffer()
3296 gpa &= ~0xFFFull; in nested_vmx_write_pml_buffer()
3301 return 0; in nested_vmx_write_pml_buffer()
3305 return 0; in nested_vmx_write_pml_buffer()
3319 return 0; in nested_vmx_check_permission()
3323 kvm_inject_gp(vcpu, 0); in nested_vmx_check_permission()
3324 return 0; in nested_vmx_check_permission()
3335 return ((rvi & 0xf0) > (vppr & 0xf0)); in vmx_has_apicv_interrupt()
3632 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3637 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3647 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3649 return 0; in nested_vmx_run()
3720 vmcs12->idt_vectoring_info_field = 0; in vmcs12_save_pending_event()
3755 vmcs12->idt_vectoring_info_field = 0; in vmcs12_save_pending_event()
3789 return 0; in vmx_complete_nested_posted_interrupt()
3797 return 0; in vmx_complete_nested_posted_interrupt()
3808 if ((u8)max_irr > ((u8)status & 0xff)) { in vmx_complete_nested_posted_interrupt()
3809 status &= ~0xff; in vmx_complete_nested_posted_interrupt()
3816 return 0; in vmx_complete_nested_posted_interrupt()
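The (u8)status & 0xff update above touches the guest interrupt status field, whose low byte is RVI and whose high byte is SVI; RVI is only raised, never lowered, to the highest vector found pending. A minimal sketch of that bump with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t status  = 0x3020;  /* SVI=0x30, RVI=0x20, illustrative     */
	uint8_t  max_irr = 0x41;    /* highest vector pending, illustrative */

	if (max_irr > (uint8_t)(status & 0xff)) {
		status &= ~0xff;    /* keep SVI, replace RVI */
		status |= max_irr;
	}
	printf("guest interrupt status: %#06x\n", status);
	return 0;
}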
3839 exit_qual = 0; in nested_vmx_inject_exception_vmexit()
3887 return 0; in vmx_get_pending_dbg_trap()
4041 nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0); in vmx_check_nested_events()
4045 return 0; in vmx_check_nested_events()
4055 nested_vmx_vmexit(vcpu, EXIT_REASON_SIPI_SIGNAL, 0, in vmx_check_nested_events()
4056 apic->sipi_vector & 0xFFUL); in vmx_check_nested_events()
4057 return 0; in vmx_check_nested_events()
4078 return 0; in vmx_check_nested_events()
4092 nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0); in vmx_check_nested_events()
4093 return 0; in vmx_check_nested_events()
4101 return 0; in vmx_check_nested_events()
4113 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); in vmx_check_nested_events()
4114 return 0; in vmx_check_nested_events()
4131 INTR_INFO_VALID_MASK, 0); in vmx_check_nested_events()
4136 vcpu->arch.nmi_pending = 0; in vmx_check_nested_events()
4138 return 0; in vmx_check_nested_events()
4146 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); in vmx_check_nested_events()
4147 return 0; in vmx_check_nested_events()
4160 if (ktime_to_ns(remaining) <= 0) in vmx_get_preemption_timer_value()
4161 return 0; in vmx_get_preemption_timer_value()
4443 vmx_set_interrupt_shadow(vcpu, 0); in load_vmcs12_host_state()
4475 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF); in load_vmcs12_host_state()
4476 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF); in load_vmcs12_host_state()
4480 vmcs_write64(GUEST_BNDCFGS, 0); in load_vmcs12_host_state()
4494 .base = 0, in load_vmcs12_host_state()
4495 .limit = 0xFFFFFFFF, in load_vmcs12_host_state()
4508 .base = 0, in load_vmcs12_host_state()
4509 .limit = 0xFFFFFFFF, in load_vmcs12_host_state()
4530 .limit = 0x67, in load_vmcs12_host_state()
4537 memset(&seg, 0, sizeof(seg)); in load_vmcs12_host_state()
4541 kvm_set_dr(vcpu, 7, 0x400); in load_vmcs12_host_state()
4542 vmcs_write64(GUEST_IA32_DEBUGCTL, 0); in load_vmcs12_host_state()
4562 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) { in nested_vmx_get_vmcs01_guest_efer()
4635 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) { in nested_vmx_restore_host_state()
4639 "%s read MSR index failed (%u, 0x%08llx)\n", in nested_vmx_restore_host_state()
4644 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) { in nested_vmx_restore_host_state()
4648 "%s read MSR failed (%u, 0x%08llx)\n", in nested_vmx_restore_host_state()
4659 "%s check failed (%u, 0x%x, 0x%x)\n", in nested_vmx_restore_host_state()
4666 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n", in nested_vmx_restore_host_state()
4817 WARN_ON(irq < 0); in nested_vmx_vmexit()
4852 vmx->fail = 0; in nested_vmx_vmexit()
4858 nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0); in nested_vmx_triple_fault()
4864 * On success, returns 0. When the operand is invalid, returns 1 and throws
4886 int index_reg = (vmx_instruction_info >> 18) & 0xf; in get_vmx_mem_address()
4888 int base_reg = (vmx_instruction_info >> 23) & 0xf; in get_vmx_mem_address()
4901 else if (addr_size == 0) in get_vmx_mem_address()
4916 off &= 0xffffffff; in get_vmx_mem_address()
4917 else if (addr_size == 0) /* 16 bit */ in get_vmx_mem_address()
4918 off &= 0xffff; in get_vmx_mem_address()
4933 /* Long mode: #GP(0)/#SS(0) if the memory address is in a in get_vmx_mem_address()
4944 *ret = (s.base + off) & 0xffffffff; in get_vmx_mem_address()
4948 * - segment type check (#GP(0) may be thrown) in get_vmx_mem_address()
4949 * - usability check (#GP(0)/#SS(0)) in get_vmx_mem_address()
4950 * - limit check (#GP(0)/#SS(0)) in get_vmx_mem_address()
4953 /* #GP(0) if the destination operand is located in a in get_vmx_mem_address()
4956 exn = ((s.type & 0xa) == 0 || (s.type & 8)); in get_vmx_mem_address()
4958 /* #GP(0) if the source operand is located in an in get_vmx_mem_address()
4961 exn = ((s.type & 0xa) == 8); in get_vmx_mem_address()
4963 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); in get_vmx_mem_address()
4966 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. in get_vmx_mem_address()
4968 exn = (s.unusable != 0); in get_vmx_mem_address()
4971 * Protected mode: #GP(0)/#SS(0) if the memory operand is in get_vmx_mem_address()
4973 * limit checks for flat segments, i.e. segments with base==0, in get_vmx_mem_address()
4974 * limit==0xffffffff and of type expand-up data or code. in get_vmx_mem_address()
4976 if (!(s.base == 0 && s.limit == 0xffffffff && in get_vmx_mem_address()
4984 0); in get_vmx_mem_address()
4988 return 0; in get_vmx_mem_address()
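The shifts used above (>> 3, >> 7, >> 15, >> 18, >> 23, >> 28, plus the invalid bits 22 and 27) follow the VM-exit instruction-information layout for VMREAD/VMWRITE-style operands. A standalone decoder over that layout, with an arbitrary example value (a sketch, not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t info = 0x2a407c18;               /* arbitrary example value        */

	unsigned scaling   = info & 3;            /* bits 1:0                       */
	unsigned reg1      = (info >> 3) & 0xf;   /* bits 6:3                       */
	unsigned addr_size = (info >> 7) & 7;     /* bits 9:7: 0/1/2 = 16/32/64 bit */
	unsigned mem_reg   = (info >> 10) & 1;    /* bit 10: 0 = memory operand     */
	unsigned seg_reg   = (info >> 15) & 7;    /* bits 17:15                     */
	unsigned index_reg = (info >> 18) & 0xf;  /* bits 21:18                     */
	unsigned index_inv = (info >> 22) & 1;    /* bit 22: index register invalid */
	unsigned base_reg  = (info >> 23) & 0xf;  /* bits 26:23                     */
	unsigned base_inv  = (info >> 27) & 1;    /* bit 27: base register invalid  */
	unsigned reg2      = (info >> 28) & 0xf;  /* bits 31:28                     */

	printf("scale=%u reg1=%u asize=%u mem/reg=%u seg=%u\n",
	       scaling, reg1, addr_size, mem_reg, seg_reg);
	printf("index=%u%s base=%u%s reg2=%u\n",
	       index_reg, index_inv ? " (invalid)" : "",
	       base_reg, base_inv ? " (invalid)" : "", reg2);
	return 0;
}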
5011 return 0; in nested_vmx_get_vmptr()
5048 if (r < 0) in enter_vmx_operation()
5073 vmx->pt_desc.guest.ctl = 0; in enter_vmx_operation()
5077 return 0; in enter_vmx_operation()
5119 * CPL=0 and all other checks that are lower priority than VM-Exit must in handle_vmxon()
5123 kvm_inject_gp(vcpu, 0); in handle_vmxon()
5132 kvm_inject_gp(vcpu, 0); in handle_vmxon()
5182 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); in nested_release_vmcs12()
5207 u32 zero = 0; in handle_vmclear()
5273 gva_t gva = 0; in handle_vmread()
5281 field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf)); in handle_vmread()
5294 if (offset < 0) in handle_vmread()
5317 if (offset < 0) in handle_vmread()
5330 kvm_register_write(vcpu, (((instr_info) >> 3) & 0xf), value); in handle_vmread()
5336 /* _system ok, nested_vmx_check_permission has verified cpl=0 */ in handle_vmread()
5389 u64 value = 0; in handle_vmwrite()
5404 value = kvm_register_read(vcpu, (((instr_info) >> 3) & 0xf)); in handle_vmwrite()
5415 field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf)); in handle_vmwrite()
5418 if (offset < 0) in handle_vmwrite()
5445 value &= 0x1f0ff; in handle_vmwrite()
5578 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */ in handle_vmptrst()
5642 roots_to_free = 0; in handle_invept()
5647 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { in handle_invept()
5787 return 0; in nested_vmx_eptp_switching()
5821 case 0: in handle_vmfunc()
5856 while (size > 0) { in nested_vmx_check_io_bitmaps()
5857 if (port < 0x8000) in nested_vmx_check_io_bitmaps()
5859 else if (port < 0x10000) in nested_vmx_check_io_bitmaps()
5863 bitmap += (port & 0x7fff) / 8; in nested_vmx_check_io_bitmaps()
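The lookup above relies on the VMX I/O-bitmap split: bitmap A covers ports 0x0000-0x7fff, bitmap B covers 0x8000-0xffff, one bit per port. A self-contained sketch of the same lookup (illustrative port, not kernel code):

#include <stdio.h>
#include <stdint.h>

static int io_port_intercepted(const uint8_t *a, const uint8_t *b, unsigned port)
{
	const uint8_t *bitmap = (port < 0x8000) ? a : b;

	bitmap += (port & 0x7fff) / 8;            /* byte holding this port's bit */
	return (*bitmap >> (port & 7)) & 1;
}

int main(void)
{
	uint8_t a[4096] = { 0 }, b[4096] = { 0 };

	a[0x60 / 8] |= 1 << (0x60 & 7);           /* intercept port 0x60, say */

	printf("port 0x60: %d, port 0x8060: %d\n",
	       io_port_intercepted(a, b, 0x60),
	       io_port_intercepted(a, b, 0x8060));
	return 0;
}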
5921 if (msr_index >= 0xc0000000) { in nested_vmx_exit_handled_msr()
5922 msr_index -= 0xc0000000; in nested_vmx_exit_handled_msr()
5950 case 0: /* mov to cr */ in nested_vmx_exit_handled_cr()
5954 case 0: in nested_vmx_exit_handled_cr()
5995 * lmsw can change bits 1..3 of cr0, and only set bit 0 of in nested_vmx_exit_handled_cr()
5998 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; in nested_vmx_exit_handled_cr()
5999 if (vmcs12->cr0_guest_host_mask & 0xe & in nested_vmx_exit_handled_cr()
6002 if ((vmcs12->cr0_guest_host_mask & 0x1) && in nested_vmx_exit_handled_cr()
6003 !(vmcs12->cr0_read_shadow & 0x1) && in nested_vmx_exit_handled_cr()
6004 (val & 0x1)) in nested_vmx_exit_handled_cr()
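The lmsw case above checks whether the change is one L1 asked to intercept: lmsw loads only CR0 bits 3:0, and it can set but never clear PE. A minimal sketch of that decision with made-up vmcs12 values:

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	unsigned long guest_host_mask = 0x9;  /* L1 owns PE and TS, illustrative */
	unsigned long read_shadow     = 0x0;  /* L1's view of those bits         */
	unsigned long val             = 0x3;  /* bits 3:0 of the LMSW source     */
	bool exit_to_l1 = false;

	/* lmsw may change MP/EM/TS (mask 0xe) behind L1's back ...             */
	if (guest_host_mask & 0xe & (val ^ read_shadow))
		exit_to_l1 = true;
	/* ... and may set, but never clear, PE (bit 0).                        */
	if ((guest_host_mask & 0x1) && !(read_shadow & 0x1) && (val & 0x1))
		exit_to_l1 = true;

	printf("reflect LMSW to L1: %s\n", exit_to_l1 ? "yes" : "no");
	return 0;
}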
6038 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); in nested_vmx_exit_handled_vmcs_access()
6059 * interruption-type to 7 (other event) and the vector field to 0. Such in nested_vmx_exit_handled_mtf()
6286 exit_intr_info = 0; in nested_vmx_reflect_vmexit()
6287 exit_qual = 0; in nested_vmx_reflect_vmexit()
6328 .flags = 0, in vmx_get_nested_state()
6331 .hdr.vmx.flags = 0, in vmx_get_nested_state()
6334 .hdr.vmx.preemption_timer_deadline = 0, in vmx_get_nested_state()
6337 &user_kvm_nested_state->data.vmx[0]; in vmx_get_nested_state()
6418 copy_enlightened_to_vmcs12(vmx, 0); in vmx_get_nested_state()
6447 to_vmx(vcpu)->nested.nested_run_pending = 0; in vmx_leave_nested()
6448 nested_vmx_vmexit(vcpu, -1, 0, 0); in vmx_leave_nested()
6461 &user_kvm_nested_state->data.vmx[0]; in vmx_set_nested_state()
6526 return 0; in vmx_set_nested_state()
6541 return 0; in vmx_set_nested_state()
6579 return 0; in vmx_set_nested_state()
6630 return 0; in vmx_set_nested_state()
6633 vmx->nested.nested_run_pending = 0; in vmx_set_nested_state()
6665 max_idx = 0; in nested_vmx_calc_vmcs_enum_msr()
6666 for (i = 0; i < nr_vmcs12_fields; i++) { in nested_vmx_calc_vmcs_enum_msr()
6700 * be set to 0, meaning that L1 may turn off any of these bits. The in nested_vmx_setup_ctls_msrs()
6717 (enable_apicv ? PIN_BASED_POSTED_INTR : 0); in nested_vmx_setup_ctls_msrs()
6797 msrs->secondary_ctls_low = 0; in nested_vmx_setup_ctls_msrs()
6885 msrs->misc_high = 0; in nested_vmx_setup_ctls_msrs()
6927 for (i = 0; i < VMX_BITMAP_NR; i++) in nested_vmx_hardware_unsetup()
6937 enable_shadow_vmcs = 0; in nested_vmx_hardware_setup()
6939 for (i = 0; i < VMX_BITMAP_NR; i++) { in nested_vmx_hardware_setup()
6968 return 0; in nested_vmx_hardware_setup()