/Linux-v5.15/arch/x86/kvm/svm/
svm.h
    100  struct vmcb *ptr;
    129  struct vmcb *vmcb;  [member]
    211  struct vmcb *current_vmcb;
    214  struct vmcb **sev_vmcbs;
    248  static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)  [in vmcb_mark_all_dirty() argument]
    250  vmcb->control.clean = 0;  [in vmcb_mark_all_dirty()]
    253  static inline void vmcb_mark_all_clean(struct vmcb *vmcb)  [in vmcb_mark_all_clean() argument]
    255  vmcb->control.clean = VMCB_ALL_CLEAN_MASK  [in vmcb_mark_all_clean()]
    259  static inline bool vmcb_is_clean(struct vmcb *vmcb, int bit)  [in vmcb_is_clean() argument]
    261  return (vmcb->control.clean & (1 << bit));  [in vmcb_is_clean()]
    [all …]
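The vmcb_mark_all_dirty()/vmcb_mark_all_clean()/vmcb_is_clean() hits above are KVM's VMCB clean-bit helpers: control.clean is a bitmask telling the CPU which VMCB state groups are unchanged since the last VMRUN, so clearing a bit forces that group to be reloaded. A minimal self-contained sketch of the same bitmask pattern (the toy_* types, bit indices and VMCB_ALL_CLEAN_MASK value are simplified stand-ins, not the kernel's definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's clean-bit indices and mask. */
enum { VMCB_INTERCEPTS, VMCB_ASID, VMCB_CR, VMCB_DIRTY_MAX };
#define VMCB_ALL_CLEAN_MASK ((1U << VMCB_DIRTY_MAX) - 1)

struct toy_vmcb_control { uint32_t clean; };
struct toy_vmcb { struct toy_vmcb_control control; };

/* Force every VMCB state group to be treated as modified. */
static inline void vmcb_mark_all_dirty(struct toy_vmcb *vmcb)
{
    vmcb->control.clean = 0;
}

/* Declare every state group unchanged since the last run. */
static inline void vmcb_mark_all_clean(struct toy_vmcb *vmcb)
{
    vmcb->control.clean = VMCB_ALL_CLEAN_MASK;
}

/* Declare one state group modified, so it must be re-read. */
static inline void vmcb_mark_dirty(struct toy_vmcb *vmcb, int bit)
{
    vmcb->control.clean &= ~(1U << bit);
}

static inline bool vmcb_is_clean(struct toy_vmcb *vmcb, int bit)
{
    return vmcb->control.clean & (1U << bit);
}

int main(void)
{
    struct toy_vmcb vmcb = { { 0 } };

    vmcb_mark_all_clean(&vmcb);
    vmcb_mark_dirty(&vmcb, VMCB_CR);                        /* e.g. after writing save.efer */
    printf("CR group clean? %d\n", vmcb_is_clean(&vmcb, VMCB_CR));  /* prints 0 */
    return 0;
}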
svm_onhyperv.h
    41   static inline void svm_hv_init_vmcb(struct vmcb *vmcb)  [in svm_hv_init_vmcb() argument]
    44   (struct hv_enlightenments *)vmcb->control.reserved_sw;  [in svm_hv_init_vmcb()]
    82   struct vmcb *vmcb = to_svm(vcpu)->vmcb;  [in svm_hv_vmcb_dirty_nested_enlightenments() local]
    84   (struct hv_enlightenments *)vmcb->control.reserved_sw;  [in svm_hv_vmcb_dirty_nested_enlightenments()]
    92   if (vmcb_is_clean(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS) &&  [in svm_hv_vmcb_dirty_nested_enlightenments()]
    94   vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);  [in svm_hv_vmcb_dirty_nested_enlightenments()]
    97   static inline void svm_hv_update_vp_id(struct vmcb *vmcb,  [in svm_hv_update_vp_id() argument]
    101  (struct hv_enlightenments *)vmcb->control.reserved_sw;  [in svm_hv_update_vp_id()]
    106  vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);  [in svm_hv_update_vp_id()]
    111  static inline void svm_hv_init_vmcb(struct vmcb *vmcb)  [in svm_hv_init_vmcb() argument]
    [all …]
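The svm_hv_update_vp_id() hits above show the Hyper-V enlightenments pattern: the software-reserved area of the VMCB control block is reinterpreted as struct hv_enlightenments, and the nested-enlightenments clean bit is marked dirty only when the cached VP ID actually changes. A hedged, self-contained sketch of that change-then-mark-dirty idea (the toy_* types and the bit index are assumptions for illustration, not the kernel layout):

#include <stdint.h>

/* Simplified stand-ins for the kernel structures. */
struct toy_hv_enlightenments { uint32_t hv_vp_id; };
struct toy_vmcb_control {
    uint32_t clean;
    uint8_t  reserved_sw[32];   /* software-defined area reused for enlightenments */
};
struct toy_vmcb { struct toy_vmcb_control control; };

#define VMCB_HV_NESTED_ENLIGHTENMENTS 31    /* assumed bit index for the sketch */

static void vmcb_mark_dirty(struct toy_vmcb *vmcb, int bit)
{
    vmcb->control.clean &= ~(1U << bit);
}

/* Only touch the clean bit when the cached VP ID really changed. */
static void toy_hv_update_vp_id(struct toy_vmcb *vmcb, uint32_t vp_id)
{
    struct toy_hv_enlightenments *hve =
        (struct toy_hv_enlightenments *)vmcb->control.reserved_sw;

    if (hve->hv_vp_id != vp_id) {
        hve->hv_vp_id = vp_id;
        vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
    }
}

int main(void)
{
    struct toy_vmcb vmcb = { .control = { .clean = ~0U } };

    toy_hv_update_vp_id(&vmcb, 5);  /* VP ID changes -> clean bit cleared */
    toy_hv_update_vp_id(&vmcb, 5);  /* no change -> clean bit left alone */
    return 0;
}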
nested.c
    39   if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {  [in nested_svm_inject_npf_exit()]
    44   svm->vmcb->control.exit_code = SVM_EXIT_NPF;  [in nested_svm_inject_npf_exit()]
    45   svm->vmcb->control.exit_code_hi = 0;  [in nested_svm_inject_npf_exit()]
    46   svm->vmcb->control.exit_info_1 = (1ULL << 32);  [in nested_svm_inject_npf_exit()]
    47   svm->vmcb->control.exit_info_2 = fault->address;  [in nested_svm_inject_npf_exit()]
    50   svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;  [in nested_svm_inject_npf_exit()]
    51   svm->vmcb->control.exit_info_1 |= fault->error_code;  [in nested_svm_inject_npf_exit()]
    63   svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;  [in svm_inject_page_fault_nested()]
    64   svm->vmcb->control.exit_code_hi = 0;  [in svm_inject_page_fault_nested()]
    65   svm->vmcb->control.exit_info_1 = fault->error_code;  [in svm_inject_page_fault_nested()]
    [all …]
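The nested_svm_inject_npf_exit() hits show how a synthesized nested page-fault exit is packed into the VMCB: exit_info_2 carries the faulting address, and the low 32 bits of exit_info_1 are overwritten with the page-fault error code while the upper half is preserved. A small sketch of just that bit packing (function and variable names are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Replace the low 32 bits of exit_info_1 with the error code,
 * keeping whatever flags live in the upper half. */
static uint64_t pack_npf_exit_info_1(uint64_t exit_info_1, uint32_t error_code)
{
    exit_info_1 &= ~0xffffffffULL;
    exit_info_1 |= error_code;
    return exit_info_1;
}

int main(void)
{
    uint64_t info = 1ULL << 32;             /* upper-half flag, as in the listing */
    info = pack_npf_exit_info_1(info, 0x6); /* e.g. write fault on a not-present page */
    printf("exit_info_1 = %#llx\n", (unsigned long long)info);  /* 0x100000006 */
    return 0;
}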
svm.c
    311  svm->vmcb->save.efer = efer | EFER_SVME;  [in svm_set_efer()]
    312  vmcb_mark_dirty(svm->vmcb, VMCB_CR);  [in svm_set_efer()]
    327  if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)  [in svm_get_interrupt_shadow()]
    337  svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;  [in svm_set_interrupt_shadow()]
    339  svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;  [in svm_set_interrupt_shadow()]
    354  if (nrips && svm->vmcb->control.next_rip != 0) {  [in skip_emulated_instruction()]
    356  svm->next_rip = svm->vmcb->control.next_rip;  [in skip_emulated_instruction()]
    393  svm->int3_rip = rip + svm->vmcb->save.cs.base;  [in svm_queue_exception()]
    397  svm->vmcb->control.event_inj = nr  [in svm_queue_exception()]
    401  svm->vmcb->control.event_inj_err = error_code;  [in svm_queue_exception()]
    [all …]
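The svm_get_interrupt_shadow()/svm_set_interrupt_shadow() hits follow a simple flag protocol on control.int_state: clear the shadow bit unconditionally, then set it again only when a shadow is requested. A self-contained sketch of that pattern (the mask value and toy_* names are placeholders, not the kernel's):

#include <stdbool.h>
#include <stdint.h>

#define TOY_INTERRUPT_SHADOW_MASK 1U   /* assumed; stand-in for SVM_INTERRUPT_SHADOW_MASK */

struct toy_vmcb_control { uint32_t int_state; };

static bool toy_get_interrupt_shadow(const struct toy_vmcb_control *ctl)
{
    return ctl->int_state & TOY_INTERRUPT_SHADOW_MASK;
}

/* Clear first, then re-set only if the caller requested a shadow. */
static void toy_set_interrupt_shadow(struct toy_vmcb_control *ctl, bool shadow)
{
    ctl->int_state &= ~TOY_INTERRUPT_SHADOW_MASK;
    if (shadow)
        ctl->int_state |= TOY_INTERRUPT_SHADOW_MASK;
}

int main(void)
{
    struct toy_vmcb_control ctl = { 0 };

    toy_set_interrupt_shadow(&ctl, true);
    return toy_get_interrupt_shadow(&ctl) ? 0 : 1;
}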
avic.c
    190  struct vmcb *vmcb = svm->vmcb;  [in avic_init_vmcb() local]
    196  vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;  [in avic_init_vmcb()]
    197  vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;  [in avic_init_vmcb()]
    198  vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;  [in avic_init_vmcb()]
    199  vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID_COUNT;  [in avic_init_vmcb()]
    200  vmcb->control.avic_vapic_bar = APIC_DEFAULT_PHYS_BASE & VMCB_AVIC_APIC_BAR_MASK;  [in avic_init_vmcb()]
    203  vmcb->control.int_ctl |= AVIC_ENABLE_MASK;  [in avic_init_vmcb()]
    205  vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;  [in avic_init_vmcb()]
    312  u32 icrh = svm->vmcb->control.exit_info_1 >> 32;  [in avic_incomplete_ipi_interception()]
    313  u32 icrl = svm->vmcb->control.exit_info_1;  [in avic_incomplete_ipi_interception()]
    [all …]
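The avic_incomplete_ipi_interception() hits recover the APIC ICR from exit_info_1: the high 32 bits hold ICRH and the low 32 bits hold ICRL. A tiny sketch of that split (the sample value is made up):

#include <stdint.h>
#include <stdio.h>

/* Split a 64-bit exit_info_1 into APIC ICR high/low halves,
 * as the AVIC incomplete-IPI handler does. */
static void split_icr(uint64_t exit_info_1, uint32_t *icrh, uint32_t *icrl)
{
    *icrh = exit_info_1 >> 32;
    *icrl = (uint32_t)exit_info_1;
}

int main(void)
{
    uint32_t icrh, icrl;

    split_icr(0x000000ff000040fdULL, &icrh, &icrl); /* made-up exit_info_1 value */
    printf("ICRH=%#x ICRL=%#x\n", icrh, icrl);
    return 0;
}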
svm_onhyperv.c
    30   hve = (struct hv_enlightenments *)to_svm(vcpu)->vmcb->control.reserved_sw;  [in svm_hv_enable_direct_tlbflush()]
    36   vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);  [in svm_hv_enable_direct_tlbflush()]
sev.c
    554   struct vmcb_save_area *save = &svm->vmcb->save;  [in sev_es_sync_vmsa()]
    2061  pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);  [in dump_ghcb()]
    2094  struct vmcb_control_area *control = &svm->vmcb->control;  [in sev_es_sync_from_ghcb()]
    2119  svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb);  [in sev_es_sync_from_ghcb()]
    2304  if (sd->sev_vmcbs[asid] == svm->vmcb &&  [in pre_sev_run()]
    2308  sd->sev_vmcbs[asid] = svm->vmcb;  [in pre_sev_run()]
    2309  svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;  [in pre_sev_run()]
    2310  vmcb_mark_dirty(svm->vmcb, VMCB_ASID);  [in pre_sev_run()]
    2316  struct vmcb_control_area *control = &svm->vmcb->control;  [in setup_vmgexit_scratch()]
    2396  svm->vmcb->control.ghcb_gpa &= ~(mask << pos);  [in set_ghcb_msr_bits()]
    [all …]
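The pre_sev_run() hits show per-ASID VMCB tracking for SEV guests: each physical CPU remembers the last VMCB that ran with a given ASID, and when a different VMCB shows up it requests an ASID-scoped TLB flush and marks the ASID clean bit dirty. A simplified, self-contained model of that decision (the kernel version also checks additional conditions, and the toy_* types, constants and bit index here are assumptions):

#include <stddef.h>
#include <stdint.h>

#define TOY_MAX_ASID 16
#define TOY_TLB_CONTROL_FLUSH_ASID 3    /* assumed encoding for the sketch */

struct toy_vmcb {
    uint8_t  tlb_ctl;
    uint32_t clean;
};

/* Per-CPU table: last VMCB that ran with each SEV ASID on this CPU. */
struct toy_svm_cpu_data {
    struct toy_vmcb *sev_vmcbs[TOY_MAX_ASID];
};

static void vmcb_mark_dirty_asid(struct toy_vmcb *vmcb)
{
    vmcb->clean &= ~(1U << 2);  /* stand-in for the VMCB_ASID clean bit */
}

/* If this VMCB is not the last one to use @asid on this CPU,
 * request an ASID-scoped TLB flush before the next VMRUN. */
static void toy_pre_sev_run(struct toy_svm_cpu_data *sd,
                            struct toy_vmcb *vmcb, unsigned int asid)
{
    if (sd->sev_vmcbs[asid] == vmcb)
        return;

    sd->sev_vmcbs[asid] = vmcb;
    vmcb->tlb_ctl = TOY_TLB_CONTROL_FLUSH_ASID;
    vmcb_mark_dirty_asid(vmcb);
}

int main(void)
{
    struct toy_svm_cpu_data sd = { { NULL } };
    struct toy_vmcb a = { 0, 0 }, b = { 0, 0 };

    toy_pre_sev_run(&sd, &a, 1);    /* first user of ASID 1: flush requested */
    toy_pre_sev_run(&sd, &a, 1);    /* same VMCB again: nothing to do */
    toy_pre_sev_run(&sd, &b, 1);    /* different VMCB, same ASID: flush again */
    return 0;
}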
/Linux-v5.15/tools/testing/selftests/kvm/lib/x86_64/
svm.c
    36   svm->vmcb = (void *)vm_vaddr_alloc_page(vm);  [in vcpu_alloc_svm()]
    37   svm->vmcb_hva = addr_gva2hva(vm, (uintptr_t)svm->vmcb);  [in vcpu_alloc_svm()]
    38   svm->vmcb_gpa = addr_gva2gpa(vm, (uintptr_t)svm->vmcb);  [in vcpu_alloc_svm()]
    59   struct vmcb *vmcb = svm->vmcb;  [in generic_svm_setup() local]
    61   struct vmcb_save_area *save = &vmcb->save;  [in generic_svm_setup()]
    62   struct vmcb_control_area *ctrl = &vmcb->control;  [in generic_svm_setup()]
    73   memset(vmcb, 0, sizeof(*vmcb));  [in generic_svm_setup()]
    96   vmcb->save.rip = (u64)guest_rip;  [in generic_svm_setup()]
    97   vmcb->save.rsp = (u64)guest_rsp;  [in generic_svm_setup()]
    128  void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa)  [in run_guest() argument]
    [all …]
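Together with the selftests below, these hits outline the library flow an L1 guest uses: vcpu_alloc_svm() maps a VMCB page, generic_svm_setup() zeroes it and points save.rip/save.rsp at the L2 entry point and stack, and run_guest() issues VMRUN on the VMCB's guest-physical address. A sketch of an L1 guest body patterned on the svm_vmcall_test listing further down (the header set and L2_GUEST_STACK_SIZE value are assumptions; this is not a drop-in test):

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "svm_util.h"

#define L2_GUEST_STACK_SIZE 64  /* assumed stack size, in unsigned longs */

static void l2_guest_code(void)
{
    /* Exit back to L1 with SVM_EXIT_VMMCALL. */
    vmmcall();
}

static void l1_guest_code(struct svm_test_data *svm)
{
    unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
    struct vmcb *vmcb = svm->vmcb;

    /* Zero the VMCB and point save.rip/save.rsp at the L2 code and stack. */
    generic_svm_setup(svm, l2_guest_code,
                      &l2_guest_stack[L2_GUEST_STACK_SIZE]);

    /* VMRUN on the guest-physical address of the VMCB. */
    run_guest(vmcb, svm->vmcb_gpa);

    GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
    GUEST_DONE();
}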
/Linux-v5.15/tools/testing/selftests/kvm/x86_64/
svm_int_ctl_test.c
    66   struct vmcb *vmcb = svm->vmcb;  [in l1_guest_code() local]
    75   vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;  [in l1_guest_code()]
    78   vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR | INTERCEPT_VINTR);  [in l1_guest_code()]
    81   vmcb->control.int_ctl |= V_IRQ_MASK | (0x1 << V_INTR_PRIO_SHIFT);  [in l1_guest_code()]
    82   vmcb->control.int_vector = VINTR_IRQ_NUMBER;  [in l1_guest_code()]
    84   run_guest(vmcb, svm->vmcb_gpa);  [in l1_guest_code()]
    85   GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);  [in l1_guest_code()]
svm_vmcall_test.c
    28   struct vmcb *vmcb = svm->vmcb;  [in l1_guest_code() local]
    34   run_guest(vmcb, svm->vmcb_gpa);  [in l1_guest_code()]
    36   GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);  [in l1_guest_code()]
state_test.c
    39   struct vmcb *vmcb = svm->vmcb;  [in svm_l1_guest_code() local]
    47   run_guest(vmcb, svm->vmcb_gpa);  [in svm_l1_guest_code()]
    48   GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);  [in svm_l1_guest_code()]
    50   vmcb->save.rip += 3;  [in svm_l1_guest_code()]
    51   run_guest(vmcb, svm->vmcb_gpa);  [in svm_l1_guest_code()]
    52   GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);  [in svm_l1_guest_code()]
smm_test.c
    107  run_guest(svm->vmcb, svm->vmcb_gpa);  [in guest_code()]
    108  svm->vmcb->save.rip += 3;  [in guest_code()]
    109  run_guest(svm->vmcb, svm->vmcb_gpa);  [in guest_code()]
/Linux-v5.15/tools/testing/selftests/kvm/include/x86_64/
svm_util.h
    23   struct vmcb *vmcb; /* gva */  [member]
    35   void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa);
svm.h
    209  struct __attribute__ ((__packed__)) vmcb {  [struct]
/Linux-v5.15/arch/x86/kvm/
trace.h
    579  TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
    581  TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt),
    585  __field( __u64, vmcb )
    594  __entry->vmcb = vmcb;
    603  __entry->rip, __entry->vmcb, __entry->nested_rip,
/Linux-v5.15/arch/x86/include/asm/
svm.h
    335  struct vmcb {  [struct]
/Linux-v5.15/Documentation/virt/kvm/
locking.rst
    229  - tsc offset in vmcb