Lines matching refs:svm (KVM SEV/SEV-ES support, arch/x86/kvm/svm/sev.c)

564 static int sev_es_sync_vmsa(struct vcpu_svm *svm)  in sev_es_sync_vmsa()  argument
566 struct sev_es_save_area *save = svm->sev_es.vmsa; in sev_es_sync_vmsa()
569 if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1)) in sev_es_sync_vmsa()
578 memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save)); in sev_es_sync_vmsa()
581 save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX]; in sev_es_sync_vmsa()
582 save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX]; in sev_es_sync_vmsa()
583 save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX]; in sev_es_sync_vmsa()
584 save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX]; in sev_es_sync_vmsa()
585 save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP]; in sev_es_sync_vmsa()
586 save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP]; in sev_es_sync_vmsa()
587 save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI]; in sev_es_sync_vmsa()
588 save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI]; in sev_es_sync_vmsa()
590 save->r8 = svm->vcpu.arch.regs[VCPU_REGS_R8]; in sev_es_sync_vmsa()
591 save->r9 = svm->vcpu.arch.regs[VCPU_REGS_R9]; in sev_es_sync_vmsa()
592 save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10]; in sev_es_sync_vmsa()
593 save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11]; in sev_es_sync_vmsa()
594 save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12]; in sev_es_sync_vmsa()
595 save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13]; in sev_es_sync_vmsa()
596 save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14]; in sev_es_sync_vmsa()
597 save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15]; in sev_es_sync_vmsa()
599 save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP]; in sev_es_sync_vmsa()
602 save->xcr0 = svm->vcpu.arch.xcr0; in sev_es_sync_vmsa()
603 save->pkru = svm->vcpu.arch.pkru; in sev_es_sync_vmsa()
604 save->xss = svm->vcpu.arch.ia32_xss; in sev_es_sync_vmsa()
605 save->dr6 = svm->vcpu.arch.dr6; in sev_es_sync_vmsa()
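
The block above bails out early (line 569) if guest debugging is active or DR7 carries non-default bits, since the host loses all debug visibility once this page is encrypted; the rest is a field-by-field copy from the vcpu register array into the VMSA image. A minimal userspace sketch of that copy pattern, using stand-in types (struct save_area and the REG_* indices are illustrative, not the kernel's layouts):

#include <stdint.h>
#include <stdio.h>

enum { REG_RAX, REG_RBX, REG_RCX, REG_RDX, NR_REGS };

struct save_area {              /* stand-in for sev_es_save_area */
    uint64_t rax, rbx, rcx, rdx;
};

static void sync_save_area(struct save_area *save, const uint64_t regs[])
{
    /* Same shape as the kernel code: one explicit store per register,
     * so the soon-to-be-encrypted image sees exactly the vcpu state. */
    save->rax = regs[REG_RAX];
    save->rbx = regs[REG_RBX];
    save->rcx = regs[REG_RCX];
    save->rdx = regs[REG_RDX];
}

int main(void)
{
    uint64_t regs[NR_REGS] = { 1, 2, 3, 4 };
    struct save_area save;

    sync_save_area(&save, regs);
    printf("rax=%llu rdx=%llu\n",
           (unsigned long long)save.rax, (unsigned long long)save.rdx);
    return 0;
}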
617 struct vcpu_svm *svm = to_svm(vcpu); in __sev_launch_update_vmsa() local
621 ret = sev_es_sync_vmsa(svm); in __sev_launch_update_vmsa()
630 clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); in __sev_launch_update_vmsa()
634 vmsa.address = __sme_pa(svm->sev_es.vmsa); in __sev_launch_update_vmsa()
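
Between the register sync and the firmware call sits clflush_cache_range() (line 630): LAUNCH_UPDATE_VMSA encrypts the page in place with the guest key, so dirty host cache lines must reach DRAM first or a later writeback would clobber the encrypted image. A compilable sketch of just that ordering, with stand-in helpers (flush_to_dram() and firmware_encrypt_in_place() are illustrative, not kernel or PSP APIs):

#include <stdint.h>
#include <string.h>

#define PAGE_SIZE 4096

static void flush_to_dram(void *va, size_t len)
{
    (void)va; (void)len;    /* stand-in for clflush_cache_range() */
}

static int firmware_encrypt_in_place(uint64_t pa, size_t len)
{
    (void)pa; (void)len;    /* stand-in for the LAUNCH_UPDATE_VMSA cmd */
    return 0;
}

static int launch_update_vmsa(void *vmsa, uint64_t vmsa_pa)
{
    /* 1. Populate the VMSA image (sev_es_sync_vmsa() in the kernel). */
    memset(vmsa, 0, PAGE_SIZE);

    /* 2. Flush: the PSP rewrites the page in place, below the caches. */
    flush_to_dram(vmsa, PAGE_SIZE);

    /* 3. Only now hand the physical address to the firmware. */
    return firmware_encrypt_in_place(vmsa_pa, PAGE_SIZE);
}

int main(void)
{
    static unsigned char page[PAGE_SIZE];   /* stand-in VMSA page */

    return launch_update_vmsa(page, (uint64_t)(uintptr_t)page);
}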
2332 struct vcpu_svm *svm; in sev_free_vcpu() local
2337 svm = to_svm(vcpu); in sev_free_vcpu()
2340 sev_flush_encrypted_page(vcpu, svm->sev_es.vmsa); in sev_free_vcpu()
2342 __free_page(virt_to_page(svm->sev_es.vmsa)); in sev_free_vcpu()
2344 if (svm->sev_es.ghcb_sa_free) in sev_free_vcpu()
2345 kvfree(svm->sev_es.ghcb_sa); in sev_free_vcpu()
2348 static void dump_ghcb(struct vcpu_svm *svm) in dump_ghcb() argument
2350 struct ghcb *ghcb = svm->sev_es.ghcb; in dump_ghcb()
2361 pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa); in dump_ghcb()
2373 static void sev_es_sync_to_ghcb(struct vcpu_svm *svm) in sev_es_sync_to_ghcb() argument
2375 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_sync_to_ghcb()
2376 struct ghcb *ghcb = svm->sev_es.ghcb; in sev_es_sync_to_ghcb()
2392 static void sev_es_sync_from_ghcb(struct vcpu_svm *svm) in sev_es_sync_from_ghcb() argument
2394 struct vmcb_control_area *control = &svm->vmcb->control; in sev_es_sync_from_ghcb()
2395 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_sync_from_ghcb()
2396 struct ghcb *ghcb = svm->sev_es.ghcb; in sev_es_sync_from_ghcb()
2419 svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb); in sev_es_sync_from_ghcb()
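
sev_es_sync_from_ghcb() only trusts fields the guest has flagged in the GHCB's valid bitmap; accessors like ghcb_get_cpl_if_valid() return the field when its bit is set and 0 otherwise. A small sketch of that accessor pattern under a simplified layout (mini_ghcb and its one-bit-per-8-byte-field bitmap are illustrative; the real struct ghcb lives in arch/x86/include/asm/svm.h):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct mini_ghcb {
    uint64_t rax, rbx, cpl;        /* a few stand-in save fields */
    uint8_t valid_bitmap[16];
};

/* One valid bit per 8-byte field: field index = byte offset / 8. */
static bool ghcb_field_valid(const struct mini_ghcb *g, size_t off)
{
    size_t idx = off / 8;

    return g->valid_bitmap[idx / 8] & (1u << (idx % 8));
}

#define GHCB_GET_IF_VALID(g, field)                                 \
    (ghcb_field_valid((g), offsetof(struct mini_ghcb, field)) ?     \
     (g)->field : 0)

int main(void)
{
    struct mini_ghcb g = { .cpl = 3 };
    size_t idx = offsetof(struct mini_ghcb, cpl) / 8;

    /* Guest marked only cpl as valid. */
    g.valid_bitmap[idx / 8] |= 1u << (idx % 8);

    printf("cpl=%llu rax=%llu\n",
           (unsigned long long)GHCB_GET_IF_VALID(&g, cpl),
           (unsigned long long)GHCB_GET_IF_VALID(&g, rax));
    return 0;
}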
2437 static int sev_es_validate_vmgexit(struct vcpu_svm *svm) in sev_es_validate_vmgexit() argument
2444 ghcb = svm->sev_es.ghcb; in sev_es_validate_vmgexit()
2545 vcpu = &svm->vcpu; in sev_es_validate_vmgexit()
2556 dump_ghcb(svm); in sev_es_validate_vmgexit()
2569 void sev_es_unmap_ghcb(struct vcpu_svm *svm) in sev_es_unmap_ghcb() argument
2571 if (!svm->sev_es.ghcb) in sev_es_unmap_ghcb()
2574 if (svm->sev_es.ghcb_sa_free) { in sev_es_unmap_ghcb()
2580 if (svm->sev_es.ghcb_sa_sync) { in sev_es_unmap_ghcb()
2581 kvm_write_guest(svm->vcpu.kvm, in sev_es_unmap_ghcb()
2582 ghcb_get_sw_scratch(svm->sev_es.ghcb), in sev_es_unmap_ghcb()
2583 svm->sev_es.ghcb_sa, in sev_es_unmap_ghcb()
2584 svm->sev_es.ghcb_sa_len); in sev_es_unmap_ghcb()
2585 svm->sev_es.ghcb_sa_sync = false; in sev_es_unmap_ghcb()
2588 kvfree(svm->sev_es.ghcb_sa); in sev_es_unmap_ghcb()
2589 svm->sev_es.ghcb_sa = NULL; in sev_es_unmap_ghcb()
2590 svm->sev_es.ghcb_sa_free = false; in sev_es_unmap_ghcb()
2593 trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->sev_es.ghcb); in sev_es_unmap_ghcb()
2595 sev_es_sync_to_ghcb(svm); in sev_es_unmap_ghcb()
2597 kvm_vcpu_unmap(&svm->vcpu, &svm->sev_es.ghcb_map, true); in sev_es_unmap_ghcb()
2598 svm->sev_es.ghcb = NULL; in sev_es_unmap_ghcb()
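
The teardown order above matters: a host-allocated scratch buffer is written back to guest memory first, and only if the exit asked for a sync; then the exit results are published into the shared GHCB; only then is the mapping dropped. A stand-in sketch of the scratch half of that flow (struct scratch_state and write_guest() are illustrative, not kernel types):

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct scratch_state {
    void *sa;           /* host copy of the guest scratch area */
    uint64_t sa_gpa;    /* where it came from in guest memory */
    size_t sa_len;
    bool sa_sync;       /* write back on unmap? */
    bool sa_free;       /* host-allocated (vs. inside the GHCB)? */
};

static void write_guest(uint64_t gpa, const void *buf, size_t len)
{
    (void)gpa; (void)buf; (void)len;    /* stand-in for kvm_write_guest() */
}

static void unmap_scratch(struct scratch_state *s)
{
    if (s->sa_free) {
        /* Host-owned buffer: push results back before freeing. */
        if (s->sa_sync) {
            write_guest(s->sa_gpa, s->sa, s->sa_len);
            s->sa_sync = false;
        }
        free(s->sa);
        s->sa = NULL;
        s->sa_free = false;
    }
    /* ...then sync response state into the GHCB and unmap it. */
}

int main(void)
{
    struct scratch_state s = {
        .sa = malloc(64), .sa_len = 64,
        .sa_sync = true, .sa_free = true,
    };

    unmap_scratch(&s);
    return 0;
}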
2601 void pre_sev_run(struct vcpu_svm *svm, int cpu) in pre_sev_run() argument
2604 int asid = sev_get_asid(svm->vcpu.kvm); in pre_sev_run()
2607 svm->asid = asid; in pre_sev_run()
2615 if (sd->sev_vmcbs[asid] == svm->vmcb && in pre_sev_run()
2616 svm->vcpu.arch.last_vmentry_cpu == cpu) in pre_sev_run()
2619 sd->sev_vmcbs[asid] = svm->vmcb; in pre_sev_run()
2620 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; in pre_sev_run()
2621 vmcb_mark_dirty(svm->vmcb, VMCB_ASID); in pre_sev_run()
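
A SEV guest's ASID is fixed, since it tags the encryption key, so pre_sev_run() instead remembers, per CPU and per ASID, which VMCB last ran, and requests TLB_CONTROL_FLUSH_ASID whenever a different VMCB (or a different previous CPU) shows up under that ASID. A compilable sketch of that decision, with illustrative dimensions:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS  2
#define NR_ASIDS 4

static const void *last_vmcb[NR_CPUS][NR_ASIDS];   /* sd->sev_vmcbs analogue */

static bool need_asid_flush(int cpu, int asid, const void *vmcb,
                            int last_vmentry_cpu)
{
    if (last_vmcb[cpu][asid] == vmcb && last_vmentry_cpu == cpu)
        return false;   /* same VMCB, same CPU: TLB entries are ours */

    last_vmcb[cpu][asid] = vmcb;
    return true;        /* request TLB_CONTROL_FLUSH_ASID */
}

int main(void)
{
    int vmcb_a, vmcb_b;

    printf("%d\n", need_asid_flush(0, 1, &vmcb_a, 0));  /* 1: first run */
    printf("%d\n", need_asid_flush(0, 1, &vmcb_a, 0));  /* 0: cached */
    printf("%d\n", need_asid_flush(0, 1, &vmcb_b, 0));  /* 1: new VMCB */
    return 0;
}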
2625 static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) in setup_vmgexit_scratch() argument
2627 struct vmcb_control_area *control = &svm->vmcb->control; in setup_vmgexit_scratch()
2628 struct ghcb *ghcb = svm->sev_es.ghcb; in setup_vmgexit_scratch()
2664 scratch_va = (void *)svm->sev_es.ghcb; in setup_vmgexit_scratch()
2680 if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) { in setup_vmgexit_scratch()
2694 svm->sev_es.ghcb_sa_sync = sync; in setup_vmgexit_scratch()
2695 svm->sev_es.ghcb_sa_free = true; in setup_vmgexit_scratch()
2698 svm->sev_es.ghcb_sa = scratch_va; in setup_vmgexit_scratch()
2699 svm->sev_es.ghcb_sa_len = len; in setup_vmgexit_scratch()
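
The scratch area named by sw_scratch either lies wholly inside the GHCB's shared buffer, in which case the already-mapped GHCB is used directly (line 2664), or elsewhere in guest memory, in which case a host buffer is allocated and filled via kvm_read_guest() (line 2680). A sketch of the containment check, assuming illustrative offset/size constants (the real values derive from struct ghcb's shared_buffer field):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GHCB_SHARED_BUF_OFF  0x800    /* illustrative */
#define GHCB_SHARED_BUF_SIZE 2032     /* illustrative */

static bool scratch_in_ghcb(uint64_t ghcb_gpa, uint64_t scratch_gpa,
                            uint64_t len)
{
    uint64_t buf_beg = ghcb_gpa + GHCB_SHARED_BUF_OFF;
    uint64_t buf_end = buf_beg + GHCB_SHARED_BUF_SIZE;

    /* Both ends must land inside the shared buffer; otherwise the
     * host allocates its own copy and reads it from guest memory. */
    return scratch_gpa >= buf_beg &&
           scratch_gpa + len >= scratch_gpa &&     /* no wraparound */
           scratch_gpa + len <= buf_end;
}

int main(void)
{
    /* GHCB at gpa 0x100000; scratch inside vs. outside its buffer. */
    printf("%d %d\n",
           scratch_in_ghcb(0x100000, 0x100800, 32),
           scratch_in_ghcb(0x100000, 0x200000, 32));
    return 0;
}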
2710 static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask, in set_ghcb_msr_bits() argument
2713 svm->vmcb->control.ghcb_gpa &= ~(mask << pos); in set_ghcb_msr_bits()
2714 svm->vmcb->control.ghcb_gpa |= (value & mask) << pos; in set_ghcb_msr_bits()
2717 static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos) in get_ghcb_msr_bits() argument
2719 return (svm->vmcb->control.ghcb_gpa >> pos) & mask; in get_ghcb_msr_bits()
2722 static void set_ghcb_msr(struct vcpu_svm *svm, u64 value) in set_ghcb_msr() argument
2724 svm->vmcb->control.ghcb_gpa = value; in set_ghcb_msr()
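
These three helpers are plain mask-and-shift packing over the 64-bit GHCB MSR value, which the MSR protocol multiplexes into fields. The same logic as standalone C (the example field positions are arbitrary):

#include <stdint.h>
#include <stdio.h>

static void set_bits(uint64_t *msr, uint64_t value, uint64_t mask,
                     unsigned int pos)
{
    *msr &= ~(mask << pos);            /* clear the field */
    *msr |= (value & mask) << pos;     /* install the new value */
}

static uint64_t get_bits(uint64_t msr, uint64_t mask, unsigned int pos)
{
    return (msr >> pos) & mask;
}

int main(void)
{
    uint64_t msr = 0;

    set_bits(&msr, 0x8000001f, 0xffffffff, 32);    /* e.g. a CPUID fn */
    set_bits(&msr, 0x004, 0xfff, 0);               /* e.g. a request code */
    printf("msr=%#llx fn=%#llx\n", (unsigned long long)msr,
           (unsigned long long)get_bits(msr, 0xffffffff, 32));
    return 0;
}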
2727 static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm) in sev_handle_vmgexit_msr_protocol() argument
2729 struct vmcb_control_area *control = &svm->vmcb->control; in sev_handle_vmgexit_msr_protocol()
2730 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_handle_vmgexit_msr_protocol()
2736 trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id, in sev_handle_vmgexit_msr_protocol()
2741 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX, in sev_handle_vmgexit_msr_protocol()
2748 cpuid_fn = get_ghcb_msr_bits(svm, in sev_handle_vmgexit_msr_protocol()
2762 cpuid_reg = get_ghcb_msr_bits(svm, in sev_handle_vmgexit_msr_protocol()
2774 set_ghcb_msr_bits(svm, cpuid_value, in sev_handle_vmgexit_msr_protocol()
2778 set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP, in sev_handle_vmgexit_msr_protocol()
2786 reason_set = get_ghcb_msr_bits(svm, in sev_handle_vmgexit_msr_protocol()
2789 reason_code = get_ghcb_msr_bits(svm, in sev_handle_vmgexit_msr_protocol()
2807 trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id, in sev_handle_vmgexit_msr_protocol()
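
For the CPUID request, the handler pulls the function and register selector out of the MSR, runs the CPUID on the guest's behalf, and packs the result plus a response code back into the same MSR. A sketch under assumed GHCB-spec encodings (request code 0x004 in bits 11:0, response 0x005, register selector in bits 31:30, function/value in bits 63:32; verify against the GHCB specification before relying on them):

#include <stdint.h>
#include <stdio.h>

#define MSR_INFO_MASK   0xfffULL    /* bits 11:0: request/response code */
#define MSR_CPUID_REQ   0x004ULL    /* assumed per GHCB spec */
#define MSR_CPUID_RESP  0x005ULL    /* assumed per GHCB spec */

static uint64_t handle_cpuid_req(uint64_t msr)
{
    uint64_t fn  = msr >> 32;            /* CPUID function, bits 63:32 */
    uint64_t reg = (msr >> 30) & 0x3;    /* 0=eax 1=ebx 2=ecx 3=edx */
    uint64_t value;

    if ((msr & MSR_INFO_MASK) != MSR_CPUID_REQ)
        return msr;    /* not a CPUID request */

    /* Stand-in for executing CPUID on the vCPU's behalf. */
    value = (fn + reg) & 0xffffffffULL;

    /* Build the response in place: new value in bits 63:32, response
     * code in bits 11:0, register selector bits left as the guest set. */
    msr &= ~(0xffffffffULL << 32);
    msr |= value << 32;
    msr = (msr & ~MSR_INFO_MASK) | MSR_CPUID_RESP;
    return msr;
}

int main(void)
{
    uint64_t req = (0x8000001fULL << 32) | MSR_CPUID_REQ;

    printf("resp=%#llx\n", (unsigned long long)handle_cpuid_req(req));
    return 0;
}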
2815 struct vcpu_svm *svm = to_svm(vcpu); in sev_handle_vmgexit() local
2816 struct vmcb_control_area *control = &svm->vmcb->control; in sev_handle_vmgexit()
2824 return sev_handle_vmgexit_msr_protocol(svm); in sev_handle_vmgexit()
2833 if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) { in sev_handle_vmgexit()
2842 svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva; in sev_handle_vmgexit()
2843 ghcb = svm->sev_es.ghcb_map.hva; in sev_handle_vmgexit()
2849 ret = sev_es_validate_vmgexit(svm); in sev_handle_vmgexit()
2853 sev_es_sync_from_ghcb(svm); in sev_handle_vmgexit()
2859 ret = setup_vmgexit_scratch(svm, true, control->exit_info_2); in sev_handle_vmgexit()
2866 svm->sev_es.ghcb_sa); in sev_handle_vmgexit()
2869 ret = setup_vmgexit_scratch(svm, false, control->exit_info_2); in sev_handle_vmgexit()
2876 svm->sev_es.ghcb_sa); in sev_handle_vmgexit()
2919 int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in) in sev_es_string_io() argument
2925 if (svm->vmcb->control.exit_info_2 > INT_MAX) in sev_es_string_io()
2928 count = svm->vmcb->control.exit_info_2; in sev_es_string_io()
2932 r = setup_vmgexit_scratch(svm, in, bytes); in sev_es_string_io()
2936 return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa, in sev_es_string_io()
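
exit_info_2 carries the guest-controlled rep count, so it is capped at INT_MAX before use; in the kernel the following count * size multiply is overflow-checked as well (check_mul_overflow()) before the scratch buffer is sized from it. A userspace sketch of both guards, using the analogous __builtin_mul_overflow():

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static int string_io_bytes(uint64_t exit_info_2, unsigned int size,
                           unsigned int *bytes)
{
    unsigned int count;

    if (exit_info_2 > INT_MAX)
        return -1;    /* guest-supplied count is out of range */

    count = (unsigned int)exit_info_2;
    if (__builtin_mul_overflow(count, size, bytes))
        return -1;    /* count * size would not fit */

    return 0;
}

int main(void)
{
    unsigned int bytes;

    if (!string_io_bytes(512, 4, &bytes))
        printf("bytes=%u\n", bytes);            /* 2048 */
    if (string_io_bytes(UINT64_C(1) << 40, 4, &bytes))
        printf("rejected oversized count\n");
    return 0;
}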
2940 static void sev_es_init_vmcb(struct vcpu_svm *svm) in sev_es_init_vmcb() argument
2942 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_init_vmcb()
2944 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE; in sev_es_init_vmcb()
2945 svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK; in sev_es_init_vmcb()
2952 svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa); in sev_es_init_vmcb()
2955 svm_clr_intercept(svm, INTERCEPT_CR0_READ); in sev_es_init_vmcb()
2956 svm_clr_intercept(svm, INTERCEPT_CR4_READ); in sev_es_init_vmcb()
2957 svm_clr_intercept(svm, INTERCEPT_CR8_READ); in sev_es_init_vmcb()
2958 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE); in sev_es_init_vmcb()
2959 svm_clr_intercept(svm, INTERCEPT_CR4_WRITE); in sev_es_init_vmcb()
2960 svm_clr_intercept(svm, INTERCEPT_CR8_WRITE); in sev_es_init_vmcb()
2962 svm_clr_intercept(svm, INTERCEPT_SELECTIVE_CR0); in sev_es_init_vmcb()
2965 svm_set_intercept(svm, TRAP_EFER_WRITE); in sev_es_init_vmcb()
2966 svm_set_intercept(svm, TRAP_CR0_WRITE); in sev_es_init_vmcb()
2967 svm_set_intercept(svm, TRAP_CR4_WRITE); in sev_es_init_vmcb()
2968 svm_set_intercept(svm, TRAP_CR8_WRITE); in sev_es_init_vmcb()
2971 clr_exception_intercept(svm, GP_VECTOR); in sev_es_init_vmcb()
2974 svm_clr_intercept(svm, INTERCEPT_XSETBV); in sev_es_init_vmcb()
2977 set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1); in sev_es_init_vmcb()
2978 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1); in sev_es_init_vmcb()
2979 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1); in sev_es_init_vmcb()
2980 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1); in sev_es_init_vmcb()
2981 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1); in sev_es_init_vmcb()
2982 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1); in sev_es_init_vmcb()
2985 (guest_cpuid_has(&svm->vcpu, X86_FEATURE_RDTSCP) || in sev_es_init_vmcb()
2986 guest_cpuid_has(&svm->vcpu, X86_FEATURE_RDPID))) { in sev_es_init_vmcb()
2987 set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, 1, 1); in sev_es_init_vmcb()
2988 if (guest_cpuid_has(&svm->vcpu, X86_FEATURE_RDTSCP)) in sev_es_init_vmcb()
2989 svm_clr_intercept(svm, INTERCEPT_RDTSCP); in sev_es_init_vmcb()
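
The set_msr_interception(..., 1, 1) calls make these MSRs pass-through: for SEV-ES, EFER, PAT and the LBR MSRs live in the encrypted VMSA and are swapped by hardware, so intercepting them gains KVM nothing. In KVM's convention the 1s mean "allow direct access", i.e. the intercept bit in the permission bitmap is cleared (hardware MSRPM polarity is the inverse: a set bit causes an exit). A toy permission-bitmap sketch in KVM's convention (the slot math is illustrative, not the hardware MSRPM layout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_SLOTS 64

static uint32_t msrpm[(NR_SLOTS * 2 + 31) / 32];

static void set_msr_passthrough(unsigned int slot, bool read, bool write)
{
    /* Two bits per MSR slot; a set bit here means "direct access". */
    uint32_t bits = (read ? 1u : 0u) | (write ? 2u : 0u);
    unsigned int word = (slot * 2) / 32, shift = (slot * 2) % 32;

    msrpm[word] = (msrpm[word] & ~(3u << shift)) | (bits << shift);
}

int main(void)
{
    set_msr_passthrough(0, true, true);    /* e.g. an EFER-like slot */
    printf("word0=%#x\n", (unsigned)msrpm[0]);
    return 0;
}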
2993 void sev_init_vmcb(struct vcpu_svm *svm) in sev_init_vmcb() argument
2995 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE; in sev_init_vmcb()
2996 clr_exception_intercept(svm, UD_VECTOR); in sev_init_vmcb()
2998 if (sev_es_guest(svm->vcpu.kvm)) in sev_init_vmcb()
2999 sev_es_init_vmcb(svm); in sev_init_vmcb()
3002 void sev_es_vcpu_reset(struct vcpu_svm *svm) in sev_es_vcpu_reset() argument
3008 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX, in sev_es_vcpu_reset()
3034 struct vcpu_svm *svm = to_svm(vcpu); in sev_vcpu_deliver_sipi_vector() local
3037 if (!svm->sev_es.received_first_sipi) { in sev_vcpu_deliver_sipi_vector()
3038 svm->sev_es.received_first_sipi = true; in sev_vcpu_deliver_sipi_vector()
3047 if (!svm->sev_es.ghcb) in sev_vcpu_deliver_sipi_vector()
3050 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1); in sev_vcpu_deliver_sipi_vector()
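
On SEV-ES the host cannot forge an AP's start state, so the first SIPI is left to the guest firmware, and later SIPIs merely release a vCPU parked in an AP reset hold by writing a nonzero sw_exit_info_2 into its still-mapped GHCB. A small state-machine sketch of that behavior (struct ap_state is illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ap_state {
    bool received_first_sipi;
    bool ghcb_mapped;        /* is the vCPU parked in a reset hold? */
    uint64_t sw_exit_info_2;
};

static void deliver_sipi(struct ap_state *ap)
{
    if (!ap->received_first_sipi) {
        ap->received_first_sipi = true;
        return;    /* guest firmware handles the real AP bring-up */
    }
    if (!ap->ghcb_mapped)
        return;    /* nothing waiting on a reset hold */

    ap->sw_exit_info_2 = 1;    /* wake the AP_RESET_HOLD VMGEXIT */
}

int main(void)
{
    struct ap_state ap = { .ghcb_mapped = true };

    deliver_sipi(&ap);    /* first SIPI: ignored */
    deliver_sipi(&ap);    /* second: completes the reset hold */
    printf("sw_exit_info_2=%llu\n",
           (unsigned long long)ap.sw_exit_info_2);
    return 0;
}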