// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#include "cpuid.h"
#include "kvm_cache_regs.h"

#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

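/*
 * Sizes of the permission bitmaps consumed by VMRUN: the I/O permissions map
 * spans three pages (12K) and the MSR permissions map two pages (8K), with
 * two intercept bits (read and write) per covered MSR.
 */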
#define	IOPM_SIZE (PAGE_SIZE * 3)
#define	MSRPM_SIZE (PAGE_SIZE * 2)

#define MAX_DIRECT_ACCESS_MSRS	46
#define MSRPM_OFFSETS	32
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern int nrips;
extern int vgif;
extern bool intercept_smi;
extern bool x2avic_enabled;
extern bool vnmi;

/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to
 * be updated if this enum is modified.
 */
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_SW = 31,    /* Reserved for hypervisor/software use */
};

#define VMCB_ALL_CLEAN_MASK (					\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |	\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |			\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |	\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |	\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) |			\
	(1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))
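/*
 * Illustrative sketch (editorial note, not upstream code): after KVM writes a
 * VMCB field tracked by one of the clean bits above, the corresponding bit
 * must be cleared so hardware reloads that state on the next VMRUN, e.g.:
 *
 *	svm->vmcb->control.tsc_offset = offset;
 *	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 *
 * Once the state has been consumed, vmcb_mark_all_clean() (defined below)
 * resets the mask while keeping the VMCB_ALWAYS_DIRTY_MASK bits dirty.
 */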

struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	struct kvm *enc_context_owner; /* Owner of copied encryption context */
	struct list_head mirror_vms; /* List of VMs mirroring this VM's encryption context */
	struct list_head mirror_entry; /* Use as a list entry of mirrors */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
	atomic_t migration_in_progress;
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};

struct vmcb_save_area_cached {
	u64 efer;
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
};

struct vmcb_ctrl_area_cached {
	u32 intercepts[MAX_INTERCEPT];
	u16 pause_filter_thresh;
	u16 pause_filter_count;
	u64 iopm_base_pa;
	u64 msrpm_base_pa;
	u64 tsc_offset;
	u32 asid;
	u8 tlb_ctl;
	u32 int_ctl;
	u32 int_vector;
	u32 int_state;
	u32 exit_code;
	u32 exit_code_hi;
	u64 exit_info_1;
	u64 exit_info_2;
	u32 exit_int_info;
	u32 exit_int_info_err;
	u64 nested_ctl;
	u32 event_inj;
	u32 event_inj_err;
	u64 next_rip;
	u64 nested_cr3;
	u64 virt_ext;
	u32 clean;
	union {
		struct hv_vmcb_enlightenments hv_enlightenments;
		u8 reserved_sw[32];
	};
};

struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;
	u64 last_vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/*
	 * A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.
	 */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_ctrl_area_cached ctl;

	/*
	 * Note: this struct is not kept up-to-date while L2 runs; it is only
	 * valid within nested_svm_vmrun.
	 */
	struct vmcb_save_area_cached save;

	bool initialized;

	/*
	 * Indicates whether the MSR bitmap for L2 needs to be rebuilt due to
	 * changes in the MSR bitmap for L1 or switching to a different L2.
	 * Note, this flag can only be used reliably in conjunction with a
	 * paravirt L1 which informs L0 whether any changes to the MSR bitmap
	 * for L2 were done on its side.
	 */
	bool force_msr_bitmap_recalc;
};

struct vcpu_sev_es_state {
	/* SEV-ES support */
	struct sev_es_save_area *vmsa;
	struct ghcb *ghcb;
	u8 valid_bitmap[16];
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;

	/* SEV-ES scratch area support */
	u64 sw_scratch;
	void *ghcb_sa;
	u32 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	u32 asid;
	u32 sysenter_esp_hi;
	u32 sysenter_eip_hi;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 spec_ctrl;

	u64 tsc_ratio_msr;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	/* NMI mask value, used when vNMI is not enabled */
	bool nmi_masked;

	/*
	 * True when NMIs are still masked but guest IRET was just intercepted
	 * and KVM is waiting for RIP to change, which will signal that the
	 * intercepted IRET was retired and thus NMI can be unmasked.
	 */
	bool awaiting_iret_completion;

	/*
	 * Set when KVM is awaiting IRET completion and needs to inject NMIs as
	 * soon as the IRET completes (e.g. NMI is pending injection).  KVM
	 * temporarily steals RFLAGS.TF to single-step the guest in this case
	 * in order to regain control as soon as the NMI-blocking condition
	 * goes away.
	 */
	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	bool nmi_l1_to_l2;

	unsigned long soft_int_csbase;
	unsigned long soft_int_old_rip;
	unsigned long soft_int_next_rip;
	bool soft_int_injected;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vCPU affinity.  This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;

	struct vcpu_sev_es_state sev_es;

	bool guest_state_loaded;

	bool x2avic_msrs_intercepted;

	/* Guest GIF value, used when vGIF is not enabled */
	bool guest_gif;
};

struct svm_cpu_data {
	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;

	struct page *save_area;
	unsigned long save_area_pa;

	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static __always_inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}

static __always_inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->es_active && !WARN_ON_ONCE(!sev->active);
#else
	return false;
#endif
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}

static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

/*
 * Only the PDPTRs are loaded on demand into the shadow MMU.  All other
 * fields are synchronized on VM-Exit, because accessing the VMCB is cheap.
 *
 * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
 * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
 * is changed.  svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
 */
#define SVM_REGS_LAZY_LOAD_SET	(1 << VCPU_EXREG_PDPTR)
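/*
 * Editorial note (rough sketch of how the laziness works, based on the
 * register cache in kvm_cache_regs.h): the registers in this set are marked
 * unavailable after a VM-Exit, so a later access, e.g. reading a PDPTR, calls
 * back into vendor code via the ->cache_reg() hook to pull the value out of
 * the VMCB only when it is actually needed.
 */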

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

static inline bool nested_vgif_enabled(struct vcpu_svm *svm)
{
	return guest_can_use(&svm->vcpu, X86_FEATURE_VGIF) &&
	       (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK);
}

static inline struct vmcb *get_vgif_vmcb(struct vcpu_svm *svm)
{
	if (!vgif)
		return NULL;

	if (is_guest_mode(&svm->vcpu) && !nested_vgif_enabled(svm))
		return svm->nested.vmcb02.ptr;
	else
		return svm->vmcb01.ptr;
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->guest_gif = true;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->guest_gif = false;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_GIF_MASK);
	else
		return svm->guest_gif;
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

static inline bool nested_vnmi_enabled(struct vcpu_svm *svm)
{
	return guest_can_use(&svm->vcpu, X86_FEATURE_VNMI) &&
	       (svm->nested.ctl.int_ctl & V_NMI_ENABLE_MASK);
}

static inline bool is_x2apic_msrpm_offset(u32 offset)
{
	/* 4 msrs per u8, and 4 u8 in u32 */
	u32 msr = offset * 16;

	return (msr >= APIC_BASE_MSR) &&
	       (msr < (APIC_BASE_MSR + 0x100));
}
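/*
 * For example, APIC_BASE_MSR (0x800) occupies two permission bits starting at
 * bit 0x800 * 2 = 0x1000 of the MSRPM, i.e. u32 offset 0x80, and 0x80 * 16 ==
 * 0x800, so offsets 0x80-0x8f (MSRs 0x800-0x8ff) satisfy the check above.
 */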

static inline struct vmcb *get_vnmi_vmcb_l1(struct vcpu_svm *svm)
{
	if (!vnmi)
		return NULL;

	if (is_guest_mode(&svm->vcpu))
		return NULL;
	else
		return svm->vmcb01.ptr;
}

static inline bool is_vnmi_enabled(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vnmi_vmcb_l1(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_NMI_ENABLE_MASK);
	else
		return false;
}

/* svm.c */
#define MSR_INVALID				0xffffffffU

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
void svm_update_lbrv(struct kvm_vcpu *vcpu);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);
void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool disable);
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
				     int trig_mode, int vec);

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct kvm_vcpu *vcpu);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code   = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}
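/*
 * Illustrative use (editorial note): synthesizing an exit that carries no
 * exit qualification, e.g. reflecting a physical interrupt to L1 when L1
 * intercepts INTR:
 *
 *	if (nested_exit_on_intr(svm))
 *		nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
 */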

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */
#define AVIC_REQUIRED_APICV_INHIBITS			\
(							\
	BIT(APICV_INHIBIT_REASON_DISABLE) |		\
	BIT(APICV_INHIBIT_REASON_ABSENT) |		\
	BIT(APICV_INHIBIT_REASON_HYPERV) |		\
	BIT(APICV_INHIBIT_REASON_NESTED) |		\
	BIT(APICV_INHIBIT_REASON_IRQWIN) |		\
	BIT(APICV_INHIBIT_REASON_PIT_REINJ) |		\
	BIT(APICV_INHIBIT_REASON_BLOCKIRQ) |		\
	BIT(APICV_INHIBIT_REASON_SEV)      |		\
	BIT(APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED) |	\
	BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) |	\
	BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED) |	\
	BIT(APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED)	\
)

bool avic_hardware_setup(void);
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
			uint32_t guest_irq, bool set);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
void avic_ring_doorbell(struct kvm_vcpu *vcpu);
unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu);
void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu);


/* sev.c */

#define GHCB_VERSION_MAX	1ULL
#define GHCB_VERSION_MIN	1ULL


extern unsigned int max_sev_asid;

void sev_vm_destroy(struct kvm *kvm);
int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp);
int sev_mem_enc_register_region(struct kvm *kvm,
				struct kvm_enc_region *range);
int sev_mem_enc_unregister_region(struct kvm *kvm,
				  struct kvm_enc_region *range);
int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd);
int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
void sev_guest_memory_reclaimed(struct kvm *kvm);

void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_unsetup(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_init_vmcb(struct vcpu_svm *svm);
void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_vcpu_reset(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

/* vmenter.S */

void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);

#define DEFINE_KVM_GHCB_ACCESSORS(field)						\
	static __always_inline bool kvm_ghcb_##field##_is_valid(const struct vcpu_svm *svm) \
	{									\
		return test_bit(GHCB_BITMAP_IDX(field),				\
				(unsigned long *)&svm->sev_es.valid_bitmap);	\
	}									\
										\
	static __always_inline u64 kvm_ghcb_get_##field##_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb) \
	{									\
		return kvm_ghcb_##field##_is_valid(svm) ? ghcb->save.field : 0;	\
	}									\

DEFINE_KVM_GHCB_ACCESSORS(cpl)
DEFINE_KVM_GHCB_ACCESSORS(rax)
DEFINE_KVM_GHCB_ACCESSORS(rcx)
DEFINE_KVM_GHCB_ACCESSORS(rdx)
DEFINE_KVM_GHCB_ACCESSORS(rbx)
DEFINE_KVM_GHCB_ACCESSORS(rsi)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_code)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_1)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_2)
DEFINE_KVM_GHCB_ACCESSORS(sw_scratch)
DEFINE_KVM_GHCB_ACCESSORS(xcr0)
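/*
 * For example, DEFINE_KVM_GHCB_ACCESSORS(rax) above expands to
 * kvm_ghcb_rax_is_valid(), which tests GHCB_BITMAP_IDX(rax) in
 * svm->sev_es.valid_bitmap, and kvm_ghcb_get_rax_if_valid(), which returns
 * ghcb->save.rax when that bit is set and 0 otherwise.
 */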

#endif