/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/thread_info.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_USER_MEM_SLOTS 512
#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
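
/*
 * Illustrative note: these requests are raised through the generic KVM
 * request machinery and serviced before the vCPU next enters the guest,
 * e.g.:
 *
 *	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 *	kvm_vcpu_kick(vcpu);
 */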

#define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
				     KVM_DIRTY_LOG_INITIALLY_SET)

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int kvm_sve_max_vl;
int kvm_arm_init_sve(void);

int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);
int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext);
void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);

struct kvm_vmid {
	/* The VMID generation used for the virt. memory system */
	u64    vmid_gen;
	u32    vmid;
};
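
/*
 * Illustrative note: on each guest entry, the vCPU's vmid_gen is compared
 * against a global generation counter (see update_vmid() in
 * arch/arm64/kvm/arm.c); a stale generation forces allocation of a fresh
 * hardware VMID before the guest runs again.
 */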

struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/*
	 * stage2 entry level table
	 *
	 * Two kvm_s2_mmu structures in the same VM can point to the same
	 * pgd here.  This happens when running a guest using a
	 * translation regime that isn't affected by its own stage-2
	 * translation, such as a non-VHE hypervisor running at vEL2, or
	 * for vEL1/EL0 with vHCR_EL2.VM == 0.  In that case, we use the
	 * canonical stage-2 page tables.
	 */
	phys_addr_t	pgd_phys;
	struct kvm_pgtable *pgt;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	struct kvm *kvm;
};
struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/* VTCR_EL2 value for this VM */
	u64    vtcr;

	/* The maximum number of vCPUs depends on the used GIC model */
	int max_vcpus;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Mandated version of PSCI */
	u32 psci_version;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space.  User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
	bool return_nisv_io_abort_to_user;

	/*
	 * VM-wide PMU filter, implemented as a bitmap and big enough for
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;
	unsigned int pmuver;

	u8 pfr0_csv2;
};

struct kvm_vcpu_fault_info {
	u32 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};

enum vcpu_sysreg {
	__INVALID_SYSREG__,   /* 0 is reserved as an invalid value */
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	ZCR_EL1,	/* SVE Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMSWINC_EL0,	/* Software Increment Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in a strict increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	ELR_EL1,
	SP_EL1,
	SPSR_EL1,

	CNTVOFF_EL2,
	CNTV_CVAL_EL0,
	CNTV_CTL_EL0,
	CNTP_CVAL_EL0,
	CNTP_CTL_EL0,

	/* 32bit specific registers. Keep them at the end of the range */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	NR_SYS_REGS	/* Nothing after this line! */
};

/* 32bit mapping */
#define c0_MPIDR	(MPIDR_EL1 * 2)	/* MultiProcessor ID Register */
#define c0_CSSELR	(CSSELR_EL1 * 2)/* Cache Size Selection Register */
#define c1_SCTLR	(SCTLR_EL1 * 2)	/* System Control Register */
#define c1_ACTLR	(ACTLR_EL1 * 2)	/* Auxiliary Control Register */
#define c1_CPACR	(CPACR_EL1 * 2)	/* Coprocessor Access Control */
#define c2_TTBR0	(TTBR0_EL1 * 2)	/* Translation Table Base Register 0 */
#define c2_TTBR0_high	(c2_TTBR0 + 1)	/* TTBR0 top 32 bits */
#define c2_TTBR1	(TTBR1_EL1 * 2)	/* Translation Table Base Register 1 */
#define c2_TTBR1_high	(c2_TTBR1 + 1)	/* TTBR1 top 32 bits */
#define c2_TTBCR	(TCR_EL1 * 2)	/* Translation Table Base Control R. */
#define c3_DACR		(DACR32_EL2 * 2)/* Domain Access Control Register */
#define c5_DFSR		(ESR_EL1 * 2)	/* Data Fault Status Register */
#define c5_IFSR		(IFSR32_EL2 * 2)/* Instruction Fault Status Register */
#define c5_ADFSR	(AFSR0_EL1 * 2)	/* Auxiliary Data Fault Status R */
#define c5_AIFSR	(AFSR1_EL1 * 2)	/* Auxiliary Instr Fault Status R */
#define c6_DFAR		(FAR_EL1 * 2)	/* Data Fault Address Register */
#define c6_IFAR		(c6_DFAR + 1)	/* Instruction Fault Address Register */
#define c7_PAR		(PAR_EL1 * 2)	/* Physical Address Register */
#define c7_PAR_high	(c7_PAR + 1)	/* PAR top 32 bits */
#define c10_PRRR	(MAIR_EL1 * 2)	/* Primary Region Remap Register */
#define c10_NMRR	(c10_PRRR + 1)	/* Normal Memory Remap Register */
#define c12_VBAR	(VBAR_EL1 * 2)	/* Vector Base Address Register */
#define c13_CID		(CONTEXTIDR_EL1 * 2)	/* Context ID Register */
#define c13_TID_URW	(TPIDR_EL0 * 2)	/* Thread ID, User R/W */
#define c13_TID_URO	(TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
#define c13_TID_PRIV	(TPIDR_EL1 * 2)	/* Thread ID, Privileged */
#define c10_AMAIR0	(AMAIR_EL1 * 2)	/* Aux Memory Attr Indirection Reg */
#define c10_AMAIR1	(c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
#define c14_CNTKCTL	(CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */

#define cp14_DBGDSCRext	(MDSCR_EL1 * 2)
#define cp14_DBGBCR0	(DBGBCR0_EL1 * 2)
#define cp14_DBGBVR0	(DBGBVR0_EL1 * 2)
#define cp14_DBGBXVR0	(cp14_DBGBVR0 + 1)
#define cp14_DBGWCR0	(DBGWCR0_EL1 * 2)
#define cp14_DBGWVR0	(DBGWVR0_EL1 * 2)
#define cp14_DBGDCCINT	(MDCCINT_EL1 * 2)
#define cp14_DBGVCR	(DBGVCR32_EL2 * 2)

#define NR_COPRO_REGS	(NR_SYS_REGS * 2)

struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	u64	spsr_abt;
	u64	spsr_und;
	u64	spsr_irq;
	u64	spsr_fiq;

	struct user_fpsimd_state fp_regs;

	union {
		u64 sys_regs[NR_SYS_REGS];
		u32 copro[NR_COPRO_REGS];
	};

	struct kvm_vcpu *__hyp_running_vcpu;
};

struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
	struct kvm_pmu_events pmu_events;
};

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;
	void *sve_state;
	unsigned int sve_max_vl;

	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* HYP configuration */
	u64 hcr_el2;
	u32 mdcr_el2;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* State of various workarounds, see kvm_asm.h for bit assignment */
	u64 workaround_flags;

	/* Miscellaneous vcpu state flags */
	u64 flags;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them.  host_debug_state are
	 * the host registers which are saved and restored during
	 * world switches. external_debug_state contains the debug
	 * values we want to debug the guest. This is set via the
	 * KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	struct thread_info *host_thread_info;	/* hyp VA */
	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32	mdscr_el1;
	} guest_debug_preserved;

	/* vcpu power-off state */
	bool power_off;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Detect first run of a vcpu */
	bool has_run_once;

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/*
	 * True when deferrable sysregs are loaded on the physical CPU,
	 * see kvm_vcpu_load_sysregs_vhe and kvm_vcpu_put_sysregs_vhe.
	 */
	bool sysregs_loaded_on_cpu;

	/* Guest PV state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;
};

/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) ((void *)((char *)((vcpu)->arch.sve_state) + \
				      sve_ffr_offset((vcpu)->arch.sve_max_vl)))
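
/*
 * Illustrative note: sve_state holds the guest's Z/P registers followed by
 * FFR, laid out as in the SVE signal frame; sve_ffr_offset() (from
 * <asm/fpsimd.h>) locates FFR within that block for the low-level
 * sve_save_state()/sve_load_state() routines.
 */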

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = sve_vq_from_vl((vcpu)->arch.sve_max_vl);	\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})
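
/*
 * Illustrative example (not upstream code): the SVE register storage is
 * typically allocated once the vector length has been finalized, along
 * the lines of what kvm_vcpu_finalize_sve() does:
 *
 *	vcpu->arch.sve_state = kzalloc(vcpu_sve_state_size(vcpu), GFP_KERNEL);
 */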

/* vcpu_arch flags field values: */
#define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
#define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_IN_USE	(1 << 3) /* backup for host TIF_SVE */
#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */

#define vcpu_has_sve(vcpu) (system_supports_sve() && \
			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)						\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
	 (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
#else
#define vcpu_has_ptrauth(vcpu)		false
#endif

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory backed version of a register, and not the one most recently
 * accessed by a running VCPU.  For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 */
#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
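
/*
 * Illustrative example: a userspace register accessor can read the
 * memory-backed copy directly, while emulation on behalf of a running
 * (loaded) vCPU must go through the accessors that know whether the
 * value currently lives in hardware:
 *
 *	u64 stale = __vcpu_sys_reg(vcpu, SCTLR_EL1);	// in-memory copy
 *	u64 live  = vcpu_read_sys_reg(vcpu, SCTLR_EL1);	// hw if loaded
 */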

/*
 * CP14 and CP15 live in the same array, as they are backed by the
 * same system registers.
 */
#define CPx_BIAS		IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)

#define vcpu_cp14(v,r)		((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
#define vcpu_cp15(v,r)		((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
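
/*
 * Illustrative note: each 64-bit entry of sys_regs[] overlays two 32-bit
 * copro[] slots (hence the "* 2" in the c*_ indices above). On a
 * big-endian host the low word of the u64 is the second u32 of the pair,
 * so XOR-ing the index with CPx_BIAS keeps, e.g., vcpu_cp15(v, c5_DFSR)
 * naming the low 32 bits of ESR_EL1 on either endianness.
 */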

struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_success_ns;
	u64 halt_poll_fail_ns;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 exits;
};

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end, unsigned flags);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#define kvm_call_hyp_nvhe(f, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
 * The couple of isb() below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
 * synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while (0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})
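
/*
 * Illustrative example (function name taken from <asm/kvm_asm.h>): the
 * same invocation works on both VHE and nVHE, e.g. flushing a guest's
 * stage-2 TLB entries:
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
 *
 * On VHE this is a direct function call followed by an isb(); on nVHE it
 * is turned into an HVC to the hypervisor by kvm_call_hyp_nvhe().
 */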

void force_vm_exit(const cpumask_t *mask);
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);

int kvm_perf_init(void);
int kvm_perf_teardown(void);

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = GPA_INVALID;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != GPA_INVALID);
}

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

static inline bool kvm_arch_requires_vhe(void)
{
	/*
	 * The Arm architecture specifies that implementation of SVE
	 * requires VHE also to be implemented.  The KVM code for arm64
	 * relies on this when SVE is present:
	 */
	if (system_supports_sve())
		return true;

	return false;
}

void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);

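/*
 * On nVHE systems, a perf counter that must not count the host (i.e.
 * attr->exclude_host is set) cannot be switched from host EL1; its
 * enable/disable is deferred to the EL2 world switch via the
 * kvm_set_pmu_events()/kvm_clr_pmu_events() machinery below.
 */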
static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}

#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_run_map_fp(vcpu);
}

void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);

void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif

void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);

int kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) \
	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)

#endif /* __ARM64_KVM_HOST_H__ */