1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * This module enables machines with Intel VT-x extensions to run virtual
6  * machines without emulation or binary translation.
7  *
8  * MMU support
9  *
10  * Copyright (C) 2006 Qumranet, Inc.
11  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
12  *
13  * Authors:
14  *   Yaniv Kamay  <yaniv@qumranet.com>
15  *   Avi Kivity   <avi@qumranet.com>
16  */
17 
18 #include "irq.h"
19 #include "ioapic.h"
20 #include "mmu.h"
21 #include "mmu_internal.h"
22 #include "tdp_mmu.h"
23 #include "x86.h"
24 #include "kvm_cache_regs.h"
25 #include "kvm_emulate.h"
26 #include "cpuid.h"
27 #include "spte.h"
28 
29 #include <linux/kvm_host.h>
30 #include <linux/types.h>
31 #include <linux/string.h>
32 #include <linux/mm.h>
33 #include <linux/highmem.h>
34 #include <linux/moduleparam.h>
35 #include <linux/export.h>
36 #include <linux/swap.h>
37 #include <linux/hugetlb.h>
38 #include <linux/compiler.h>
39 #include <linux/srcu.h>
40 #include <linux/slab.h>
41 #include <linux/sched/signal.h>
42 #include <linux/uaccess.h>
43 #include <linux/hash.h>
44 #include <linux/kern_levels.h>
45 #include <linux/kthread.h>
46 
47 #include <asm/page.h>
48 #include <asm/memtype.h>
49 #include <asm/cmpxchg.h>
50 #include <asm/io.h>
51 #include <asm/set_memory.h>
52 #include <asm/vmx.h>
53 #include <asm/kvm_page_track.h>
54 #include "trace.h"
55 
56 #include "paging.h"
57 
58 extern bool itlb_multihit_kvm_mitigation;
59 
60 int __read_mostly nx_huge_pages = -1;
61 #ifdef CONFIG_PREEMPT_RT
62 /* Recovery can cause latency spikes, disable it for PREEMPT_RT.  */
63 static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
64 #else
65 static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
66 #endif
67 
68 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
69 static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);
70 
71 static const struct kernel_param_ops nx_huge_pages_ops = {
72 	.set = set_nx_huge_pages,
73 	.get = param_get_bool,
74 };
75 
76 static const struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = {
77 	.set = set_nx_huge_pages_recovery_ratio,
78 	.get = param_get_uint,
79 };
80 
81 module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
82 __MODULE_PARM_TYPE(nx_huge_pages, "bool");
83 module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops,
84 		&nx_huge_pages_recovery_ratio, 0644);
85 __MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
86 
87 static bool __read_mostly force_flush_and_sync_on_reuse;
88 module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);
89 
90 /*
91  * When this variable is set to true, it enables Two-Dimensional Paging
92  * (TDP), where the hardware walks two page tables:
93  * 1. the guest-virtual to guest-physical translation, and
94  * 2. while doing 1., the guest-physical to host-physical translation.
95  * If the hardware supports TDP, shadow paging is not needed.
96  */
97 bool tdp_enabled = false;
98 
99 static int max_huge_page_level __read_mostly;
100 static int tdp_root_level __read_mostly;
101 static int max_tdp_level __read_mostly;
102 
103 enum {
104 	AUDIT_PRE_PAGE_FAULT,
105 	AUDIT_POST_PAGE_FAULT,
106 	AUDIT_PRE_PTE_WRITE,
107 	AUDIT_POST_PTE_WRITE,
108 	AUDIT_PRE_SYNC,
109 	AUDIT_POST_SYNC
110 };
111 
112 #ifdef MMU_DEBUG
113 bool dbg = 0;
114 module_param(dbg, bool, 0644);
115 #endif
116 
117 #define PTE_PREFETCH_NUM		8
118 
119 #define PT32_LEVEL_BITS 10
120 
121 #define PT32_LEVEL_SHIFT(level) \
122 		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
123 
124 #define PT32_LVL_OFFSET_MASK(level) \
125 	(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
126 						* PT32_LEVEL_BITS))) - 1))
127 
128 #define PT32_INDEX(address, level)\
129 	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
130 
131 
132 #define PT32_BASE_ADDR_MASK PAGE_MASK
133 #define PT32_DIR_BASE_ADDR_MASK \
134 	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
135 #define PT32_LVL_ADDR_MASK(level) \
136 	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
137 					    * PT32_LEVEL_BITS))) - 1))
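
/*
 * A minimal worked example (illustrative only, not used by the code): for
 * the non-PAE two-level guest format, PT32_LEVEL_SHIFT(2) == 22 and
 * PT32_LEVEL_SHIFT(1) == 12, so for a guest-virtual address 0xc0a85123:
 *
 *	PT32_INDEX(0xc0a85123, 2) == (0xc0a85123 >> 22) & 0x3ff == 0x302
 *	PT32_INDEX(0xc0a85123, 1) == (0xc0a85123 >> 12) & 0x3ff == 0x285
 *
 * i.e. the address is mapped via entry 0x302 of the page directory and
 * entry 0x285 of the page table.
 */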
138 
139 #include <trace/events/kvm.h>
140 
141 /* make pte_list_desc fit well in cache lines */
142 #define PTE_LIST_EXT 14
143 
144 /*
145  * Slightly optimize the cacheline layout by putting `more' and `spte_count'
146  * at the start; accessing the descriptor then touches only a single cacheline
147  * both when it is full (spte_count == PTE_LIST_EXT) and when entries <= 6.
148  */
149 struct pte_list_desc {
150 	struct pte_list_desc *more;
151 	/*
152 	 * The number of entries stored in this pte_list_desc.  It needn't be a
153 	 * u64; one is used only for easier alignment.  PTE_LIST_EXT means full.
154 	 */
155 	u64 spte_count;
156 	u64 *sptes[PTE_LIST_EXT];
157 };
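
/*
 * Layout sketch (illustrative, assuming 64-byte cachelines and 8-byte
 * pointers): sizeof(struct pte_list_desc) == 8 + 8 + 14 * 8 == 128, i.e.
 * exactly two cachelines.  The first cacheline holds 'more', 'spte_count'
 * and sptes[0..5], so a descriptor with at most 6 entries is served by a
 * single cacheline, and fullness (spte_count == PTE_LIST_EXT) can be
 * checked without touching the second one.
 */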
158 
159 struct kvm_shadow_walk_iterator {
160 	u64 addr;
161 	hpa_t shadow_addr;
162 	u64 *sptep;
163 	int level;
164 	unsigned index;
165 };
166 
167 #define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)     \
168 	for (shadow_walk_init_using_root(&(_walker), (_vcpu),              \
169 					 (_root), (_addr));                \
170 	     shadow_walk_okay(&(_walker));			           \
171 	     shadow_walk_next(&(_walker)))
172 
173 #define for_each_shadow_entry(_vcpu, _addr, _walker)            \
174 	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
175 	     shadow_walk_okay(&(_walker));			\
176 	     shadow_walk_next(&(_walker)))
177 
178 #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)	\
179 	for (shadow_walk_init(&(_walker), _vcpu, _addr);		\
180 	     shadow_walk_okay(&(_walker)) &&				\
181 		({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });	\
182 	     __shadow_walk_next(&(_walker), spte))
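
/*
 * Typical use of the walker macros (an illustrative sketch; mirrors the
 * lockless walks performed later in this file):
 *
 *	struct kvm_shadow_walk_iterator iterator;
 *	u64 spte;
 *
 *	walk_shadow_page_lockless_begin(vcpu);
 *	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
 *		if (!is_shadow_present_pte(spte))
 *			break;
 *	}
 *	walk_shadow_page_lockless_end(vcpu);
 */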
183 
184 static struct kmem_cache *pte_list_desc_cache;
185 struct kmem_cache *mmu_page_header_cache;
186 static struct percpu_counter kvm_total_used_mmu_pages;
187 
188 static void mmu_spte_set(u64 *sptep, u64 spte);
189 static union kvm_mmu_page_role
190 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
191 
192 struct kvm_mmu_role_regs {
193 	const unsigned long cr0;
194 	const unsigned long cr4;
195 	const u64 efer;
196 };
197 
198 #define CREATE_TRACE_POINTS
199 #include "mmutrace.h"
200 
201 /*
202  * Yes, lots of underscores.  They're a hint that you probably shouldn't be
203  * reading from the role_regs.  Once the mmu_role is constructed, it becomes
204  * the single source of truth for the MMU's state.
205  */
206 #define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag)			\
207 static inline bool __maybe_unused ____is_##reg##_##name(struct kvm_mmu_role_regs *regs)\
208 {									\
209 	return !!(regs->reg & flag);					\
210 }
211 BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG);
212 BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP);
213 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE);
214 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE);
215 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP);
216 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
217 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE);
218 BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
219 BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
220 BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
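
/*
 * For reference, BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG) above
 * expands to:
 *
 *	static inline bool __maybe_unused
 *	____is_cr0_pg(struct kvm_mmu_role_regs *regs)
 *	{
 *		return !!(regs->cr0 & X86_CR0_PG);
 *	}
 */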
221 
222 /*
223  * The MMU itself (with a valid role) is the single source of truth for the
224  * MMU.  Do not use the regs used to build the MMU/role, nor the vCPU.  The
225  * regs don't account for dependencies, e.g. clearing CR4 bits if CR0.PG=1,
226  * and the vCPU may be incorrect/irrelevant.
227  */
228 #define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)		\
229 static inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu)	\
230 {								\
231 	return !!(mmu->mmu_role. base_or_ext . reg##_##name);	\
232 }
233 BUILD_MMU_ROLE_ACCESSOR(ext,  cr0, pg);
234 BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
235 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pse);
236 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pae);
237 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smep);
238 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smap);
239 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pke);
240 BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, la57);
241 BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
242 
243 static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
244 {
245 	struct kvm_mmu_role_regs regs = {
246 		.cr0 = kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
247 		.cr4 = kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
248 		.efer = vcpu->arch.efer,
249 	};
250 
251 	return regs;
252 }
253 
254 static int role_regs_to_root_level(struct kvm_mmu_role_regs *regs)
255 {
256 	if (!____is_cr0_pg(regs))
257 		return 0;
258 	else if (____is_efer_lma(regs))
259 		return ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL :
260 					       PT64_ROOT_4LEVEL;
261 	else if (____is_cr4_pae(regs))
262 		return PT32E_ROOT_LEVEL;
263 	else
264 		return PT32_ROOT_LEVEL;
265 }
266 
267 static inline bool kvm_available_flush_tlb_with_range(void)
268 {
269 	return kvm_x86_ops.tlb_remote_flush_with_range;
270 }
271 
272 static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
273 		struct kvm_tlb_range *range)
274 {
275 	int ret = -ENOTSUPP;
276 
277 	if (range && kvm_x86_ops.tlb_remote_flush_with_range)
278 		ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, range);
279 
280 	if (ret)
281 		kvm_flush_remote_tlbs(kvm);
282 }
283 
284 void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
285 		u64 start_gfn, u64 pages)
286 {
287 	struct kvm_tlb_range range;
288 
289 	range.start_gfn = start_gfn;
290 	range.pages = pages;
291 
292 	kvm_flush_remote_tlbs_with_range(kvm, &range);
293 }
294 
295 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
296 			   unsigned int access)
297 {
298 	u64 spte = make_mmio_spte(vcpu, gfn, access);
299 
300 	trace_mark_mmio_spte(sptep, gfn, spte);
301 	mmu_spte_set(sptep, spte);
302 }
303 
304 static gfn_t get_mmio_spte_gfn(u64 spte)
305 {
306 	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
307 
308 	gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
309 	       & shadow_nonpresent_or_rsvd_mask;
310 
311 	return gpa >> PAGE_SHIFT;
312 }
313 
314 static unsigned get_mmio_spte_access(u64 spte)
315 {
316 	return spte & shadow_mmio_access_mask;
317 }
318 
319 static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
320 {
321 	u64 kvm_gen, spte_gen, gen;
322 
323 	gen = kvm_vcpu_memslots(vcpu)->generation;
324 	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
325 		return false;
326 
327 	kvm_gen = gen & MMIO_SPTE_GEN_MASK;
328 	spte_gen = get_mmio_spte_generation(spte);
329 
330 	trace_check_mmio_spte(spte, kvm_gen, spte_gen);
331 	return likely(kvm_gen == spte_gen);
332 }
333 
334 static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
335                                   struct x86_exception *exception)
336 {
337         return gpa;
338 }
339 
340 static int is_cpuid_PSE36(void)
341 {
342 	return 1;
343 }
344 
345 static gfn_t pse36_gfn_delta(u32 gpte)
346 {
347 	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
348 
349 	return (gpte & PT32_DIR_PSE36_MASK) << shift;
350 }
351 
352 #ifdef CONFIG_X86_64
353 static void __set_spte(u64 *sptep, u64 spte)
354 {
355 	WRITE_ONCE(*sptep, spte);
356 }
357 
358 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
359 {
360 	WRITE_ONCE(*sptep, spte);
361 }
362 
363 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
364 {
365 	return xchg(sptep, spte);
366 }
367 
368 static u64 __get_spte_lockless(u64 *sptep)
369 {
370 	return READ_ONCE(*sptep);
371 }
372 #else
373 union split_spte {
374 	struct {
375 		u32 spte_low;
376 		u32 spte_high;
377 	};
378 	u64 spte;
379 };
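
/*
 * Illustrative example: on a 32-bit host the 64-bit spte
 * 0x8000000123456867 is accessed as spte_low == 0x23456867 (which
 * contains the present bit) and spte_high == 0x80000001; the helpers
 * below order the two 32-bit writes so that no CPU can observe a
 * half-written present spte.
 */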
380 
381 static void count_spte_clear(u64 *sptep, u64 spte)
382 {
383 	struct kvm_mmu_page *sp =  sptep_to_sp(sptep);
384 
385 	if (is_shadow_present_pte(spte))
386 		return;
387 
388 	/* Ensure the spte is completely set before we increase the count */
389 	smp_wmb();
390 	sp->clear_spte_count++;
391 }
392 
393 static void __set_spte(u64 *sptep, u64 spte)
394 {
395 	union split_spte *ssptep, sspte;
396 
397 	ssptep = (union split_spte *)sptep;
398 	sspte = (union split_spte)spte;
399 
400 	ssptep->spte_high = sspte.spte_high;
401 
402 	/*
403 	 * When mapping the spte from nonpresent to present, the high bits
404 	 * must be stored first and the present bit set last, so the CPU
405 	 * cannot fetch the spte while we are still setting it.
406 	 */
407 	smp_wmb();
408 
409 	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
410 }
411 
412 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
413 {
414 	union split_spte *ssptep, sspte;
415 
416 	ssptep = (union split_spte *)sptep;
417 	sspte = (union split_spte)spte;
418 
419 	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
420 
421 	/*
422 	 * When mapping the spte from present to nonpresent, the present bit
423 	 * must be cleared first so a vCPU cannot fetch stale high bits.
424 	 */
425 	smp_wmb();
426 
427 	ssptep->spte_high = sspte.spte_high;
428 	count_spte_clear(sptep, spte);
429 }
430 
431 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
432 {
433 	union split_spte *ssptep, sspte, orig;
434 
435 	ssptep = (union split_spte *)sptep;
436 	sspte = (union split_spte)spte;
437 
438 	/* xchg acts as a barrier before the setting of the high bits */
439 	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
440 	orig.spte_high = ssptep->spte_high;
441 	ssptep->spte_high = sspte.spte_high;
442 	count_spte_clear(sptep, spte);
443 
444 	return orig.spte;
445 }
446 
447 /*
448  * The idea of reading the spte in a lightweight way on x86_32 comes
449  * from gup_get_pte (mm/gup.c).
450  *
451  * An spte TLB flush may be pending, because kvm_set_pte_rmapp
452  * coalesces flushes and we are running outside of the MMU lock.  Therefore
453  * we need to protect against in-progress updates of the spte.
454  *
455  * Reading the spte while an update is in progress may get the old value
456  * for the high part of the spte.  The race is fine for a present->non-present
457  * change (because the high part of the spte is ignored for non-present spte),
458  * but for a present->present change we must reread the spte.
459  *
460  * All such changes are done in two steps (present->non-present and
461  * non-present->present), hence it is enough to count the number of
462  * present->non-present updates: if it changed while reading the spte,
463  * we might have hit the race.  This is done using clear_spte_count.
464  */
465 static u64 __get_spte_lockless(u64 *sptep)
466 {
467 	struct kvm_mmu_page *sp =  sptep_to_sp(sptep);
468 	union split_spte spte, *orig = (union split_spte *)sptep;
469 	int count;
470 
471 retry:
472 	count = sp->clear_spte_count;
473 	smp_rmb();
474 
475 	spte.spte_low = orig->spte_low;
476 	smp_rmb();
477 
478 	spte.spte_high = orig->spte_high;
479 	smp_rmb();
480 
481 	if (unlikely(spte.spte_low != orig->spte_low ||
482 	      count != sp->clear_spte_count))
483 		goto retry;
484 
485 	return spte.spte;
486 }
487 #endif
488 
489 static bool spte_has_volatile_bits(u64 spte)
490 {
491 	if (!is_shadow_present_pte(spte))
492 		return false;
493 
494 	/*
495 	 * Always atomically update an spte that can be updated
496 	 * outside of the mmu-lock: this ensures the dirty bit is
497 	 * not lost and gives a stable is_writable_pte(), so that
498 	 * a needed TLB flush is not missed.
499 	 */
500 	if (spte_can_locklessly_be_made_writable(spte) ||
501 	    is_access_track_spte(spte))
502 		return true;
503 
504 	if (spte_ad_enabled(spte)) {
505 		if ((spte & shadow_accessed_mask) == 0 ||
506 		    (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
507 			return true;
508 	}
509 
510 	return false;
511 }
512 
513 /* Rules for using mmu_spte_set:
514  * Set the sptep from nonpresent to present.
515  * Note: the sptep being assigned *must* be either not present
516  * or in a state where the hardware will not attempt to update
517  * the spte.
518  */
519 static void mmu_spte_set(u64 *sptep, u64 new_spte)
520 {
521 	WARN_ON(is_shadow_present_pte(*sptep));
522 	__set_spte(sptep, new_spte);
523 }
524 
525 /*
526  * Update the SPTE (excluding the PFN), but do not track changes in its
527  * accessed/dirty status.
528  */
529 static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
530 {
531 	u64 old_spte = *sptep;
532 
533 	WARN_ON(!is_shadow_present_pte(new_spte));
534 
535 	if (!is_shadow_present_pte(old_spte)) {
536 		mmu_spte_set(sptep, new_spte);
537 		return old_spte;
538 	}
539 
540 	if (!spte_has_volatile_bits(old_spte))
541 		__update_clear_spte_fast(sptep, new_spte);
542 	else
543 		old_spte = __update_clear_spte_slow(sptep, new_spte);
544 
545 	WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
546 
547 	return old_spte;
548 }
549 
550 /* Rules for using mmu_spte_update:
551  * Update the state bits; the mapped pfn is not changed.
552  *
553  * Whenever we overwrite a writable spte with a read-only one we
554  * should flush remote TLBs.  Otherwise rmap_write_protect
555  * will find a read-only spte, even though the writable spte
556  * might still be cached in a CPU's TLB; the return value indicates
557  * this case.
558  *
559  * Returns true if the TLB needs to be flushed.
560  */
561 static bool mmu_spte_update(u64 *sptep, u64 new_spte)
562 {
563 	bool flush = false;
564 	u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);
565 
566 	if (!is_shadow_present_pte(old_spte))
567 		return false;
568 
569 	/*
570 	 * Updating the spte outside of the mmu-lock is safe, since
571 	 * we always update it atomically; see the comments in
572 	 * spte_has_volatile_bits().
573 	 */
574 	if (spte_can_locklessly_be_made_writable(old_spte) &&
575 	      !is_writable_pte(new_spte))
576 		flush = true;
577 
578 	/*
579 	 * Flush TLB when accessed/dirty states are changed in the page tables,
580 	 * to guarantee consistency between TLB and page tables.
581 	 */
582 
583 	if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
584 		flush = true;
585 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
586 	}
587 
588 	if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
589 		flush = true;
590 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
591 	}
592 
593 	return flush;
594 }
595 
596 /*
597  * Rules for using mmu_spte_clear_track_bits:
598  * It sets the sptep from present to nonpresent and tracks the
599  * state bits; it is used to clear a last level sptep.
600  * Returns the old PTE.
601  */
602 static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
603 {
604 	kvm_pfn_t pfn;
605 	u64 old_spte = *sptep;
606 	int level = sptep_to_sp(sptep)->role.level;
607 
608 	if (!spte_has_volatile_bits(old_spte))
609 		__update_clear_spte_fast(sptep, 0ull);
610 	else
611 		old_spte = __update_clear_spte_slow(sptep, 0ull);
612 
613 	if (!is_shadow_present_pte(old_spte))
614 		return old_spte;
615 
616 	kvm_update_page_stats(kvm, level, -1);
617 
618 	pfn = spte_to_pfn(old_spte);
619 
620 	/*
621 	 * KVM does not hold a refcount on the pages used by the
622 	 * KVM MMU, so before a page can be reclaimed it must be
623 	 * unmapped from the MMU first.
624 	 */
625 	WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
626 
627 	if (is_accessed_spte(old_spte))
628 		kvm_set_pfn_accessed(pfn);
629 
630 	if (is_dirty_spte(old_spte))
631 		kvm_set_pfn_dirty(pfn);
632 
633 	return old_spte;
634 }
635 
636 /*
637  * Rules for using mmu_spte_clear_no_track:
638  * Directly clear the spte without caring about its state bits;
639  * it is used to clear an upper level spte.
640  */
641 static void mmu_spte_clear_no_track(u64 *sptep)
642 {
643 	__update_clear_spte_fast(sptep, 0ull);
644 }
645 
646 static u64 mmu_spte_get_lockless(u64 *sptep)
647 {
648 	return __get_spte_lockless(sptep);
649 }
650 
651 /* Restore an acc-track PTE back to a regular PTE */
652 static u64 restore_acc_track_spte(u64 spte)
653 {
654 	u64 new_spte = spte;
655 	u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
656 			 & SHADOW_ACC_TRACK_SAVED_BITS_MASK;
657 
658 	WARN_ON_ONCE(spte_ad_enabled(spte));
659 	WARN_ON_ONCE(!is_access_track_spte(spte));
660 
661 	new_spte &= ~shadow_acc_track_mask;
662 	new_spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
663 		      SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
664 	new_spte |= saved_bits;
665 
666 	return new_spte;
667 }
668 
669 /* Returns the Accessed status of the PTE and resets it at the same time. */
670 static bool mmu_spte_age(u64 *sptep)
671 {
672 	u64 spte = mmu_spte_get_lockless(sptep);
673 
674 	if (!is_accessed_spte(spte))
675 		return false;
676 
677 	if (spte_ad_enabled(spte)) {
678 		clear_bit((ffs(shadow_accessed_mask) - 1),
679 			  (unsigned long *)sptep);
680 	} else {
681 		/*
682 		 * Capture the dirty status of the page, so that it doesn't get
683 		 * lost when the SPTE is marked for access tracking.
684 		 */
685 		if (is_writable_pte(spte))
686 			kvm_set_pfn_dirty(spte_to_pfn(spte));
687 
688 		spte = mark_spte_for_access_track(spte);
689 		mmu_spte_update_no_track(sptep, spte);
690 	}
691 
692 	return true;
693 }
694 
695 static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
696 {
697 	if (is_tdp_mmu(vcpu->arch.mmu)) {
698 		kvm_tdp_mmu_walk_lockless_begin();
699 	} else {
700 		/*
701 		 * Prevent page table teardown by making any free-er wait during
702 		 * kvm_flush_remote_tlbs() IPI to all active vcpus.
703 		 */
704 		local_irq_disable();
705 
706 		/*
707 		 * Make sure a following spte read is not reordered ahead of the write
708 		 * to vcpu->mode.
709 		 */
710 		smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
711 	}
712 }
713 
714 static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
715 {
716 	if (is_tdp_mmu(vcpu->arch.mmu)) {
717 		kvm_tdp_mmu_walk_lockless_end();
718 	} else {
719 		/*
720 		 * Make sure the write to vcpu->mode is not reordered in front of
721 		 * reads to sptes.  If it does, kvm_mmu_commit_zap_page() can see us
722 		 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
723 		 */
724 		smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
725 		local_irq_enable();
726 	}
727 }
728 
729 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
730 {
731 	int r;
732 
733 	/* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
734 	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
735 				       1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
736 	if (r)
737 		return r;
738 	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
739 				       PT64_ROOT_MAX_LEVEL);
740 	if (r)
741 		return r;
742 	if (maybe_indirect) {
743 		r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
744 					       PT64_ROOT_MAX_LEVEL);
745 		if (r)
746 			return r;
747 	}
748 	return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
749 					  PT64_ROOT_MAX_LEVEL);
750 }
751 
752 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
753 {
754 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
755 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
756 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
757 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
758 }
759 
760 static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
761 {
762 	return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
763 }
764 
765 static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
766 {
767 	kmem_cache_free(pte_list_desc_cache, pte_list_desc);
768 }
769 
770 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
771 {
772 	if (!sp->role.direct)
773 		return sp->gfns[index];
774 
775 	return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
776 }
777 
778 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
779 {
780 	if (!sp->role.direct) {
781 		sp->gfns[index] = gfn;
782 		return;
783 	}
784 
785 	if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index)))
786 		pr_err_ratelimited("gfn mismatch under direct page %llx "
787 				   "(expected %llx, got %llx)\n",
788 				   sp->gfn,
789 				   kvm_mmu_page_get_gfn(sp, index), gfn);
790 }
791 
792 /*
793  * Return the pointer to the large page information for a given gfn,
794  * handling slots that are not large page aligned.
795  */
796 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
797 		const struct kvm_memory_slot *slot, int level)
798 {
799 	unsigned long idx;
800 
801 	idx = gfn_to_index(gfn, slot->base_gfn, level);
802 	return &slot->arch.lpage_info[level - 2][idx];
803 }
804 
805 static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot,
806 					    gfn_t gfn, int count)
807 {
808 	struct kvm_lpage_info *linfo;
809 	int i;
810 
811 	for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
812 		linfo = lpage_info_slot(gfn, slot, i);
813 		linfo->disallow_lpage += count;
814 		WARN_ON(linfo->disallow_lpage < 0);
815 	}
816 }
817 
818 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
819 {
820 	update_gfn_disallow_lpage_count(slot, gfn, 1);
821 }
822 
823 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
824 {
825 	update_gfn_disallow_lpage_count(slot, gfn, -1);
826 }
827 
828 static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
829 {
830 	struct kvm_memslots *slots;
831 	struct kvm_memory_slot *slot;
832 	gfn_t gfn;
833 
834 	kvm->arch.indirect_shadow_pages++;
835 	gfn = sp->gfn;
836 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
837 	slot = __gfn_to_memslot(slots, gfn);
838 
839 	/* Non-leaf shadow pages are kept read-only. */
840 	if (sp->role.level > PG_LEVEL_4K)
841 		return kvm_slot_page_track_add_page(kvm, slot, gfn,
842 						    KVM_PAGE_TRACK_WRITE);
843 
844 	kvm_mmu_gfn_disallow_lpage(slot, gfn);
845 }
846 
847 void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
848 {
849 	if (sp->lpage_disallowed)
850 		return;
851 
852 	++kvm->stat.nx_lpage_splits;
853 	list_add_tail(&sp->lpage_disallowed_link,
854 		      &kvm->arch.lpage_disallowed_mmu_pages);
855 	sp->lpage_disallowed = true;
856 }
857 
858 static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
859 {
860 	struct kvm_memslots *slots;
861 	struct kvm_memory_slot *slot;
862 	gfn_t gfn;
863 
864 	kvm->arch.indirect_shadow_pages--;
865 	gfn = sp->gfn;
866 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
867 	slot = __gfn_to_memslot(slots, gfn);
868 	if (sp->role.level > PG_LEVEL_4K)
869 		return kvm_slot_page_track_remove_page(kvm, slot, gfn,
870 						       KVM_PAGE_TRACK_WRITE);
871 
872 	kvm_mmu_gfn_allow_lpage(slot, gfn);
873 }
874 
875 void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
876 {
877 	--kvm->stat.nx_lpage_splits;
878 	sp->lpage_disallowed = false;
879 	list_del(&sp->lpage_disallowed_link);
880 }
881 
882 static struct kvm_memory_slot *
883 gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
884 			    bool no_dirty_log)
885 {
886 	struct kvm_memory_slot *slot;
887 
888 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
889 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
890 		return NULL;
891 	if (no_dirty_log && kvm_slot_dirty_track_enabled(slot))
892 		return NULL;
893 
894 	return slot;
895 }
896 
897 /*
898  * About rmap_head encoding:
899  *
900  * If the bit zero of rmap_head->val is clear, then it points to the only spte
901  * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
902  * pte_list_desc containing more mappings.
903  */
904 
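/*
 * Example (illustrative): after mapping a single spte, rmap_head->val
 * holds the sptep itself, with bit zero clear.  Once a second spte is
 * added, rmap_head->val == (unsigned long)desc | 1, where desc->sptes[0]
 * and desc->sptes[1] hold the two sptes and desc->spte_count == 2;
 * additional descriptors are chained through desc->more.
 */
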
905 /*
906  * Returns the number of pointers in the rmap chain, not counting the new one.
907  */
908 static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
909 			struct kvm_rmap_head *rmap_head)
910 {
911 	struct pte_list_desc *desc;
912 	int count = 0;
913 
914 	if (!rmap_head->val) {
915 		rmap_printk("%p %llx 0->1\n", spte, *spte);
916 		rmap_head->val = (unsigned long)spte;
917 	} else if (!(rmap_head->val & 1)) {
918 		rmap_printk("%p %llx 1->many\n", spte, *spte);
919 		desc = mmu_alloc_pte_list_desc(vcpu);
920 		desc->sptes[0] = (u64 *)rmap_head->val;
921 		desc->sptes[1] = spte;
922 		desc->spte_count = 2;
923 		rmap_head->val = (unsigned long)desc | 1;
924 		++count;
925 	} else {
926 		rmap_printk("%p %llx many->many\n", spte, *spte);
927 		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
928 		while (desc->spte_count == PTE_LIST_EXT) {
929 			count += PTE_LIST_EXT;
930 			if (!desc->more) {
931 				desc->more = mmu_alloc_pte_list_desc(vcpu);
932 				desc = desc->more;
933 				desc->spte_count = 0;
934 				break;
935 			}
936 			desc = desc->more;
937 		}
938 		count += desc->spte_count;
939 		desc->sptes[desc->spte_count++] = spte;
940 	}
941 	return count;
942 }
943 
944 static void
945 pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
946 			   struct pte_list_desc *desc, int i,
947 			   struct pte_list_desc *prev_desc)
948 {
949 	int j = desc->spte_count - 1;
950 
951 	desc->sptes[i] = desc->sptes[j];
952 	desc->sptes[j] = NULL;
953 	desc->spte_count--;
954 	if (desc->spte_count)
955 		return;
956 	if (!prev_desc && !desc->more)
957 		rmap_head->val = 0;
958 	else
959 		if (prev_desc)
960 			prev_desc->more = desc->more;
961 		else
962 			rmap_head->val = (unsigned long)desc->more | 1;
963 	mmu_free_pte_list_desc(desc);
964 }
965 
966 static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
967 {
968 	struct pte_list_desc *desc;
969 	struct pte_list_desc *prev_desc;
970 	int i;
971 
972 	if (!rmap_head->val) {
973 		pr_err("%s: %p 0->BUG\n", __func__, spte);
974 		BUG();
975 	} else if (!(rmap_head->val & 1)) {
976 		rmap_printk("%p 1->0\n", spte);
977 		if ((u64 *)rmap_head->val != spte) {
978 			pr_err("%s:  %p 1->BUG\n", __func__, spte);
979 			BUG();
980 		}
981 		rmap_head->val = 0;
982 	} else {
983 		rmap_printk("%p many->many\n", spte);
984 		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
985 		prev_desc = NULL;
986 		while (desc) {
987 			for (i = 0; i < desc->spte_count; ++i) {
988 				if (desc->sptes[i] == spte) {
989 					pte_list_desc_remove_entry(rmap_head,
990 							desc, i, prev_desc);
991 					return;
992 				}
993 			}
994 			prev_desc = desc;
995 			desc = desc->more;
996 		}
997 		pr_err("%s: %p many->many\n", __func__, spte);
998 		BUG();
999 	}
1000 }
1001 
1002 static void pte_list_remove(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1003 			    u64 *sptep)
1004 {
1005 	mmu_spte_clear_track_bits(kvm, sptep);
1006 	__pte_list_remove(sptep, rmap_head);
1007 }
1008 
1009 /* Return true if rmap existed, false otherwise */
1010 static bool pte_list_destroy(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
1011 {
1012 	struct pte_list_desc *desc, *next;
1013 	int i;
1014 
1015 	if (!rmap_head->val)
1016 		return false;
1017 
1018 	if (!(rmap_head->val & 1)) {
1019 		mmu_spte_clear_track_bits(kvm, (u64 *)rmap_head->val);
1020 		goto out;
1021 	}
1022 
1023 	desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1024 
1025 	for (; desc; desc = next) {
1026 		for (i = 0; i < desc->spte_count; i++)
1027 			mmu_spte_clear_track_bits(kvm, desc->sptes[i]);
1028 		next = desc->more;
1029 		mmu_free_pte_list_desc(desc);
1030 	}
1031 out:
1032 	/* rmap_head is meaningless now, remember to reset it */
1033 	rmap_head->val = 0;
1034 	return true;
1035 }
1036 
1037 unsigned int pte_list_count(struct kvm_rmap_head *rmap_head)
1038 {
1039 	struct pte_list_desc *desc;
1040 	unsigned int count = 0;
1041 
1042 	if (!rmap_head->val)
1043 		return 0;
1044 	else if (!(rmap_head->val & 1))
1045 		return 1;
1046 
1047 	desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1048 
1049 	while (desc) {
1050 		count += desc->spte_count;
1051 		desc = desc->more;
1052 	}
1053 
1054 	return count;
1055 }
1056 
1057 static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
1058 					 const struct kvm_memory_slot *slot)
1059 {
1060 	unsigned long idx;
1061 
1062 	idx = gfn_to_index(gfn, slot->base_gfn, level);
1063 	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
1064 }
1065 
1066 static bool rmap_can_add(struct kvm_vcpu *vcpu)
1067 {
1068 	struct kvm_mmu_memory_cache *mc;
1069 
1070 	mc = &vcpu->arch.mmu_pte_list_desc_cache;
1071 	return kvm_mmu_memory_cache_nr_free_objects(mc);
1072 }
1073 
1074 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
1075 {
1076 	struct kvm_memory_slot *slot;
1077 	struct kvm_mmu_page *sp;
1078 	struct kvm_rmap_head *rmap_head;
1079 
1080 	sp = sptep_to_sp(spte);
1081 	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
1082 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1083 	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1084 	return pte_list_add(vcpu, spte, rmap_head);
1085 }
1086 
1087 
1088 static void rmap_remove(struct kvm *kvm, u64 *spte)
1089 {
1090 	struct kvm_memslots *slots;
1091 	struct kvm_memory_slot *slot;
1092 	struct kvm_mmu_page *sp;
1093 	gfn_t gfn;
1094 	struct kvm_rmap_head *rmap_head;
1095 
1096 	sp = sptep_to_sp(spte);
1097 	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
1098 
1099 	/*
1100 	 * Unlike rmap_add and rmap_recycle, rmap_remove does not run in the
1101 	 * context of a vCPU, so it has to determine which memslots to use based
1102 	 * on context information in sp->role.
1103 	 */
1104 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
1105 
1106 	slot = __gfn_to_memslot(slots, gfn);
1107 	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1108 
1109 	__pte_list_remove(spte, rmap_head);
1110 }
1111 
1112 /*
1113  * Used by the following functions to iterate through the sptes linked by a
1114  * rmap.  All fields are private and should not be used outside.
1115  */
1116 struct rmap_iterator {
1117 	/* private fields */
1118 	struct pte_list_desc *desc;	/* holds the sptep if not NULL */
1119 	int pos;			/* index of the sptep */
1120 };
1121 
1122 /*
1123  * Iteration must be started by this function.  This should also be used after
1124  * removing/dropping sptes from the rmap link because in such cases the
1125  * information in the iterator may not be valid.
1126  *
1127  * Returns sptep if found, NULL otherwise.
1128  */
1129 static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
1130 			   struct rmap_iterator *iter)
1131 {
1132 	u64 *sptep;
1133 
1134 	if (!rmap_head->val)
1135 		return NULL;
1136 
1137 	if (!(rmap_head->val & 1)) {
1138 		iter->desc = NULL;
1139 		sptep = (u64 *)rmap_head->val;
1140 		goto out;
1141 	}
1142 
1143 	iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1144 	iter->pos = 0;
1145 	sptep = iter->desc->sptes[iter->pos];
1146 out:
1147 	BUG_ON(!is_shadow_present_pte(*sptep));
1148 	return sptep;
1149 }
1150 
1151 /*
1152  * Must be used with a valid iterator: e.g. after rmap_get_first().
1153  *
1154  * Returns sptep if found, NULL otherwise.
1155  */
1156 static u64 *rmap_get_next(struct rmap_iterator *iter)
1157 {
1158 	u64 *sptep;
1159 
1160 	if (iter->desc) {
1161 		if (iter->pos < PTE_LIST_EXT - 1) {
1162 			++iter->pos;
1163 			sptep = iter->desc->sptes[iter->pos];
1164 			if (sptep)
1165 				goto out;
1166 		}
1167 
1168 		iter->desc = iter->desc->more;
1169 
1170 		if (iter->desc) {
1171 			iter->pos = 0;
1172 			/* desc->sptes[0] cannot be NULL */
1173 			sptep = iter->desc->sptes[iter->pos];
1174 			goto out;
1175 		}
1176 	}
1177 
1178 	return NULL;
1179 out:
1180 	BUG_ON(!is_shadow_present_pte(*sptep));
1181 	return sptep;
1182 }
1183 
1184 #define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)			\
1185 	for (_spte_ = rmap_get_first(_rmap_head_, _iter_);		\
1186 	     _spte_; _spte_ = rmap_get_next(_iter_))
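
/*
 * Typical usage (an illustrative sketch, following __rmap_write_protect()
 * below):
 *
 *	u64 *sptep;
 *	struct rmap_iterator iter;
 *
 *	for_each_rmap_spte(rmap_head, &iter, sptep)
 *		flush |= spte_write_protect(sptep, pt_protect);
 *
 * Removing an spte invalidates the iterator; restart the walk with
 * rmap_get_first() after any removal, as kvm_set_pte_rmapp() does.
 */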
1187 
1188 static void drop_spte(struct kvm *kvm, u64 *sptep)
1189 {
1190 	u64 old_spte = mmu_spte_clear_track_bits(kvm, sptep);
1191 
1192 	if (is_shadow_present_pte(old_spte))
1193 		rmap_remove(kvm, sptep);
1194 }
1195 
1196 
1197 static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
1198 {
1199 	if (is_large_pte(*sptep)) {
1200 		WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K);
1201 		drop_spte(kvm, sptep);
1202 		return true;
1203 	}
1204 
1205 	return false;
1206 }
1207 
1208 static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
1209 {
1210 	if (__drop_large_spte(vcpu->kvm, sptep)) {
1211 		struct kvm_mmu_page *sp = sptep_to_sp(sptep);
1212 
1213 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
1214 			KVM_PAGES_PER_HPAGE(sp->role.level));
1215 	}
1216 }
1217 
1218 /*
1219  * Write-protect the specified @sptep; @pt_protect indicates whether the
1220  * spte is write-protected in order to protect a shadow page table.
1221  *
1222  * Note: write protection differs between dirty logging and spte
1223  * protection:
1224  * - for dirty logging, the spte can be made writable at any time as long
1225  *   as its dirty bitmap is properly set.
1226  * - for spte protection, the spte can be made writable only after the
1227  *   shadow page is unsynced.
1228  *
1229  * Return true if the TLB needs to be flushed.
1230  */
1231 static bool spte_write_protect(u64 *sptep, bool pt_protect)
1232 {
1233 	u64 spte = *sptep;
1234 
1235 	if (!is_writable_pte(spte) &&
1236 	      !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
1237 		return false;
1238 
1239 	rmap_printk("spte %p %llx\n", sptep, *sptep);
1240 
1241 	if (pt_protect)
1242 		spte &= ~shadow_mmu_writable_mask;
1243 	spte = spte & ~PT_WRITABLE_MASK;
1244 
1245 	return mmu_spte_update(sptep, spte);
1246 }
1247 
1248 static bool __rmap_write_protect(struct kvm *kvm,
1249 				 struct kvm_rmap_head *rmap_head,
1250 				 bool pt_protect)
1251 {
1252 	u64 *sptep;
1253 	struct rmap_iterator iter;
1254 	bool flush = false;
1255 
1256 	for_each_rmap_spte(rmap_head, &iter, sptep)
1257 		flush |= spte_write_protect(sptep, pt_protect);
1258 
1259 	return flush;
1260 }
1261 
1262 static bool spte_clear_dirty(u64 *sptep)
1263 {
1264 	u64 spte = *sptep;
1265 
1266 	rmap_printk("spte %p %llx\n", sptep, *sptep);
1267 
1268 	MMU_WARN_ON(!spte_ad_enabled(spte));
1269 	spte &= ~shadow_dirty_mask;
1270 	return mmu_spte_update(sptep, spte);
1271 }
1272 
1273 static bool spte_wrprot_for_clear_dirty(u64 *sptep)
1274 {
1275 	bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
1276 					       (unsigned long *)sptep);
1277 	if (was_writable && !spte_ad_enabled(*sptep))
1278 		kvm_set_pfn_dirty(spte_to_pfn(*sptep));
1279 
1280 	return was_writable;
1281 }
1282 
1283 /*
1284  * Gets the GFN ready for another round of dirty logging by clearing the
1285  *	- D bit on ad-enabled SPTEs, and
1286  *	- W bit on ad-disabled SPTEs.
1287  * Returns true iff any D or W bits were cleared.
1288  */
1289 static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1290 			       const struct kvm_memory_slot *slot)
1291 {
1292 	u64 *sptep;
1293 	struct rmap_iterator iter;
1294 	bool flush = false;
1295 
1296 	for_each_rmap_spte(rmap_head, &iter, sptep)
1297 		if (spte_ad_need_write_protect(*sptep))
1298 			flush |= spte_wrprot_for_clear_dirty(sptep);
1299 		else
1300 			flush |= spte_clear_dirty(sptep);
1301 
1302 	return flush;
1303 }
1304 
1305 /**
1306  * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
1307  * @kvm: kvm instance
1308  * @slot: slot to protect
1309  * @gfn_offset: start of the BITS_PER_LONG pages we care about
1310  * @mask: indicates which pages we should protect
1311  *
1312  * Used when we do not need to care about huge page mappings.
1313  */
1314 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1315 				     struct kvm_memory_slot *slot,
1316 				     gfn_t gfn_offset, unsigned long mask)
1317 {
1318 	struct kvm_rmap_head *rmap_head;
1319 
1320 	if (is_tdp_mmu_enabled(kvm))
1321 		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1322 				slot->base_gfn + gfn_offset, mask, true);
1323 
1324 	if (!kvm_memslots_have_rmaps(kvm))
1325 		return;
1326 
1327 	while (mask) {
1328 		rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1329 					PG_LEVEL_4K, slot);
1330 		__rmap_write_protect(kvm, rmap_head, false);
1331 
1332 		/* clear the first set bit */
1333 		mask &= mask - 1;
1334 	}
1335 }
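
/*
 * Example (illustrative): with gfn_offset == 64 and mask == 0x3, the loop
 * above write-protects the 4K mappings of slot->base_gfn + 64 and
 * slot->base_gfn + 65; "mask &= mask - 1" clears the lowest set bit to
 * advance to the next marked page.
 */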
1336 
1337 /**
1338  * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
1339  * protect the page if the D-bit isn't supported.
1340  * @kvm: kvm instance
1341  * @slot: slot to clear D-bit
1342  * @gfn_offset: start of the BITS_PER_LONG pages we care about
1343  * @mask: indicates which pages' D-bit we should clear
1344  *
1345  * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
1346  */
1347 static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1348 					 struct kvm_memory_slot *slot,
1349 					 gfn_t gfn_offset, unsigned long mask)
1350 {
1351 	struct kvm_rmap_head *rmap_head;
1352 
1353 	if (is_tdp_mmu_enabled(kvm))
1354 		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
1355 				slot->base_gfn + gfn_offset, mask, false);
1356 
1357 	if (!kvm_memslots_have_rmaps(kvm))
1358 		return;
1359 
1360 	while (mask) {
1361 		rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1362 					PG_LEVEL_4K, slot);
1363 		__rmap_clear_dirty(kvm, rmap_head, slot);
1364 
1365 		/* clear the first set bit */
1366 		mask &= mask - 1;
1367 	}
1368 }
1369 
1370 /**
1371  * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1372  * PT level pages.
1373  *
1374  * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1375  * enable dirty logging for them.
1376  *
1377  * We need to care about huge page mappings: e.g. during dirty logging we may
1378  * have such mappings.
1379  */
1380 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1381 				struct kvm_memory_slot *slot,
1382 				gfn_t gfn_offset, unsigned long mask)
1383 {
1384 	/*
1385 	 * Huge pages are NOT write protected when we start dirty logging in
1386 	 * initially-all-set mode; must write protect them here so that they
1387 	 * are split to 4K on the first write.
1388 	 *
1389 	 * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
1390 	 * of memslot has no such restriction, so the range can cross two large
1391 	 * pages.
1392 	 */
1393 	if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
1394 		gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);
1395 		gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);
1396 
1397 		kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M);
1398 
1399 		/* Cross two large pages? */
1400 		if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) !=
1401 		    ALIGN(end << PAGE_SHIFT, PMD_SIZE))
1402 			kvm_mmu_slot_gfn_write_protect(kvm, slot, end,
1403 						       PG_LEVEL_2M);
1404 	}
1405 
1406 	/* Now handle 4K PTEs.  */
1407 	if (kvm_x86_ops.cpu_dirty_log_size)
1408 		kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
1409 	else
1410 		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1411 }
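
/*
 * Example for the alignment check above (illustrative): with
 * slot->base_gfn + gfn_offset == 0x1f8 and mask == 0xffff, start == 0x1f8
 * and end == 0x207, so ALIGN(start << PAGE_SHIFT, PMD_SIZE) == 0x200000
 * while ALIGN(end << PAGE_SHIFT, PMD_SIZE) == 0x400000: the range spans
 * two 2M pages and both must be write-protected.
 */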
1412 
1413 int kvm_cpu_dirty_log_size(void)
1414 {
1415 	return kvm_x86_ops.cpu_dirty_log_size;
1416 }
1417 
1418 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
1419 				    struct kvm_memory_slot *slot, u64 gfn,
1420 				    int min_level)
1421 {
1422 	struct kvm_rmap_head *rmap_head;
1423 	int i;
1424 	bool write_protected = false;
1425 
1426 	if (kvm_memslots_have_rmaps(kvm)) {
1427 		for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
1428 			rmap_head = gfn_to_rmap(gfn, i, slot);
1429 			write_protected |= __rmap_write_protect(kvm, rmap_head, true);
1430 		}
1431 	}
1432 
1433 	if (is_tdp_mmu_enabled(kvm))
1434 		write_protected |=
1435 			kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
1436 
1437 	return write_protected;
1438 }
1439 
1440 static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
1441 {
1442 	struct kvm_memory_slot *slot;
1443 
1444 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1445 	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
1446 }
1447 
1448 static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1449 			  const struct kvm_memory_slot *slot)
1450 {
1451 	return pte_list_destroy(kvm, rmap_head);
1452 }
1453 
1454 static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1455 			    struct kvm_memory_slot *slot, gfn_t gfn, int level,
1456 			    pte_t unused)
1457 {
1458 	return kvm_zap_rmapp(kvm, rmap_head, slot);
1459 }
1460 
1461 static bool kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1462 			      struct kvm_memory_slot *slot, gfn_t gfn, int level,
1463 			      pte_t pte)
1464 {
1465 	u64 *sptep;
1466 	struct rmap_iterator iter;
1467 	int need_flush = 0;
1468 	u64 new_spte;
1469 	kvm_pfn_t new_pfn;
1470 
1471 	WARN_ON(pte_huge(pte));
1472 	new_pfn = pte_pfn(pte);
1473 
1474 restart:
1475 	for_each_rmap_spte(rmap_head, &iter, sptep) {
1476 		rmap_printk("spte %p %llx gfn %llx (%d)\n",
1477 			    sptep, *sptep, gfn, level);
1478 
1479 		need_flush = 1;
1480 
1481 		if (pte_write(pte)) {
1482 			pte_list_remove(kvm, rmap_head, sptep);
1483 			goto restart;
1484 		} else {
1485 			new_spte = kvm_mmu_changed_pte_notifier_make_spte(
1486 					*sptep, new_pfn);
1487 
1488 			mmu_spte_clear_track_bits(kvm, sptep);
1489 			mmu_spte_set(sptep, new_spte);
1490 		}
1491 	}
1492 
1493 	if (need_flush && kvm_available_flush_tlb_with_range()) {
1494 		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
1495 		return 0;
1496 	}
1497 
1498 	return need_flush;
1499 }
1500 
1501 struct slot_rmap_walk_iterator {
1502 	/* input fields. */
1503 	const struct kvm_memory_slot *slot;
1504 	gfn_t start_gfn;
1505 	gfn_t end_gfn;
1506 	int start_level;
1507 	int end_level;
1508 
1509 	/* output fields. */
1510 	gfn_t gfn;
1511 	struct kvm_rmap_head *rmap;
1512 	int level;
1513 
1514 	/* private field. */
1515 	struct kvm_rmap_head *end_rmap;
1516 };
1517 
1518 static void
1519 rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
1520 {
1521 	iterator->level = level;
1522 	iterator->gfn = iterator->start_gfn;
1523 	iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot);
1524 	iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot);
1525 }
1526 
1527 static void
1528 slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
1529 		    const struct kvm_memory_slot *slot, int start_level,
1530 		    int end_level, gfn_t start_gfn, gfn_t end_gfn)
1531 {
1532 	iterator->slot = slot;
1533 	iterator->start_level = start_level;
1534 	iterator->end_level = end_level;
1535 	iterator->start_gfn = start_gfn;
1536 	iterator->end_gfn = end_gfn;
1537 
1538 	rmap_walk_init_level(iterator, iterator->start_level);
1539 }
1540 
1541 static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
1542 {
1543 	return !!iterator->rmap;
1544 }
1545 
1546 static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
1547 {
1548 	if (++iterator->rmap <= iterator->end_rmap) {
1549 		iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
1550 		return;
1551 	}
1552 
1553 	if (++iterator->level > iterator->end_level) {
1554 		iterator->rmap = NULL;
1555 		return;
1556 	}
1557 
1558 	rmap_walk_init_level(iterator, iterator->level);
1559 }
1560 
1561 #define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
1562 	   _start_gfn, _end_gfn, _iter_)				\
1563 	for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,		\
1564 				 _end_level_, _start_gfn, _end_gfn);	\
1565 	     slot_rmap_walk_okay(_iter_);				\
1566 	     slot_rmap_walk_next(_iter_))
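
/*
 * Typical usage (an illustrative sketch, following kvm_handle_gfn_range()
 * below), walking all hugepage levels of a slot over a gfn range:
 *
 *	struct slot_rmap_walk_iterator iterator;
 *
 *	for_each_slot_rmap_range(slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
 *				 start_gfn, end_gfn, &iterator)
 *		ret |= handler(kvm, iterator.rmap, slot, iterator.gfn,
 *			       iterator.level, pte);
 */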
1567 
1568 typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1569 			       struct kvm_memory_slot *slot, gfn_t gfn,
1570 			       int level, pte_t pte);
1571 
1572 static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
1573 						 struct kvm_gfn_range *range,
1574 						 rmap_handler_t handler)
1575 {
1576 	struct slot_rmap_walk_iterator iterator;
1577 	bool ret = false;
1578 
1579 	for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
1580 				 range->start, range->end - 1, &iterator)
1581 		ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
1582 			       iterator.level, range->pte);
1583 
1584 	return ret;
1585 }
1586 
1587 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1588 {
1589 	bool flush = false;
1590 
1591 	if (kvm_memslots_have_rmaps(kvm))
1592 		flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
1593 
1594 	if (is_tdp_mmu_enabled(kvm))
1595 		flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
1596 
1597 	return flush;
1598 }
1599 
1600 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1601 {
1602 	bool flush = false;
1603 
1604 	if (kvm_memslots_have_rmaps(kvm))
1605 		flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmapp);
1606 
1607 	if (is_tdp_mmu_enabled(kvm))
1608 		flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range);
1609 
1610 	return flush;
1611 }
1612 
1613 static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1614 			  struct kvm_memory_slot *slot, gfn_t gfn, int level,
1615 			  pte_t unused)
1616 {
1617 	u64 *sptep;
1618 	struct rmap_iterator iter;
1619 	int young = 0;
1620 
1621 	for_each_rmap_spte(rmap_head, &iter, sptep)
1622 		young |= mmu_spte_age(sptep);
1623 
1624 	return young;
1625 }
1626 
1627 static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1628 			       struct kvm_memory_slot *slot, gfn_t gfn,
1629 			       int level, pte_t unused)
1630 {
1631 	u64 *sptep;
1632 	struct rmap_iterator iter;
1633 
1634 	for_each_rmap_spte(rmap_head, &iter, sptep)
1635 		if (is_accessed_spte(*sptep))
1636 			return 1;
1637 	return 0;
1638 }
1639 
1640 #define RMAP_RECYCLE_THRESHOLD 1000
1641 
1642 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
1643 {
1644 	struct kvm_memory_slot *slot;
1645 	struct kvm_rmap_head *rmap_head;
1646 	struct kvm_mmu_page *sp;
1647 
1648 	sp = sptep_to_sp(spte);
1649 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1650 	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1651 
1652 	kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
1653 	kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
1654 			KVM_PAGES_PER_HPAGE(sp->role.level));
1655 }
1656 
1657 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1658 {
1659 	bool young = false;
1660 
1661 	if (kvm_memslots_have_rmaps(kvm))
1662 		young = kvm_handle_gfn_range(kvm, range, kvm_age_rmapp);
1663 
1664 	if (is_tdp_mmu_enabled(kvm))
1665 		young |= kvm_tdp_mmu_age_gfn_range(kvm, range);
1666 
1667 	return young;
1668 }
1669 
1670 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1671 {
1672 	bool young = false;
1673 
1674 	if (kvm_memslots_have_rmaps(kvm))
1675 		young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmapp);
1676 
1677 	if (is_tdp_mmu_enabled(kvm))
1678 		young |= kvm_tdp_mmu_test_age_gfn(kvm, range);
1679 
1680 	return young;
1681 }
1682 
1683 #ifdef MMU_DEBUG
1684 static int is_empty_shadow_page(u64 *spt)
1685 {
1686 	u64 *pos;
1687 	u64 *end;
1688 
1689 	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
1690 		if (is_shadow_present_pte(*pos)) {
1691 			printk(KERN_ERR "%s: %p %llx\n", __func__,
1692 			       pos, *pos);
1693 			return 0;
1694 		}
1695 	return 1;
1696 }
1697 #endif
1698 
/*
 * This value is the sum of all of the kvm instances'
 * kvm->arch.n_used_mmu_pages values.  We need a global,
 * aggregate version in order to make the slab shrinker
 * faster.
 */
static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
{
	kvm->arch.n_used_mmu_pages += nr;
	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
}

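/*
 * Tear down a shadow page: unlink it from the hash table and its list, then
 * free the page table page and, for indirect pages, the gfns array.
 */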
static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
{
	MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
	hlist_del(&sp->hash_link);
	list_del(&sp->link);
	free_page((unsigned long)sp->spt);
	if (!sp->role.direct)
		free_page((unsigned long)sp->gfns);
	kmem_cache_free(mmu_page_header_cache, sp);
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return hash_64(gfn, KVM_MMU_HASH_SHIFT);
}

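/*
 * Parent SPTE tracking: each shadow page records the SPTEs that point at it
 * so that unsync state can be propagated upward and the links torn down when
 * the page is zapped.
 */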
static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp, u64 *parent_pte)
{
	if (!parent_pte)
		return;

	pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
				       u64 *parent_pte)
{
	__pte_list_remove(parent_pte, &sp->parent_ptes);
}

static void drop_parent_pte(struct kvm_mmu_page *sp,
			    u64 *parent_pte)
{
	mmu_page_remove_parent_pte(sp, parent_pte);
	mmu_spte_clear_no_track(parent_pte);
}

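/*
 * Allocate a shadow page from the per-vCPU caches.  Direct pages map guest
 * physical memory directly and thus don't need the per-entry gfns array.
 */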
static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
	if (!direct)
		sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	/*
	 * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
	 * depends on valid pages being added to the head of the list.  See
	 * comments in kvm_zap_obsolete_pages().
	 */
	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
	return sp;
}

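/*
 * Propagate unsync status up the paging tree: mark_unsync() sets the child's
 * bit in each parent's unsync_child_bitmap, recursing upward until a parent
 * that already has unsync children is reached.
 */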
static void mark_unsync(u64 *spte);
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
{
	u64 *sptep;
	struct rmap_iterator iter;

	for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
		mark_unsync(sptep);
	}
}

static void mark_unsync(u64 *spte)
{
	struct kvm_mmu_page *sp;
	unsigned int index;

	sp = sptep_to_sp(spte);
	index = spte - sp->spt;
	if (__test_and_set_bit(index, sp->unsync_child_bitmap))
		return;
	if (sp->unsync_children++)
		return;
	kvm_mmu_mark_parents_unsync(sp);
}

static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
			       struct kvm_mmu_page *sp)
{
	return 0;
}

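/*
 * Fixed-size vector used to batch shadow pages while walking unsync children;
 * mmu_pages_add() returns nonzero once the vector is full so the caller can
 * drain it and restart the walk.
 */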
#define KVM_PAGE_ARRAY_NR 16

struct kvm_mmu_pages {
	struct mmu_page_and_offset {
		struct kvm_mmu_page *sp;
		unsigned int idx;
	} page[KVM_PAGE_ARRAY_NR];
	unsigned int nr;
};

static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
			 int idx)
{
	int i;

	if (sp->unsync)
		for (i = 0; i < pvec->nr; i++)
			if (pvec->page[i].sp == sp)
				return 0;

	pvec->page[pvec->nr].sp = sp;
	pvec->page[pvec->nr].idx = idx;
	pvec->nr++;
	return (pvec->nr == KVM_PAGE_ARRAY_NR);
}

static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
{
	--sp->unsync_children;
	WARN_ON((int)sp->unsync_children < 0);
	__clear_bit(idx, sp->unsync_child_bitmap);
}

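/*
 * Walk @sp's unsync_child_bitmap and collect unsync leaf pages into @pvec,
 * clearing stale bits along the way.  Returns the number of unsync leaves
 * found, or -ENOSPC if @pvec filled up and the walk must be restarted.
 */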
static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{
	int i, ret, nr_unsync_leaf = 0;

	for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
		struct kvm_mmu_page *child;
		u64 ent = sp->spt[i];

		if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
			clear_unsync_child_bit(sp, i);
			continue;
		}

		child = to_shadow_page(ent & PT64_BASE_ADDR_MASK);

		if (child->unsync_children) {
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;

			ret = __mmu_unsync_walk(child, pvec);
			if (!ret) {
				clear_unsync_child_bit(sp, i);
				continue;
			} else if (ret > 0) {
				nr_unsync_leaf += ret;
			} else
				return ret;
		} else if (child->unsync) {
			nr_unsync_leaf++;
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;
		} else
			clear_unsync_child_bit(sp, i);
	}

	return nr_unsync_leaf;
}

#define INVALID_INDEX (-1)

static int mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{
	pvec->nr = 0;
	if (!sp->unsync_children)
		return 0;

	mmu_pages_add(pvec, sp, INVALID_INDEX);
	return __mmu_unsync_walk(sp, pvec);
}

static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	WARN_ON(!sp->unsync);
	trace_kvm_mmu_sync_page(sp);
	sp->unsync = 0;
	--kvm->stat.mmu_unsync;
}

static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				     struct list_head *invalid_list);
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list);

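/*
 * Iterate over the shadow pages in a hash bucket, skipping obsolete entries;
 * the indirect variant additionally filters on gfn and excludes direct pages.
 */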
#define for_each_valid_sp(_kvm, _sp, _list)				\
	hlist_for_each_entry(_sp, _list, hash_link)			\
		if (is_obsolete_sp((_kvm), (_sp))) {			\
		} else

#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)			\
	for_each_valid_sp(_kvm, _sp,					\
	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else

static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			 struct list_head *invalid_list)
{
	if (vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
		return false;
	}

	return true;
}

static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
					struct list_head *invalid_list,
					bool remote_flush)
{
	if (!remote_flush && list_empty(invalid_list))
		return false;

	if (!list_empty(invalid_list))
		kvm_mmu_commit_zap_page(kvm, invalid_list);
	else
		kvm_flush_remote_tlbs(kvm);
	return true;
}

static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
				 struct list_head *invalid_list,
				 bool remote_flush, bool local_flush)
{
	if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush))
		return;

	if (local_flush)
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}

#ifdef CONFIG_KVM_MMU_AUDIT
#include "mmu_audit.c"
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
static void mmu_audit_disable(void) { }
#endif

static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	return sp->role.invalid ||
	       unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
}

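/*
 * mmu_page_path records the chain of parents visited while processing a
 * kvm_mmu_pages vector so that unsync_child bits can be cleared bottom-up
 * once a child has been handled.
 */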
struct mmu_page_path {
	struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
	unsigned int idx[PT64_ROOT_MAX_LEVEL];
};

#define for_each_sp(pvec, sp, parents, i)			\
		for (i = mmu_pages_first(&pvec, &parents);	\
			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
			i = mmu_pages_next(&pvec, &parents, i))

static int mmu_pages_next(struct kvm_mmu_pages *pvec,
			  struct mmu_page_path *parents,
			  int i)
{
	int n;

	for (n = i+1; n < pvec->nr; n++) {
		struct kvm_mmu_page *sp = pvec->page[n].sp;
		unsigned idx = pvec->page[n].idx;
		int level = sp->role.level;

		parents->idx[level-1] = idx;
		if (level == PG_LEVEL_4K)
			break;

		parents->parent[level-2] = sp;
	}

	return n;
}

static int mmu_pages_first(struct kvm_mmu_pages *pvec,
			   struct mmu_page_path *parents)
{
	struct kvm_mmu_page *sp;
	int level;

	if (pvec->nr == 0)
		return 0;

	WARN_ON(pvec->page[0].idx != INVALID_INDEX);

	sp = pvec->page[0].sp;
	level = sp->role.level;
	WARN_ON(level == PG_LEVEL_4K);

	parents->parent[level-2] = sp;

	/* Also set up a sentinel.  Further entries in pvec are all
	 * children of sp, so this element is never overwritten.
	 */
	parents->parent[level-1] = NULL;
	return mmu_pages_next(pvec, parents, 0);
}

static void mmu_pages_clear_parents(struct mmu_page_path *parents)
{
	struct kvm_mmu_page *sp;
	unsigned int level = 0;

	do {
		unsigned int idx = parents->idx[level];
		sp = parents->parent[level];
		if (!sp)
			return;

		WARN_ON(idx == INVALID_INDEX);
		clear_unsync_child_bit(sp, idx);
		level++;
	} while (!sp->unsync_children);
}

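/*
 * Sync all reachable unsync children of @parent: write-protect the gfns,
 * then sync or zap each page.  May yield mmu_lock; if @can_yield is false
 * and a resched is needed, KVM_REQ_MMU_SYNC is posted and -EINTR returned.
 */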
static int mmu_sync_children(struct kvm_vcpu *vcpu,
			     struct kvm_mmu_page *parent, bool can_yield)
{
	int i;
	struct kvm_mmu_page *sp;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;
	LIST_HEAD(invalid_list);
	bool flush = false;

	while (mmu_unsync_walk(parent, &pages)) {
		bool protected = false;

		for_each_sp(pages, sp, parents, i)
			protected |= rmap_write_protect(vcpu, sp->gfn);

		if (protected) {
			kvm_flush_remote_tlbs(vcpu->kvm);
			flush = false;
		}

		for_each_sp(pages, sp, parents, i) {
			kvm_unlink_unsync_page(vcpu->kvm, sp);
			flush |= kvm_sync_page(vcpu, sp, &invalid_list);
			mmu_pages_clear_parents(&parents);
		}
		if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
			kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
			if (!can_yield) {
				kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
				return -EINTR;
			}

			cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
			flush = false;
		}
	}

	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
	return 0;
}

static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
{
	atomic_set(&sp->write_flooding_count, 0);
}

static void clear_sp_write_flooding_count(u64 *spte)
{
	__clear_sp_write_flooding_count(sptep_to_sp(spte));
}

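/*
 * Look up the shadow page for @gfn with the computed role, or allocate a new
 * one on a cache miss.  Unsync pages with a matching role are synced (or
 * zapped); new indirect 4K pages are write-protected when first shadowed.
 */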
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int direct,
					     unsigned int access)
{
	bool direct_mmu = vcpu->arch.mmu->direct_map;
	union kvm_mmu_page_role role;
	struct hlist_head *sp_list;
	unsigned quadrant;
	struct kvm_mmu_page *sp;
	int collisions = 0;
	LIST_HEAD(invalid_list);

	role = vcpu->arch.mmu->mmu_role.base;
	role.level = level;
	role.direct = direct;
	if (role.direct)
		role.gpte_is_8_bytes = true;
	role.access = access;
	if (!direct_mmu && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}

	sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
	for_each_valid_sp(vcpu->kvm, sp, sp_list) {
		if (sp->gfn != gfn) {
			collisions++;
			continue;
		}

		if (sp->role.word != role.word) {
			/*
			 * If the guest is creating an upper-level page, zap
			 * unsync pages for the same gfn.  While it's possible
			 * the guest is using recursive page tables, in all
			 * likelihood the guest has stopped using the unsync
			 * page and is installing a completely unrelated page.
			 * Unsync pages must not be left as is, because the new
			 * upper-level page will be write-protected.
			 */
			if (level > PG_LEVEL_4K && sp->unsync)
				kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
							 &invalid_list);
			continue;
		}

		if (direct_mmu)
			goto trace_get_page;

		if (sp->unsync) {
			/*
			 * The page is good, but is stale.  kvm_sync_page does
			 * get the latest guest state, but (unlike mmu_unsync_children)
			 * it doesn't write-protect the page or mark it synchronized!
			 * This way the validity of the mapping is ensured, but the
			 * overhead of write protection is not incurred until the
			 * guest invalidates the TLB mapping.  This allows multiple
			 * SPs for a single gfn to be unsync.
			 *
			 * If the sync fails, the page is zapped.  If so, break
			 * in order to rebuild it.
			 */
			if (!kvm_sync_page(vcpu, sp, &invalid_list))
				break;

			WARN_ON(!list_empty(&invalid_list));
			kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
		}

		__clear_sp_write_flooding_count(sp);

trace_get_page:
		trace_kvm_mmu_get_page(sp, false);
		goto out;
	}

	++vcpu->kvm->stat.mmu_cache_miss;

	sp = kvm_mmu_alloc_page(vcpu, direct);

	sp->gfn = gfn;
	sp->role = role;
	hlist_add_head(&sp->hash_link, sp_list);
	if (!direct) {
		account_shadowed(vcpu->kvm, sp);
		if (level == PG_LEVEL_4K && rmap_write_protect(vcpu, gfn))
			kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
	}
	trace_kvm_mmu_get_page(sp, true);
out:
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);

	if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
		vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
	return sp;
}

static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
					struct kvm_vcpu *vcpu, hpa_t root,
					u64 addr)
{
	iterator->addr = addr;
	iterator->shadow_addr = root;
	iterator->level = vcpu->arch.mmu->shadow_root_level;

	if (iterator->level == PT64_ROOT_4LEVEL &&
	    vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
	    !vcpu->arch.mmu->direct_map)
		--iterator->level;

	if (iterator->level == PT32E_ROOT_LEVEL) {
		/*
		 * prev_root is currently only used for 64-bit hosts. So only
		 * the active root_hpa is valid here.
		 */
		BUG_ON(root != vcpu->arch.mmu->root_hpa);

		iterator->shadow_addr
			= vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
		--iterator->level;
		if (!iterator->shadow_addr)
			iterator->level = 0;
	}
}

static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
			     struct kvm_vcpu *vcpu, u64 addr)
{
	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
				    addr);
}

static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
{
	if (iterator->level < PG_LEVEL_4K)
		return false;

	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
	iterator->sptep	= ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
	return true;
}

static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
			       u64 spte)
{
	if (is_last_spte(spte, iterator->level)) {
		iterator->level = 0;
		return;
	}

	iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
	--iterator->level;
}

static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
{
	__shadow_walk_next(iterator, *iterator->sptep);
}

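/*
 * Install @sp as the child of @sptep: write a non-leaf SPTE pointing at it,
 * record the parent link, and propagate unsync state to the new parent.
 */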
static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
			     struct kvm_mmu_page *sp)
{
	u64 spte;

	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);

	spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));

	mmu_spte_set(sptep, spte);

	mmu_page_add_parent_pte(vcpu, sp, sptep);

	if (sp->unsync_children || sp->unsync)
		mark_unsync(sptep);
}

static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
				   unsigned direct_access)
{
	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
		struct kvm_mmu_page *child;

		/*
		 * For the direct sp, if the guest pte's dirty bit
		 * changed from clean to dirty, it will corrupt the
		 * sp's access: allow writable in the read-only sp,
		 * so we should update the spte at this point to get
		 * a new sp with the correct access.
		 */
		child = to_shadow_page(*sptep & PT64_BASE_ADDR_MASK);
		if (child->role.access == direct_access)
			return;

		drop_parent_pte(child, sptep);
		kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
	}
}

/* Returns the number of zapped non-leaf child shadow pages. */
static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
			    u64 *spte, struct list_head *invalid_list)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
		if (is_last_spte(pte, sp->role.level)) {
			drop_spte(kvm, spte);
		} else {
			child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
			drop_parent_pte(child, spte);

			/*
			 * Recursively zap nested TDP SPs, parentless SPs are
			 * unlikely to be used again in the near future.  This
			 * avoids retaining a large number of stale nested SPs.
			 */
			if (tdp_enabled && invalid_list &&
			    child->role.guest_mode && !child->parent_ptes.val)
				return kvm_mmu_prepare_zap_page(kvm, child,
								invalid_list);
		}
	} else if (is_mmio_spte(pte)) {
		mmu_spte_clear_no_track(spte);
	}
	return 0;
}

static int kvm_mmu_page_unlink_children(struct kvm *kvm,
					struct kvm_mmu_page *sp,
					struct list_head *invalid_list)
{
	int zapped = 0;
	unsigned i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list);

	return zapped;
}

static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	u64 *sptep;
	struct rmap_iterator iter;

	while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
		drop_parent_pte(sp, sptep);
}

static int mmu_zap_unsync_children(struct kvm *kvm,
				   struct kvm_mmu_page *parent,
				   struct list_head *invalid_list)
{
	int i, zapped = 0;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;

	if (parent->role.level == PG_LEVEL_4K)
		return 0;

	while (mmu_unsync_walk(parent, &pages)) {
		struct kvm_mmu_page *sp;

		for_each_sp(pages, sp, parents, i) {
			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
			mmu_pages_clear_parents(&parents);
			zapped++;
		}
	}

	return zapped;
}

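/*
 * Unlink a shadow page and queue it on @invalid_list; it is freed only after
 * the TLB flush in kvm_mmu_commit_zap_page().  Returns true if zapping
 * children made active_mmu_pages unstable and the caller's walk must restart.
 */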
static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
				       struct kvm_mmu_page *sp,
				       struct list_head *invalid_list,
				       int *nr_zapped)
{
	bool list_unstable;

	trace_kvm_mmu_prepare_zap_page(sp);
	++kvm->stat.mmu_shadow_zapped;
	*nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
	*nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
	kvm_mmu_unlink_parents(kvm, sp);

	/* Zapping children means active_mmu_pages has become unstable. */
	list_unstable = *nr_zapped;

	if (!sp->role.invalid && !sp->role.direct)
		unaccount_shadowed(kvm, sp);

	if (sp->unsync)
		kvm_unlink_unsync_page(kvm, sp);
	if (!sp->root_count) {
		/* Count self */
		(*nr_zapped)++;

		/*
		 * Already invalid pages (previously active roots) are not on
		 * the active page list.  See list_del() in the "else" case of
		 * !sp->root_count.
		 */
		if (sp->role.invalid)
			list_add(&sp->link, invalid_list);
		else
			list_move(&sp->link, invalid_list);
		kvm_mod_used_mmu_pages(kvm, -1);
	} else {
		/*
		 * Remove the active root from the active page list, the root
		 * will be explicitly freed when the root_count hits zero.
		 */
		list_del(&sp->link);

		/*
		 * Obsolete pages cannot be used on any vCPUs, see the comment
		 * in kvm_mmu_zap_all_fast().  Note, is_obsolete_sp() also
		 * treats invalid shadow pages as being obsolete.
		 */
		if (!is_obsolete_sp(kvm, sp))
			kvm_reload_remote_mmus(kvm);
	}

	if (sp->lpage_disallowed)
		unaccount_huge_nx_page(kvm, sp);

	sp->role.invalid = 1;
	return list_unstable;
}

static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				     struct list_head *invalid_list)
{
	int nr_zapped;

	__kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
	return nr_zapped;
}

static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list)
{
	struct kvm_mmu_page *sp, *nsp;

	if (list_empty(invalid_list))
		return;

	/*
	 * We need to make sure everyone sees our modifications to
	 * the page tables and see changes to vcpu->mode here. The barrier
	 * in the kvm_flush_remote_tlbs() achieves this. This pairs
	 * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
	 *
	 * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
	 * guest mode and/or lockless shadow page table walks.
	 */
	kvm_flush_remote_tlbs(kvm);

	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
		WARN_ON(!sp->role.invalid || sp->root_count);
		kvm_mmu_free_page(sp);
	}
}

static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
						  unsigned long nr_to_zap)
{
	unsigned long total_zapped = 0;
	struct kvm_mmu_page *sp, *tmp;
	LIST_HEAD(invalid_list);
	bool unstable;
	int nr_zapped;

	if (list_empty(&kvm->arch.active_mmu_pages))
		return 0;

restart:
	list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
		/*
		 * Don't zap active root pages, the page itself can't be freed
		 * and zapping it will just force vCPUs to realloc and reload.
		 */
		if (sp->root_count)
			continue;

		unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
						      &nr_zapped);
		total_zapped += nr_zapped;
		if (total_zapped >= nr_to_zap)
			break;

		if (unstable)
			goto restart;
	}

	kvm_mmu_commit_zap_page(kvm, &invalid_list);

	kvm->stat.mmu_recycled += total_zapped;
	return total_zapped;
}

static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
{
	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
		return kvm->arch.n_max_mmu_pages -
			kvm->arch.n_used_mmu_pages;

	return 0;
}

static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
{
	unsigned long avail = kvm_mmu_available_pages(vcpu->kvm);

	if (likely(avail >= KVM_MIN_FREE_MMU_PAGES))
		return 0;

	kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail);

	/*
	 * Note, this check is intentionally soft, it only guarantees that one
	 * page is available, while the caller may end up allocating as many as
	 * four pages, e.g. for PAE roots or for 5-level paging.  Temporarily
	 * exceeding the (arbitrary by default) limit will not harm the host,
	 * being too aggressive may unnecessarily kill the guest, and getting an
	 * exact count is far more trouble than it's worth, especially in the
	 * page fault paths.
	 */
	if (!kvm_mmu_available_pages(vcpu->kvm))
		return -ENOSPC;
	return 0;
}

/*
 * Change the number of mmu pages allocated to the vm.
 * Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
{
	write_lock(&kvm->mmu_lock);

	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
		kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
						  goal_nr_mmu_pages);

		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
	}

	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;

	write_unlock(&kvm->mmu_lock);
}

int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	LIST_HEAD(invalid_list);
	int r;

	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
	r = 0;
	write_lock(&kvm->mmu_lock);
	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
			 sp->role.word);
		r = 1;
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
	}
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	write_unlock(&kvm->mmu_lock);

	return r;
}

static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa;
	int r;

	if (vcpu->arch.mmu->direct_map)
		return 0;

	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);

	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);

	return r;
}

static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	trace_kvm_mmu_unsync_page(sp);
	++vcpu->kvm->stat.mmu_unsync;
	sp->unsync = 1;

	kvm_mmu_mark_parents_unsync(sp);
}

/*
 * Attempt to unsync any shadow pages that can be reached by the specified gfn;
 * KVM is creating a writable mapping for said gfn.  Returns 0 if all pages
 * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
 * be write-protected.
 */
int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
{
	struct kvm_mmu_page *sp;
	bool locked = false;

	/*
	 * Force write-protection if the page is being tracked.  Note, the page
	 * track machinery is used to write-protect upper-level shadow pages,
	 * i.e. this guards the role.level == 4K assertion below!
	 */
	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
		return -EPERM;

	/*
	 * The page is not write-tracked, mark existing shadow pages unsync
	 * unless KVM is synchronizing an unsync SP (can_unsync = false).  In
	 * that case, KVM must complete emulation of the guest TLB flush before
	 * allowing shadow pages to become unsync (writable by the guest).
	 */
	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
		if (!can_unsync)
			return -EPERM;

		if (sp->unsync)
			continue;

		/*
		 * TDP MMU page faults require an additional spinlock as they
		 * run with mmu_lock held for read, not write, and the unsync
		 * logic is not thread safe.  Take the spinlock regardless of
		 * the MMU type to avoid extra conditionals/parameters, there's
		 * no meaningful penalty if mmu_lock is held for write.
		 */
		if (!locked) {
			locked = true;
			spin_lock(&vcpu->kvm->arch.mmu_unsync_pages_lock);

			/*
			 * Recheck after taking the spinlock, a different vCPU
			 * may have since marked the page unsync.  A false
			 * positive on the unprotected check above is not
			 * possible as clearing sp->unsync _must_ hold mmu_lock
			 * for write, i.e. unsync cannot transition from 0->1
			 * while this CPU holds mmu_lock for read (or write).
			 */
			if (READ_ONCE(sp->unsync))
				continue;
		}

		WARN_ON(sp->role.level != PG_LEVEL_4K);
		kvm_unsync_page(vcpu, sp);
	}
	if (locked)
		spin_unlock(&vcpu->kvm->arch.mmu_unsync_pages_lock);

	/*
	 * We need to ensure that the marking of unsync pages is visible
	 * before the SPTE is updated to allow writes because
	 * kvm_mmu_sync_roots() checks the unsync flags without holding
	 * the MMU lock and so can race with this. If the SPTE was updated
	 * before the page had been marked as unsync-ed, something like the
	 * following could happen:
	 *
	 * CPU 1                    CPU 2
	 * ---------------------------------------------------------------------
	 * 1.2 Host updates SPTE
	 *     to be writable
	 *                      2.1 Guest writes a GPTE for GVA X.
	 *                          (GPTE being in the guest page table shadowed
	 *                           by the SP from CPU 1.)
	 *                          This reads SPTE during the page table walk.
	 *                          Since SPTE.W is read as 1, there is no
	 *                          fault.
	 *
	 *                      2.2 Guest issues TLB flush.
	 *                          That causes a VM Exit.
	 *
	 *                      2.3 Walking of unsync pages sees sp->unsync is
	 *                          false and skips the page.
	 *
	 *                      2.4 Guest accesses GVA X.
	 *                          Since the mapping in the SP was not updated,
	 *                          the old mapping for GVA X incorrectly
	 *                          gets used.
	 * 1.1 Host marks SP
	 *     as unsync
	 *     (sp->unsync = true)
	 *
	 * The write barrier below ensures that 1.1 happens before 1.2 and thus
	 * the situation in 2.4 does not arise. The implicit barrier in 2.2
	 * pairs with this write barrier.
	 */
	smp_wmb();

	return 0;
}

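/*
 * Compute a new SPTE via make_spte() and install it.  Returns SET_SPTE_*
 * flags indicating whether the write was spurious or needs a remote TLB flush.
 */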
static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
		    unsigned int pte_access, int level,
		    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
		    bool can_unsync, bool host_writable)
{
	u64 spte;
	struct kvm_mmu_page *sp;
	int ret;

	sp = sptep_to_sp(sptep);

	ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
			can_unsync, host_writable, sp_ad_disabled(sp), &spte);

	if (spte & PT_WRITABLE_MASK)
		kvm_vcpu_mark_page_dirty(vcpu, gfn);

	if (*sptep == spte)
		ret |= SET_SPTE_SPURIOUS;
	else if (mmu_spte_update(sptep, spte))
		ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
	return ret;
}

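/*
 * Create or update the SPTE for a faulting access, handling MMIO pfns and
 * overwrites of existing mappings.  Returns a RET_PF_* value; newly created
 * mappings are added to the rmap and may trigger rmap_recycle().
 */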
static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
			unsigned int pte_access, bool write_fault, int level,
			gfn_t gfn, kvm_pfn_t pfn, bool speculative,
			bool host_writable)
{
	int was_rmapped = 0;
	int rmap_count;
	int set_spte_ret;
	int ret = RET_PF_FIXED;
	bool flush = false;

	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
		 *sptep, write_fault, gfn);

	if (unlikely(is_noslot_pfn(pfn))) {
		mark_mmio_spte(vcpu, sptep, gfn, pte_access);
		return RET_PF_EMULATE;
	}

	if (is_shadow_present_pte(*sptep)) {
		/*
		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
		 * the parent of the now unreachable PTE.
		 */
		if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) {
			struct kvm_mmu_page *child;
			u64 pte = *sptep;

			child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
			drop_parent_pte(child, sptep);
			flush = true;
		} else if (pfn != spte_to_pfn(*sptep)) {
			pgprintk("hfn old %llx new %llx\n",
				 spte_to_pfn(*sptep), pfn);
			drop_spte(vcpu->kvm, sptep);
			flush = true;
		} else
			was_rmapped = 1;
	}

	set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
				speculative, true, host_writable);
	if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
		if (write_fault)
			ret = RET_PF_EMULATE;
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
	}

	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
		kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
				KVM_PAGES_PER_HPAGE(level));

	/*
	 * The fault is fully spurious if and only if the new SPTE and old SPTE
	 * are identical, and emulation is not required.
	 */
	if ((set_spte_ret & SET_SPTE_SPURIOUS) && ret == RET_PF_FIXED) {
		WARN_ON_ONCE(!was_rmapped);
		return RET_PF_SPURIOUS;
	}

	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
	trace_kvm_mmu_set_spte(level, gfn, sptep);

	if (!was_rmapped) {
		kvm_update_page_stats(vcpu->kvm, level, 1);
		rmap_count = rmap_add(vcpu, sptep, gfn);
		if (rmap_count > RMAP_RECYCLE_THRESHOLD)
			rmap_recycle(vcpu, sptep, gfn);
	}

	return ret;
}

static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
				     bool no_dirty_log)
{
	struct kvm_memory_slot *slot;

	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
	if (!slot)
		return KVM_PFN_ERR_FAULT;

	return gfn_to_pfn_memslot_atomic(slot, gfn);
}

static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp,
				    u64 *start, u64 *end)
{
	struct page *pages[PTE_PREFETCH_NUM];
	struct kvm_memory_slot *slot;
	unsigned int access = sp->role.access;
	int i, ret;
	gfn_t gfn;

	gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
	if (!slot)
		return -1;

	ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
	if (ret <= 0)
		return -1;

	for (i = 0; i < ret; i++, gfn++, start++) {
		mmu_set_spte(vcpu, start, access, false, sp->role.level, gfn,
			     page_to_pfn(pages[i]), true, true);
		put_page(pages[i]);
	}

	return 0;
}

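/*
 * Opportunistically prefetch SPTEs around @sptep: scan the aligned group of
 * PTE_PREFETCH_NUM entries and map the not-yet-present runs in batches.
 */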
static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp, u64 *sptep)
{
	u64 *spte, *start = NULL;
	int i;

	WARN_ON(!sp->role.direct);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		if (is_shadow_present_pte(*spte) || spte == sptep) {
			if (!start)
				continue;
			if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
				break;
			start = NULL;
		} else if (!start)
			start = spte;
	}
}

static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
{
	struct kvm_mmu_page *sp;

	sp = sptep_to_sp(sptep);

	/*
	 * Without accessed bits, there's no way to distinguish between
	 * actually accessed translations and prefetched ones, so disable
	 * pte prefetch if accessed bits aren't available.
	 */
	if (sp_ad_disabled(sp))
		return;

	if (sp->role.level > PG_LEVEL_4K)
		return;

	/*
	 * If addresses are being invalidated, skip prefetching to avoid
	 * accidentally prefetching those addresses.
	 */
	if (unlikely(vcpu->kvm->mmu_notifier_count))
		return;

	__direct_pte_prefetch(vcpu, sp, sptep);
}

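/*
 * Determine the level at which the host maps @pfn by walking the host page
 * tables for the corresponding hva; non-compound, non-device pages are 4K.
 */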
static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
				  const struct kvm_memory_slot *slot)
{
	unsigned long hva;
	pte_t *pte;
	int level;

	if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn))
		return PG_LEVEL_4K;

	/*
	 * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
	 * is not solely for performance, it's also necessary to avoid the
	 * "writable" check in __gfn_to_hva_many(), which will always fail on
	 * read-only memslots due to gfn_to_hva() assuming writes.  Earlier
	 * page fault steps have already verified the guest isn't writing a
	 * read-only memslot.
	 */
	hva = __gfn_to_hva_memslot(slot, gfn);

	pte = lookup_address_in_mm(kvm->mm, hva, &level);
	if (unlikely(!pte))
		return PG_LEVEL_4K;

	return level;
}

int kvm_mmu_max_mapping_level(struct kvm *kvm,
			      const struct kvm_memory_slot *slot, gfn_t gfn,
			      kvm_pfn_t pfn, int max_level)
{
	struct kvm_lpage_info *linfo;
	int host_level;

	max_level = min(max_level, max_huge_page_level);
	for ( ; max_level > PG_LEVEL_4K; max_level--) {
		linfo = lpage_info_slot(gfn, slot, max_level);
		if (!linfo->disallow_lpage)
			break;
	}

	if (max_level == PG_LEVEL_4K)
		return PG_LEVEL_4K;

	host_level = host_pfn_mapping_level(kvm, gfn, pfn, slot);
	return min(host_level, max_level);
}

int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
			    int max_level, kvm_pfn_t *pfnp,
			    bool huge_page_disallowed, int *req_level)
{
	struct kvm_memory_slot *slot;
	kvm_pfn_t pfn = *pfnp;
	kvm_pfn_t mask;
	int level;

	*req_level = PG_LEVEL_4K;

	if (unlikely(max_level == PG_LEVEL_4K))
		return PG_LEVEL_4K;

	if (is_error_noslot_pfn(pfn) || kvm_is_reserved_pfn(pfn))
		return PG_LEVEL_4K;

	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, true);
	if (!slot)
		return PG_LEVEL_4K;

	/*
	 * Enforce the iTLB multihit workaround after capturing the requested
	 * level, which will be used to do precise, accurate accounting.
	 */
	*req_level = level = kvm_mmu_max_mapping_level(vcpu->kvm, slot, gfn, pfn, max_level);
	if (level == PG_LEVEL_4K || huge_page_disallowed)
		return PG_LEVEL_4K;

	/*
	 * mmu_notifier_retry() was successful and mmu_lock is held, so
	 * the pmd can't be split from under us.
	 */
	mask = KVM_PAGES_PER_HPAGE(level) - 1;
	VM_BUG_ON((gfn & mask) != (pfn & mask));
	*pfnp = pfn & ~mask;

	return level;
}

void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
				kvm_pfn_t *pfnp, int *goal_levelp)
{
	int level = *goal_levelp;

	if (cur_level == level && level > PG_LEVEL_4K &&
	    is_shadow_present_pte(spte) &&
	    !is_large_pte(spte)) {
		/*
		 * A small SPTE exists for this pfn, but FNAME(fetch)
		 * and __direct_map would like to create a large PTE
		 * instead: just force them to go down another level,
		 * folding the next 9 bits of the address back into
		 * pfn for them.
		 */
		u64 page_mask = KVM_PAGES_PER_HPAGE(level) -
				KVM_PAGES_PER_HPAGE(level - 1);
		*pfnp |= gfn & page_mask;
		(*goal_levelp)--;
	}
}

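/*
 * Walk the shadow page table for @gpa, allocating non-leaf shadow pages as
 * needed, and install the final SPTE at the level chosen by the huge page
 * adjustments above (including the iTLB multihit workaround).
 */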
static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
			int map_writable, int max_level, kvm_pfn_t pfn,
			bool prefault, bool is_tdp)
{
	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
	bool write = error_code & PFERR_WRITE_MASK;
	bool exec = error_code & PFERR_FETCH_MASK;
	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
	struct kvm_shadow_walk_iterator it;
	struct kvm_mmu_page *sp;
	int level, req_level, ret;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	gfn_t base_gfn = gfn;

	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
					huge_page_disallowed, &req_level);

	trace_kvm_mmu_spte_requested(gpa, level, pfn);
	for_each_shadow_entry(vcpu, gpa, it) {
		/*
		 * We cannot overwrite existing page tables with an NX
		 * large page, as the leaf could be executable.
		 */
		if (nx_huge_page_workaround_enabled)
			disallowed_hugepage_adjust(*it.sptep, gfn, it.level,
						   &pfn, &level);

		base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
		if (it.level == level)
			break;

		drop_large_spte(vcpu, it.sptep);
		if (is_shadow_present_pte(*it.sptep))
			continue;

		sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
				      it.level - 1, true, ACC_ALL);

		link_shadow_page(vcpu, it.sptep, sp);
		if (is_tdp && huge_page_disallowed &&
		    req_level >= it.level)
			account_huge_nx_page(vcpu->kvm, sp);
	}

	ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
			   write, level, base_gfn, pfn, prefault,
			   map_writable);
	if (ret == RET_PF_SPURIOUS)
		return ret;

	direct_pte_prefetch(vcpu, it.sptep);
	++vcpu->stat.pf_fixed;
	return ret;
}

static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
{
	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk);
}

static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
{
	/*
	 * Do not cache the mmio info caused by writing the readonly gfn
	 * into the spte, otherwise read access on the readonly gfn could
	 * also cause an mmio page fault and be treated as mmio access.
	 */
	if (pfn == KVM_PFN_ERR_RO_FAULT)
		return RET_PF_EMULATE;

	if (pfn == KVM_PFN_ERR_HWPOISON) {
		kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
		return RET_PF_RETRY;
	}

	return -EFAULT;
}

static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
				kvm_pfn_t pfn, unsigned int access,
				int *ret_val)
{
	/* The pfn is invalid, report the error! */
	if (unlikely(is_error_pfn(pfn))) {
		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
		return true;
	}

	if (unlikely(is_noslot_pfn(pfn))) {
		vcpu_cache_mmio_info(vcpu, gva, gfn,
				     access & shadow_mmio_access_mask);
		/*
		 * If MMIO caching is disabled, emulate immediately without
		 * touching the shadow page tables as attempting to install an
		 * MMIO SPTE will just be an expensive nop.
		 */
		if (unlikely(!shadow_mmio_value)) {
			*ret_val = RET_PF_EMULATE;
			return true;
		}
	}

	return false;
}

static bool page_fault_can_be_fast(u32 error_code)
{
	/*
	 * Do not fix an mmio spte with an invalid generation number, it
	 * needs to be updated by the slow page fault path.
	 */
	if (unlikely(error_code & PFERR_RSVD_MASK))
		return false;

	/* See if the page fault is due to an NX violation */
	if (unlikely(((error_code & (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))
		      == (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))))
		return false;

	/*
	 * #PF can be fast if:
	 * 1. The shadow page table entry is not present, which could mean that
	 *    the fault is potentially caused by access tracking (if enabled).
	 * 2. The shadow page table entry is present and the fault is caused by
	 *    write-protect, in which case we just need to change the W bit of
	 *    the spte, which can be done outside of mmu-lock.
	 *
	 * However, if access tracking is disabled we know that a non-present
	 * page must be a genuine page fault where we have to create a new SPTE.
	 * So, if access tracking is disabled, we return true only for write
	 * accesses to a present page.
	 */

	return shadow_acc_track_mask != 0 ||
	       ((error_code & (PFERR_WRITE_MASK | PFERR_PRESENT_MASK))
		== (PFERR_WRITE_MASK | PFERR_PRESENT_MASK));
}

/*
 * Returns true if the SPTE was fixed successfully. Otherwise,
 * someone else modified the SPTE from its original value.
 */
static bool
fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			u64 *sptep, u64 old_spte, u64 new_spte)
{
	gfn_t gfn;

	WARN_ON(!sp->role.direct);

	/*
	 * Theoretically we could also set dirty bit (and flush TLB) here in
	 * order to eliminate unnecessary PML logging. See comments in
	 * set_spte. But fast_page_fault is very unlikely to happen with PML
	 * enabled, so we do not do this. This might result in the same GPA
	 * to be logged in PML buffer again when the write really happens, and
	 * eventually to be called by mark_page_dirty twice. But it's also no
	 * harm. This also avoids the TLB flush needed after setting dirty bit
	 * so non-PML cases won't be impacted.
	 *
	 * Compare with set_spte where instead shadow_dirty_mask is set.
	 */
	if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
		return false;

	if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) {
		/*
		 * The gfn of direct spte is stable since it is
		 * calculated by sp->gfn.
		 */
		gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
	}

	return true;
}

static bool is_access_allowed(u32 fault_err_code, u64 spte)
{
	if (fault_err_code & PFERR_FETCH_MASK)
		return is_executable_pte(spte);

	if (fault_err_code & PFERR_WRITE_MASK)
		return is_writable_pte(spte);

	/* Fault was on Read access */
	return spte & PT_PRESENT_MASK;
}

/*
 * Returns the last level spte pointer of the shadow page walk for the given
 * gpa, and sets *spte to the spte value. This spte may be non-present. If no
 * walk could be performed, returns NULL and *spte does not contain valid data.
 *
 * Contract:
 *  - Must be called between walk_shadow_page_lockless_{begin,end}.
 *  - The returned sptep must not be used after walk_shadow_page_lockless_end.
 */
static u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)
{
	struct kvm_shadow_walk_iterator iterator;
	u64 old_spte;
	u64 *sptep = NULL;

	for_each_shadow_entry_lockless(vcpu, gpa, iterator, old_spte) {
		sptep = iterator.sptep;
		*spte = old_spte;

		if (!is_shadow_present_pte(old_spte))
			break;
	}

	return sptep;
}

/*
 * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
 */
static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code)
{
	struct kvm_mmu_page *sp;
	int ret = RET_PF_INVALID;
	u64 spte = 0ull;
	u64 *sptep = NULL;
	uint retry_count = 0;

	if (!page_fault_can_be_fast(error_code))
		return ret;

	walk_shadow_page_lockless_begin(vcpu);

	do {
		u64 new_spte;

		if (is_tdp_mmu(vcpu->arch.mmu))
			sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, gpa, &spte);
		else
			sptep = fast_pf_get_last_sptep(vcpu, gpa, &spte);

		if (!is_shadow_present_pte(spte))
			break;

		sp = sptep_to_sp(sptep);
		if (!is_last_spte(spte, sp->role.level))
			break;

		/*
		 * Check whether the memory access that caused the fault would
		 * still cause it if it were to be performed right now. If not,
		 * then this is a spurious fault caused by a lazily flushed TLB,
		 * or some other CPU has already fixed the PTE after the
		 * current CPU took the fault.
		 *
		 * Need not check the access of upper level table entries since
		 * they are always ACC_ALL.
		 */
		if (is_access_allowed(error_code, spte)) {
			ret = RET_PF_SPURIOUS;
			break;
		}

		new_spte = spte;

		if (is_access_track_spte(spte))
			new_spte = restore_acc_track_spte(new_spte);

		/*
		 * Currently, to simplify the code, write-protection can
		 * be removed in the fast path only if the SPTE was
		 * write-protected for dirty-logging or access tracking.
		 */
		if ((error_code & PFERR_WRITE_MASK) &&
		    spte_can_locklessly_be_made_writable(spte)) {
			new_spte |= PT_WRITABLE_MASK;

			/*
			 * Do not fix write-permission on the large spte.  Since
			 * we only dirty the first page into the dirty-bitmap in
			 * fast_pf_fix_direct_spte(), other pages are missed
			 * if its slot has dirty logging enabled.
			 *
			 * Instead, we let the slow page fault path create a
			 * normal spte to fix the access.
			 *
			 * See the comments in kvm_arch_commit_memory_region().
			 */
			if (sp->role.level > PG_LEVEL_4K)
				break;
		}

		/* Verify that the fault can be handled in the fast path */
		if (new_spte == spte ||
		    !is_access_allowed(error_code, new_spte))
			break;

		/*
		 * Currently, fast page fault only works for direct mapping
		 * since the gfn is not stable for indirect shadow page. See
		 * Documentation/virt/kvm/locking.rst to get more detail.
		 */
		if (fast_pf_fix_direct_spte(vcpu, sp, sptep, spte, new_spte)) {
			ret = RET_PF_FIXED;
			break;
		}

		if (++retry_count > 4) {
			printk_once(KERN_WARNING
				"kvm: Fast #PF retrying more than 4 times.\n");
			break;
		}

	} while (true);

	trace_fast_page_fault(vcpu, gpa, error_code, sptep, spte, ret);
	walk_shadow_page_lockless_end(vcpu);

	return ret;
}

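/*
 * Drop a reference to the root at *root_hpa and invalidate the pointer; a
 * shadow root whose count hits zero while invalid is queued for zapping,
 * while TDP MMU roots are released via kvm_tdp_mmu_put_root().
 */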
static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
			       struct list_head *invalid_list)
{
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(*root_hpa))
		return;

	sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);

	if (is_tdp_mmu_page(sp))
		kvm_tdp_mmu_put_root(kvm, sp, false);
	else if (!--sp->root_count && sp->role.invalid)
		kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);

	*root_hpa = INVALID_PAGE;
}

3326 /* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
3327 void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
3328 			ulong roots_to_free)
3329 {
3330 	struct kvm *kvm = vcpu->kvm;
3331 	int i;
3332 	LIST_HEAD(invalid_list);
3333 	bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT;
3334 
3335 	BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
3336 
3337 	/* Before acquiring the MMU lock, see if we need to do any real work. */
3338 	if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) {
3339 		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3340 			if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
3341 			    VALID_PAGE(mmu->prev_roots[i].hpa))
3342 				break;
3343 
3344 		if (i == KVM_MMU_NUM_PREV_ROOTS)
3345 			return;
3346 	}
3347 
3348 	write_lock(&kvm->mmu_lock);
3349 
3350 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
3351 		if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
3352 			mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa,
3353 					   &invalid_list);
3354 
3355 	if (free_active_root) {
3356 		if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
3357 		    (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
3358 			mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list);
3359 		} else if (mmu->pae_root) {
3360 			for (i = 0; i < 4; ++i) {
3361 				if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
3362 					continue;
3363 
3364 				mmu_free_root_page(kvm, &mmu->pae_root[i],
3365 						   &invalid_list);
3366 				mmu->pae_root[i] = INVALID_PAE_ROOT;
3367 			}
3368 		}
3369 		mmu->root_hpa = INVALID_PAGE;
3370 		mmu->root_pgd = 0;
3371 	}
3372 
3373 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
3374 	write_unlock(&kvm->mmu_lock);
3375 }
3376 EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
3377 
3378 void kvm_mmu_free_guest_mode_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
3379 {
3380 	unsigned long roots_to_free = 0;
3381 	hpa_t root_hpa;
3382 	int i;
3383 
3384 	/*
3385 	 * This should not be called while L2 is active; L2 can't invalidate
3386 	 * _only_ its own roots, e.g. INVVPID unconditionally exits.
3387 	 */
3388 	WARN_ON_ONCE(mmu->mmu_role.base.guest_mode);
3389 
3390 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
3391 		root_hpa = mmu->prev_roots[i].hpa;
3392 		if (!VALID_PAGE(root_hpa))
3393 			continue;
3394 
3395 		if (!to_shadow_page(root_hpa) ||
3396 			to_shadow_page(root_hpa)->role.guest_mode)
3397 			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
3398 	}
3399 
3400 	kvm_mmu_free_roots(vcpu, mmu, roots_to_free);
3401 }
3402 EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots);
3403 
3404 
3405 static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
3406 {
3407 	int ret = 0;
3408 
3409 	if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
3410 		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3411 		ret = 1;
3412 	}
3413 
3414 	return ret;
3415 }
3416 
3417 static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
3418 			    u8 level, bool direct)
3419 {
3420 	struct kvm_mmu_page *sp;
3421 
3422 	sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL);
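	/*
	 * Pin the root: the page can be freed only after root_count drops
	 * back to zero, see mmu_free_root_page().
	 */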
3423 	++sp->root_count;
3424 
3425 	return __pa(sp->spt);
3426 }
3427 
3428 static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
3429 {
3430 	struct kvm_mmu *mmu = vcpu->arch.mmu;
3431 	u8 shadow_root_level = mmu->shadow_root_level;
3432 	hpa_t root;
3433 	unsigned i;
3434 	int r;
3435 
3436 	write_lock(&vcpu->kvm->mmu_lock);
3437 	r = make_mmu_pages_available(vcpu);
3438 	if (r < 0)
3439 		goto out_unlock;
3440 
3441 	if (is_tdp_mmu_enabled(vcpu->kvm)) {
3442 		root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
3443 		mmu->root_hpa = root;
3444 	} else if (shadow_root_level >= PT64_ROOT_4LEVEL) {
3445 		root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true);
3446 		mmu->root_hpa = root;
3447 	} else if (shadow_root_level == PT32E_ROOT_LEVEL) {
3448 		if (WARN_ON_ONCE(!mmu->pae_root)) {
3449 			r = -EIO;
3450 			goto out_unlock;
3451 		}
3452 
3453 		for (i = 0; i < 4; ++i) {
3454 			WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3455 
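			/*
			 * Each PAE root maps 1GiB of guest physical address
			 * space, i.e. quadrant i starts at GPA i << 30.
			 */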
3456 			root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
3457 					      i << 30, PT32_ROOT_LEVEL, true);
3458 			mmu->pae_root[i] = root | PT_PRESENT_MASK |
3459 					   shadow_me_mask;
3460 		}
3461 		mmu->root_hpa = __pa(mmu->pae_root);
3462 	} else {
3463 		WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
3464 		r = -EIO;
3465 		goto out_unlock;
3466 	}
3467 
3468 	/* root_pgd is ignored for direct MMUs. */
3469 	mmu->root_pgd = 0;
3470 out_unlock:
3471 	write_unlock(&vcpu->kvm->mmu_lock);
3472 	return r;
3473 }
3474 
3475 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3476 {
3477 	struct kvm_mmu *mmu = vcpu->arch.mmu;
3478 	u64 pdptrs[4], pm_mask;
3479 	gfn_t root_gfn, root_pgd;
3480 	hpa_t root;
3481 	unsigned i;
3482 	int r;
3483 
3484 	root_pgd = mmu->get_guest_pgd(vcpu);
3485 	root_gfn = root_pgd >> PAGE_SHIFT;
3486 
3487 	if (mmu_check_root(vcpu, root_gfn))
3488 		return 1;
3489 
3490 	/*
3491 	 * On SVM, reading PDPTRs might access guest memory, which might fault
3492 	 * and thus might sleep.  Grab the PDPTRs before acquiring mmu_lock.
3493 	 */
3494 	if (mmu->root_level == PT32E_ROOT_LEVEL) {
3495 		for (i = 0; i < 4; ++i) {
3496 			pdptrs[i] = mmu->get_pdptr(vcpu, i);
3497 			if (!(pdptrs[i] & PT_PRESENT_MASK))
3498 				continue;
3499 
3500 			if (mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT))
3501 				return 1;
3502 		}
3503 	}
3504 
3505 	r = alloc_all_memslots_rmaps(vcpu->kvm);
3506 	if (r)
3507 		return r;
3508 
3509 	write_lock(&vcpu->kvm->mmu_lock);
3510 	r = make_mmu_pages_available(vcpu);
3511 	if (r < 0)
3512 		goto out_unlock;
3513 
3514 	/*
3515 	 * Do we shadow a long mode page table? If so, we need to
3516 	 * write-protect the guest's page table root.
3517 	 */
3518 	if (mmu->root_level >= PT64_ROOT_4LEVEL) {
3519 		root = mmu_alloc_root(vcpu, root_gfn, 0,
3520 				      mmu->shadow_root_level, false);
3521 		mmu->root_hpa = root;
3522 		goto set_root_pgd;
3523 	}
3524 
3525 	if (WARN_ON_ONCE(!mmu->pae_root)) {
3526 		r = -EIO;
3527 		goto out_unlock;
3528 	}
3529 
3530 	/*
3531 	 * We shadow a 32-bit page table. This may be a legacy 2-level
3532 	 * or a PAE 3-level page table. In either case, we need to be aware that
3533 	 * the shadow page table may be a PAE or a long mode page table.
3534 	 */
3535 	pm_mask = PT_PRESENT_MASK | shadow_me_mask;
3536 	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
3537 		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
3538 
3539 		if (WARN_ON_ONCE(!mmu->pml4_root)) {
3540 			r = -EIO;
3541 			goto out_unlock;
3542 		}
3543 		mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask;
3544 
3545 		if (mmu->shadow_root_level == PT64_ROOT_5LEVEL) {
3546 			if (WARN_ON_ONCE(!mmu->pml5_root)) {
3547 				r = -EIO;
3548 				goto out_unlock;
3549 			}
3550 			mmu->pml5_root[0] = __pa(mmu->pml4_root) | pm_mask;
3551 		}
3552 	}
3553 
3554 	for (i = 0; i < 4; ++i) {
3555 		WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3556 
3557 		if (mmu->root_level == PT32E_ROOT_LEVEL) {
3558 			if (!(pdptrs[i] & PT_PRESENT_MASK)) {
3559 				mmu->pae_root[i] = INVALID_PAE_ROOT;
3560 				continue;
3561 			}
3562 			root_gfn = pdptrs[i] >> PAGE_SHIFT;
3563 		}
3564 
3565 		root = mmu_alloc_root(vcpu, root_gfn, i << 30,
3566 				      PT32_ROOT_LEVEL, false);
3567 		mmu->pae_root[i] = root | pm_mask;
3568 	}
3569 
3570 	if (mmu->shadow_root_level == PT64_ROOT_5LEVEL)
3571 		mmu->root_hpa = __pa(mmu->pml5_root);
3572 	else if (mmu->shadow_root_level == PT64_ROOT_4LEVEL)
3573 		mmu->root_hpa = __pa(mmu->pml4_root);
3574 	else
3575 		mmu->root_hpa = __pa(mmu->pae_root);
3576 
3577 set_root_pgd:
3578 	mmu->root_pgd = root_pgd;
3579 out_unlock:
3580 	write_unlock(&vcpu->kvm->mmu_lock);
3581 
3582 	return r;
3583 }
3584 
3585 static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
3586 {
3587 	struct kvm_mmu *mmu = vcpu->arch.mmu;
3588 	bool need_pml5 = mmu->shadow_root_level > PT64_ROOT_4LEVEL;
3589 	u64 *pml5_root = NULL;
3590 	u64 *pml4_root = NULL;
3591 	u64 *pae_root;
3592 
3593 	/*
3594 	 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
3595 	 * tables are allocated and initialized at root creation as there is no
3596 	 * equivalent level in the guest's NPT to shadow.  Allocate the tables
3597 	 * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
3598 	 */
3599 	if (mmu->direct_map || mmu->root_level >= PT64_ROOT_4LEVEL ||
3600 	    mmu->shadow_root_level < PT64_ROOT_4LEVEL)
3601 		return 0;
3602 
3603 	/*
3604 	 * NPT, the only paging mode that uses this horror, uses a fixed number
3605 	 * of levels for the shadow page tables, e.g. all MMUs are 4-level or
3606 	 * all MMUs are 5-level.  Thus, this can safely require that pml5_root
3607 	 * is allocated if the other roots are valid and pml5 is needed, as any
3608 	 * prior MMU would also have required pml5.
3609 	 */
3610 	if (mmu->pae_root && mmu->pml4_root && (!need_pml5 || mmu->pml5_root))
3611 		return 0;
3612 
3613 	/*
3614 	 * The special roots should always be allocated in concert.  Yell and
3615 	 * bail if KVM ends up in a state where only one of the roots is valid.
3616 	 */
3617 	if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root ||
3618 			 (need_pml5 && mmu->pml5_root)))
3619 		return -EIO;
3620 
3621 	/*
3622 	 * Unlike 32-bit NPT, the PDP table doesn't need to be in low mem, and
3623 	 * doesn't need to be decrypted.
3624 	 */
3625 	pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3626 	if (!pae_root)
3627 		return -ENOMEM;
3628 
3629 #ifdef CONFIG_X86_64
3630 	pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3631 	if (!pml4_root)
3632 		goto err_pml4;
3633 
3634 	if (need_pml5) {
3635 		pml5_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3636 		if (!pml5_root)
3637 			goto err_pml5;
3638 	}
3639 #endif
3640 
3641 	mmu->pae_root = pae_root;
3642 	mmu->pml4_root = pml4_root;
3643 	mmu->pml5_root = pml5_root;
3644 
3645 	return 0;
3646 
3647 #ifdef CONFIG_X86_64
3648 err_pml5:
3649 	free_page((unsigned long)pml4_root);
3650 err_pml4:
3651 	free_page((unsigned long)pae_root);
3652 	return -ENOMEM;
3653 #endif
3654 }
3655 
3656 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
3657 {
3658 	int i;
3659 	struct kvm_mmu_page *sp;
3660 
3661 	if (vcpu->arch.mmu->direct_map)
3662 		return;
3663 
3664 	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
3665 		return;
3666 
3667 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
3668 
3669 	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
3670 		hpa_t root = vcpu->arch.mmu->root_hpa;
3671 		sp = to_shadow_page(root);
3672 
3673 		/*
3674 		 * Even if another CPU was marking the SP as unsync-ed
3675 		 * simultaneously, any guest page table changes are not
3676 		 * guaranteed to be visible anyway until this VCPU issues a TLB
3677 		 * flush strictly after those changes are made. We only need to
3678 		 * ensure that the other CPU sets these flags before any actual
3679 		 * changes to the page tables are made. The comments in
3680 		 * mmu_try_to_unsync_pages() describe what could go wrong if
3681 		 * this requirement isn't satisfied.
3682 		 */
3683 		if (!smp_load_acquire(&sp->unsync) &&
3684 		    !smp_load_acquire(&sp->unsync_children))
3685 			return;
3686 
3687 		write_lock(&vcpu->kvm->mmu_lock);
3688 		kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
3689 
3690 		mmu_sync_children(vcpu, sp, true);
3691 
3692 		kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3693 		write_unlock(&vcpu->kvm->mmu_lock);
3694 		return;
3695 	}
3696 
3697 	write_lock(&vcpu->kvm->mmu_lock);
3698 	kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
3699 
3700 	for (i = 0; i < 4; ++i) {
3701 		hpa_t root = vcpu->arch.mmu->pae_root[i];
3702 
3703 		if (IS_VALID_PAE_ROOT(root)) {
3704 			root &= PT64_BASE_ADDR_MASK;
3705 			sp = to_shadow_page(root);
3706 			mmu_sync_children(vcpu, sp, true);
3707 		}
3708 	}
3709 
3710 	kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3711 	write_unlock(&vcpu->kvm->mmu_lock);
3712 }
3713 
3714 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
3715 				  u32 access, struct x86_exception *exception)
3716 {
3717 	if (exception)
3718 		exception->error_code = 0;
3719 	return vaddr;
3720 }
3721 
3722 static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
3723 					 u32 access,
3724 					 struct x86_exception *exception)
3725 {
3726 	if (exception)
3727 		exception->error_code = 0;
3728 	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
3729 }
3730 
3731 static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3732 {
3733 	/*
3734 	 * A nested guest cannot use the MMIO cache if it is using nested
3735 	 * page tables, because cr2 is an nGPA while the cache stores GPAs.
3736 	 */
3737 	if (mmu_is_nested(vcpu))
3738 		return false;
3739 
3740 	if (direct)
3741 		return vcpu_match_mmio_gpa(vcpu, addr);
3742 
3743 	return vcpu_match_mmio_gva(vcpu, addr);
3744 }
3745 
3746 /*
3747  * Return the level of the lowest level SPTE added to sptes.
3748  * That SPTE may be non-present.
3749  *
3750  * Must be called between walk_shadow_page_lockless_{begin,end}.
3751  */
3752 static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
3753 {
3754 	struct kvm_shadow_walk_iterator iterator;
3755 	int leaf = -1;
3756 	u64 spte;
3757 
3758 	for (shadow_walk_init(&iterator, vcpu, addr),
3759 	     *root_level = iterator.level;
3760 	     shadow_walk_okay(&iterator);
3761 	     __shadow_walk_next(&iterator, spte)) {
3762 		leaf = iterator.level;
3763 		spte = mmu_spte_get_lockless(iterator.sptep);
3764 
3765 		sptes[leaf] = spte;
3766 
3767 		if (!is_shadow_present_pte(spte))
3768 			break;
3769 	}
3770 
3771 	return leaf;
3772 }
3773 
3774 /* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
3775 static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
3776 {
3777 	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
3778 	struct rsvd_bits_validate *rsvd_check;
3779 	int root, leaf, level;
3780 	bool reserved = false;
3781 
3782 	walk_shadow_page_lockless_begin(vcpu);
3783 
3784 	if (is_tdp_mmu(vcpu->arch.mmu))
3785 		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
3786 	else
3787 		leaf = get_walk(vcpu, addr, sptes, &root);
3788 
3789 	walk_shadow_page_lockless_end(vcpu);
3790 
3791 	if (unlikely(leaf < 0)) {
3792 		*sptep = 0ull;
3793 		return reserved;
3794 	}
3795 
3796 	*sptep = sptes[leaf];
3797 
3798 	/*
3799 	 * Skip reserved bits checks on the terminal leaf if it's not a valid
3800 	 * SPTE.  Note, this also (intentionally) skips MMIO SPTEs, which, by
3801 	 * design, always have reserved bits set.  The purpose of the checks is
3802 	 * to detect reserved bits on non-MMIO SPTEs, i.e. buggy SPTEs.
3803 	 */
3804 	if (!is_shadow_present_pte(sptes[leaf]))
3805 		leaf++;
3806 
3807 	rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
3808 
3809 	for (level = root; level >= leaf; level--)
3810 		reserved |= is_rsvd_spte(rsvd_check, sptes[level], level);
3811 
3812 	if (reserved) {
3813 		pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
3814 		       __func__, addr);
3815 		for (level = root; level >= leaf; level--)
3816 			pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx",
3817 			       sptes[level], level,
3818 			       get_rsvd_bits(rsvd_check, sptes[level], level));
3819 	}
3820 
3821 	return reserved;
3822 }
3823 
3824 static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3825 {
3826 	u64 spte;
3827 	bool reserved;
3828 
3829 	if (mmio_info_in_cache(vcpu, addr, direct))
3830 		return RET_PF_EMULATE;
3831 
3832 	reserved = get_mmio_spte(vcpu, addr, &spte);
3833 	if (WARN_ON(reserved))
3834 		return -EINVAL;
3835 
3836 	if (is_mmio_spte(spte)) {
3837 		gfn_t gfn = get_mmio_spte_gfn(spte);
3838 		unsigned int access = get_mmio_spte_access(spte);
3839 
3840 		if (!check_mmio_spte(vcpu, spte))
3841 			return RET_PF_INVALID;
3842 
3843 		if (direct)
3844 			addr = 0;
3845 
3846 		trace_handle_mmio_page_fault(addr, gfn, access);
3847 		vcpu_cache_mmio_info(vcpu, addr, gfn, access);
3848 		return RET_PF_EMULATE;
3849 	}
3850 
3851 	/*
3852 	 * If the page table was zapped by another CPU, let the vCPU fault
3853 	 * again on the address.
3854 	 */
3855 	return RET_PF_RETRY;
3856 }
3857 
3858 static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
3859 					 u32 error_code, gfn_t gfn)
3860 {
3861 	if (unlikely(error_code & PFERR_RSVD_MASK))
3862 		return false;
3863 
3864 	if (!(error_code & PFERR_PRESENT_MASK) ||
3865 	      !(error_code & PFERR_WRITE_MASK))
3866 		return false;
3867 
3868 	/*
3869 	 * The guest is writing a page that is write-tracked, which cannot
3870 	 * be fixed by the page fault handler.
3871 	 */
3872 	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
3873 		return true;
3874 
3875 	return false;
3876 }
3877 
3878 static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
3879 {
3880 	struct kvm_shadow_walk_iterator iterator;
3881 	u64 spte;
3882 
3883 	walk_shadow_page_lockless_begin(vcpu);
3884 	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
3885 		clear_sp_write_flooding_count(iterator.sptep);
3886 		if (!is_shadow_present_pte(spte))
3887 			break;
3888 	}
3889 	walk_shadow_page_lockless_end(vcpu);
3890 }
3891 
3892 static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
3893 				    gfn_t gfn)
3894 {
3895 	struct kvm_arch_async_pf arch;
3896 
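	/*
	 * Build a token that identifies this async #PF: an incrementing
	 * per-vCPU id in the upper bits, the vcpu_id in the low 12 bits.
	 */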
3897 	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
3898 	arch.gfn = gfn;
3899 	arch.direct_map = vcpu->arch.mmu->direct_map;
3900 	arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
3901 
3902 	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
3903 				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
3904 }
3905 
3906 static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
3907 			 gpa_t cr2_or_gpa, kvm_pfn_t *pfn, hva_t *hva,
3908 			 bool write, bool *writable, int *r)
3909 {
3910 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3911 	bool async;
3912 
3913 	/*
3914 	 * Retry the page fault if the gfn hit a memslot that is being deleted
3915 	 * or moved.  This ensures any existing SPTEs for the old memslot will
3916 	 * be zapped before KVM inserts a new MMIO SPTE for the gfn.
3917 	 */
3918 	if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
3919 		goto out_retry;
3920 
3921 	if (!kvm_is_visible_memslot(slot)) {
3922 		/* Don't expose private memslots to L2. */
3923 		if (is_guest_mode(vcpu)) {
3924 			*pfn = KVM_PFN_NOSLOT;
3925 			*writable = false;
3926 			return false;
3927 		}
3928 		/*
3929 		 * If the APIC access page exists but is disabled, go directly
3930 		 * to emulation without caching the MMIO access or creating an
3931 		 * MMIO SPTE.  That way the cache doesn't need to be purged
3932 		 * when the AVIC is re-enabled.
3933 		 */
3934 		if (slot && slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT &&
3935 		    !kvm_apicv_activated(vcpu->kvm)) {
3936 			*r = RET_PF_EMULATE;
3937 			return true;
3938 		}
3939 	}
3940 
3941 	async = false;
3942 	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async,
3943 				    write, writable, hva);
3944 	if (!async)
3945 		return false; /* *pfn has correct page already */
3946 
3947 	if (!prefault && kvm_can_do_async_pf(vcpu)) {
3948 		trace_kvm_try_async_get_page(cr2_or_gpa, gfn);
3949 		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
3950 			trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn);
3951 			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
3952 			goto out_retry;
3953 		} else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
3954 			goto out_retry;
3955 	}
3956 
3957 	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL,
3958 				    write, writable, hva);
3959 
3960 out_retry:
3961 	*r = RET_PF_RETRY;
3962 	return true;
3963 }
3964 
3965 static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
3966 			     bool prefault, int max_level, bool is_tdp)
3967 {
3968 	bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
3969 	bool write = error_code & PFERR_WRITE_MASK;
3970 	bool map_writable;
3971 
3972 	gfn_t gfn = gpa >> PAGE_SHIFT;
3973 	unsigned long mmu_seq;
3974 	kvm_pfn_t pfn;
3975 	hva_t hva;
3976 	int r;
3977 
3978 	if (page_fault_handle_page_track(vcpu, error_code, gfn))
3979 		return RET_PF_EMULATE;
3980 
3981 	r = fast_page_fault(vcpu, gpa, error_code);
3982 	if (r != RET_PF_INVALID)
3983 		return r;
3984 
3985 	r = mmu_topup_memory_caches(vcpu, false);
3986 	if (r)
3987 		return r;
3988 
3989 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
3990 	smp_rmb();
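	/*
	 * Snapshot the notifier count before faulting in the pfn;
	 * mmu_notifier_retry_hva() rechecks it under mmu_lock below to
	 * detect an invalidation that raced with this fault.
	 */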
3991 
3992 	if (kvm_faultin_pfn(vcpu, prefault, gfn, gpa, &pfn, &hva,
3993 			 write, &map_writable, &r))
3994 		return r;
3995 
3996 	if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
3997 		return r;
3998 
3999 	r = RET_PF_RETRY;
4000 
4001 	if (is_tdp_mmu_fault)
4002 		read_lock(&vcpu->kvm->mmu_lock);
4003 	else
4004 		write_lock(&vcpu->kvm->mmu_lock);
4005 
4006 	if (!is_noslot_pfn(pfn) && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, hva))
4007 		goto out_unlock;
4008 	r = make_mmu_pages_available(vcpu);
4009 	if (r)
4010 		goto out_unlock;
4011 
4012 	if (is_tdp_mmu_fault)
4013 		r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, max_level,
4014 				    pfn, prefault);
4015 	else
4016 		r = __direct_map(vcpu, gpa, error_code, map_writable, max_level, pfn,
4017 				 prefault, is_tdp);
4018 
4019 out_unlock:
4020 	if (is_tdp_mmu_fault)
4021 		read_unlock(&vcpu->kvm->mmu_lock);
4022 	else
4023 		write_unlock(&vcpu->kvm->mmu_lock);
4024 	kvm_release_pfn_clean(pfn);
4025 	return r;
4026 }
4027 
4028 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
4029 				u32 error_code, bool prefault)
4030 {
4031 	pgprintk("%s: gpa %llx error %x\n", __func__, gpa, error_code);
4032 
4033 	/* This path builds a PAE pagetable; we can map 2MB pages at maximum. */
4034 	return direct_page_fault(vcpu, gpa & PAGE_MASK, error_code, prefault,
4035 				 PG_LEVEL_2M, false);
4036 }
4037 
4038 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
4039 				u64 fault_address, char *insn, int insn_len)
4040 {
4041 	int r = 1;
4042 	u32 flags = vcpu->arch.apf.host_apf_flags;
4043 
4044 #ifndef CONFIG_X86_64
4045 	/* A 64-bit CR2 should be impossible on 32-bit KVM. */
4046 	if (WARN_ON_ONCE(fault_address >> 32))
4047 		return -EFAULT;
4048 #endif
4049 
4050 	vcpu->arch.l1tf_flush_l1d = true;
4051 	if (!flags) {
4052 		trace_kvm_page_fault(fault_address, error_code);
4053 
4054 		if (kvm_event_needs_reinjection(vcpu))
4055 			kvm_mmu_unprotect_page_virt(vcpu, fault_address);
4056 		r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
4057 				insn_len);
4058 	} else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
4059 		vcpu->arch.apf.host_apf_flags = 0;
4060 		local_irq_disable();
4061 		kvm_async_pf_task_wait_schedule(fault_address);
4062 		local_irq_enable();
4063 	} else {
4064 		WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags);
4065 	}
4066 
4067 	return r;
4068 }
4069 EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
4070 
4071 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
4072 		       bool prefault)
4073 {
4074 	int max_level;
4075 
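	/*
	 * Shrink max_level until every 4K page within the would-be huge page
	 * shares the same MTRR memory type; a range with mixed types must not
	 * be mapped by a single huge SPTE.
	 */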
4076 	for (max_level = KVM_MAX_HUGEPAGE_LEVEL;
4077 	     max_level > PG_LEVEL_4K;
4078 	     max_level--) {
4079 		int page_num = KVM_PAGES_PER_HPAGE(max_level);
4080 		gfn_t base = (gpa >> PAGE_SHIFT) & ~(page_num - 1);
4081 
4082 		if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num))
4083 			break;
4084 	}
4085 
4086 	return direct_page_fault(vcpu, gpa, error_code, prefault,
4087 				 max_level, true);
4088 }
4089 
4090 static void nonpaging_init_context(struct kvm_mmu *context)
4091 {
4092 	context->page_fault = nonpaging_page_fault;
4093 	context->gva_to_gpa = nonpaging_gva_to_gpa;
4094 	context->sync_page = nonpaging_sync_page;
4095 	context->invlpg = NULL;
4096 	context->direct_map = true;
4097 }
4098 
4099 static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
4100 				  union kvm_mmu_page_role role)
4101 {
4102 	return (role.direct || pgd == root->pgd) &&
4103 	       VALID_PAGE(root->hpa) && to_shadow_page(root->hpa) &&
4104 	       role.word == to_shadow_page(root->hpa)->role.word;
4105 }
4106 
4107 /*
4108  * Find out if a previously cached root matching the new pgd/role is available.
4109  * The current root is also inserted into the cache.
4110  * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is
4111  * returned.
4112  * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and
4113  * false is returned. This root should now be freed by the caller.
4114  */
4115 static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_pgd,
4116 				  union kvm_mmu_page_role new_role)
4117 {
4118 	uint i;
4119 	struct kvm_mmu_root_info root;
4120 	struct kvm_mmu *mmu = vcpu->arch.mmu;
4121 
4122 	root.pgd = mmu->root_pgd;
4123 	root.hpa = mmu->root_hpa;
4124 
4125 	if (is_root_usable(&root, new_pgd, new_role))
4126 		return true;
4127 
4128 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
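		/*
		 * swap() evicts the outgoing root into slot i and pulls the
		 * old occupant out for testing, so the cache behaves like a
		 * move-to-front LRU list.
		 */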
4129 		swap(root, mmu->prev_roots[i]);
4130 
4131 		if (is_root_usable(&root, new_pgd, new_role))
4132 			break;
4133 	}
4134 
4135 	mmu->root_hpa = root.hpa;
4136 	mmu->root_pgd = root.pgd;
4137 
4138 	return i < KVM_MMU_NUM_PREV_ROOTS;
4139 }
4140 
4141 static bool fast_pgd_switch(struct kvm_vcpu *vcpu, gpa_t new_pgd,
4142 			    union kvm_mmu_page_role new_role)
4143 {
4144 	struct kvm_mmu *mmu = vcpu->arch.mmu;
4145 
4146 	/*
4147 	 * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid
4148 	 * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs
4149 	 * later if necessary.
4150 	 */
4151 	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
4152 	    mmu->root_level >= PT64_ROOT_4LEVEL)
4153 		return cached_root_available(vcpu, new_pgd, new_role);
4154 
4155 	return false;
4156 }
4157 
4158 static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
4159 			      union kvm_mmu_page_role new_role)
4160 {
4161 	if (!fast_pgd_switch(vcpu, new_pgd, new_role)) {
4162 		kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
4163 		return;
4164 	}
4165 
4166 	/*
4167 	 * It's possible that the cached previous root page is obsolete because
4168 	 * of a change in the MMU generation number. However, changing the
4169 	 * generation number is accompanied by KVM_REQ_MMU_RELOAD, which will
4170 	 * free the root set here and allocate a new one.
4171 	 */
4172 	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
4173 
4174 	if (force_flush_and_sync_on_reuse) {
4175 		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
4176 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
4177 	}
4178 
4179 	/*
4180 	 * The last MMIO access's GVA and GPA are cached in the VCPU. When
4181 	 * switching to a new CR3, that GVA->GPA mapping may no longer be
4182 	 * valid. So clear any cached MMIO info even when we don't need to sync
4183 	 * the shadow page tables.
4184 	 */
4185 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
4186 
4187 	/*
4188 	 * If this is a direct root page, it doesn't have a write flooding
4189 	 * count. Otherwise, clear the write flooding count.
4190 	 */
4191 	if (!new_role.direct)
4192 		__clear_sp_write_flooding_count(
4193 				to_shadow_page(vcpu->arch.mmu->root_hpa));
4194 }
4195 
4196 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
4197 {
4198 	__kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu));
4199 }
4200 EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
4201 
4202 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
4203 {
4204 	return kvm_read_cr3(vcpu);
4205 }
4206 
4207 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
4208 			   unsigned int access, int *nr_present)
4209 {
4210 	if (unlikely(is_mmio_spte(*sptep))) {
4211 		if (gfn != get_mmio_spte_gfn(*sptep)) {
4212 			mmu_spte_clear_no_track(sptep);
4213 			return true;
4214 		}
4215 
4216 		(*nr_present)++;
4217 		mark_mmio_spte(vcpu, sptep, gfn, access);
4218 		return true;
4219 	}
4220 
4221 	return false;
4222 }
4223 
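/*
 * paging_tmpl.h is a template: each inclusion below instantiates the guest
 * page table walker and page fault handler for one PTE format, generating
 * the ept_*, paging64_* and paging32_* variants.
 */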
4224 #define PTTYPE_EPT 18 /* arbitrary */
4225 #define PTTYPE PTTYPE_EPT
4226 #include "paging_tmpl.h"
4227 #undef PTTYPE
4228 
4229 #define PTTYPE 64
4230 #include "paging_tmpl.h"
4231 #undef PTTYPE
4232 
4233 #define PTTYPE 32
4234 #include "paging_tmpl.h"
4235 #undef PTTYPE
4236 
4237 static void
4238 __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
4239 			u64 pa_bits_rsvd, int level, bool nx, bool gbpages,
4240 			bool pse, bool amd)
4241 {
4242 	u64 gbpages_bit_rsvd = 0;
4243 	u64 nonleaf_bit8_rsvd = 0;
4244 	u64 high_bits_rsvd;
4245 
4246 	rsvd_check->bad_mt_xwr = 0;
4247 
4248 	if (!gbpages)
4249 		gbpages_bit_rsvd = rsvd_bits(7, 7);
4250 
4251 	if (level == PT32E_ROOT_LEVEL)
4252 		high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 62);
4253 	else
4254 		high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
4255 
4256 	/* Note, NX doesn't exist in PDPTEs, this is handled below. */
4257 	if (!nx)
4258 		high_bits_rsvd |= rsvd_bits(63, 63);
4259 
4260 	/*
4261 	 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
4262 	 * leaf entries) on AMD CPUs only.
4263 	 */
4264 	if (amd)
4265 		nonleaf_bit8_rsvd = rsvd_bits(8, 8);
4266 
4267 	switch (level) {
4268 	case PT32_ROOT_LEVEL:
4269 		/* no rsvd bits for 2-level 4K page table entries */
4270 		rsvd_check->rsvd_bits_mask[0][1] = 0;
4271 		rsvd_check->rsvd_bits_mask[0][0] = 0;
4272 		rsvd_check->rsvd_bits_mask[1][0] =
4273 			rsvd_check->rsvd_bits_mask[0][0];
4274 
4275 		if (!pse) {
4276 			rsvd_check->rsvd_bits_mask[1][1] = 0;
4277 			break;
4278 		}
4279 
4280 		if (is_cpuid_PSE36())
4281 			/* 36-bit PSE 4MB page */
4282 			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
4283 		else
4284 			/* 32-bit PSE 4MB page */
4285 			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
4286 		break;
4287 	case PT32E_ROOT_LEVEL:
4288 		rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(63, 63) |
4289 						   high_bits_rsvd |
4290 						   rsvd_bits(5, 8) |
4291 						   rsvd_bits(1, 2);	/* PDPTE */
4292 		rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;	/* PDE */
4293 		rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;	/* PTE */
4294 		rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
4295 						   rsvd_bits(13, 20);	/* large page */
4296 		rsvd_check->rsvd_bits_mask[1][0] =
4297 			rsvd_check->rsvd_bits_mask[0][0];
4298 		break;
4299 	case PT64_ROOT_5LEVEL:
4300 		rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd |
4301 						   nonleaf_bit8_rsvd |
4302 						   rsvd_bits(7, 7);
4303 		rsvd_check->rsvd_bits_mask[1][4] =
4304 			rsvd_check->rsvd_bits_mask[0][4];
4305 		fallthrough;
4306 	case PT64_ROOT_4LEVEL:
4307 		rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd |
4308 						   nonleaf_bit8_rsvd |
4309 						   rsvd_bits(7, 7);
4310 		rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd |
4311 						   gbpages_bit_rsvd;
4312 		rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;
4313 		rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4314 		rsvd_check->rsvd_bits_mask[1][3] =
4315 			rsvd_check->rsvd_bits_mask[0][3];
4316 		rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd |
4317 						   gbpages_bit_rsvd |
4318 						   rsvd_bits(13, 29);
4319 		rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
4320 						   rsvd_bits(13, 20); /* large page */
4321 		rsvd_check->rsvd_bits_mask[1][0] =
4322 			rsvd_check->rsvd_bits_mask[0][0];
4323 		break;
4324 	}
4325 }
4326 
4327 static bool guest_can_use_gbpages(struct kvm_vcpu *vcpu)
4328 {
4329 	/*
4330 	 * If TDP is enabled, let the guest use GBPAGES if they're supported in
4331 	 * hardware.  The hardware page walker doesn't let KVM disable GBPAGES,
4332 	 * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA
4333 	 * walk for performance and complexity reasons.  Not to mention KVM
4334 	 * _can't_ solve the problem because GVA->GPA walks aren't visible to
4335 	 * KVM once a TDP translation is installed.  Mimic hardware behavior so
4336 	 * that KVM's is at least consistent, i.e. doesn't randomly inject #PF.
4337 	 */
4338 	return tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :
4339 			     guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES);
4340 }
4341 
4342 static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
4343 				  struct kvm_mmu *context)
4344 {
4345 	__reset_rsvds_bits_mask(&context->guest_rsvd_check,
4346 				vcpu->arch.reserved_gpa_bits,
4347 				context->root_level, is_efer_nx(context),
4348 				guest_can_use_gbpages(vcpu),
4349 				is_cr4_pse(context),
4350 				guest_cpuid_is_amd_or_hygon(vcpu));
4351 }
4352 
4353 static void
4354 __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
4355 			    u64 pa_bits_rsvd, bool execonly)
4356 {
4357 	u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
4358 	u64 bad_mt_xwr;
4359 
4360 	rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7);
4361 	rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7);
4362 	rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6);
4363 	rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6);
4364 	rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4365 
4366 	/* large page */
4367 	rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
4368 	rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
4369 	rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29);
4370 	rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20);
4371 	rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
4372 
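	/*
	 * bad_mt_xwr is a 64-bit bitmap indexed by the low 6 bits of an EPT
	 * SPTE, i.e. (memtype << 3) | XWR; a set bit marks that combination
	 * as illegal.
	 */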
4373 	bad_mt_xwr = 0xFFull << (2 * 8);	/* bits 3..5 must not be 2 */
4374 	bad_mt_xwr |= 0xFFull << (3 * 8);	/* bits 3..5 must not be 3 */
4375 	bad_mt_xwr |= 0xFFull << (7 * 8);	/* bits 3..5 must not be 7 */
4376 	bad_mt_xwr |= REPEAT_BYTE(1ull << 2);	/* bits 0..2 must not be 010 */
4377 	bad_mt_xwr |= REPEAT_BYTE(1ull << 6);	/* bits 0..2 must not be 110 */
4378 	if (!execonly) {
4379 		/* bits 0..2 must not be 100 unless VMX capabilities allow it */
4380 		bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
4381 	}
4382 	rsvd_check->bad_mt_xwr = bad_mt_xwr;
4383 }
4384 
4385 static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
4386 		struct kvm_mmu *context, bool execonly)
4387 {
4388 	__reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
4389 				    vcpu->arch.reserved_gpa_bits, execonly);
4390 }
4391 
4392 static inline u64 reserved_hpa_bits(void)
4393 {
4394 	return rsvd_bits(shadow_phys_bits, 63);
4395 }
4396 
4397 /*
4398  * The page table on the host is the shadow page table for the page
4399  * table in the guest or an AMD nested guest; its MMU features
4400  * completely follow the guest's features.
4401  */
4402 static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
4403 					struct kvm_mmu *context)
4404 {
4405 	/*
4406 	 * KVM uses NX when TDP is disabled to handle a variety of scenarios,
4407 	 * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
4408 	 * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
4409 	 * The iTLB multi-hit workaround can be toggled at any time, so assume
4410 	 * NX can be used by any non-nested shadow MMU to avoid having to reset
4411 	 * MMU contexts.  Note, KVM forces EFER.NX=1 when TDP is disabled.
4412 	 */
4413 	bool uses_nx = is_efer_nx(context) || !tdp_enabled;
4414 
4415 	/* @amd adds a check on bit 8 of SPTEs, which KVM shouldn't use anyway. */
4416 	bool is_amd = true;
4417 	/* KVM doesn't use 2-level page tables for the shadow MMU. */
4418 	bool is_pse = false;
4419 	struct rsvd_bits_validate *shadow_zero_check;
4420 	int i;
4421 
4422 	WARN_ON_ONCE(context->shadow_root_level < PT32E_ROOT_LEVEL);
4423 
4424 	shadow_zero_check = &context->shadow_zero_check;
4425 	__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
4426 				context->shadow_root_level, uses_nx,
4427 				guest_can_use_gbpages(vcpu), is_pse, is_amd);
4428 
4429 	if (!shadow_me_mask)
4430 		return;
4431 
4432 	for (i = context->shadow_root_level; --i >= 0;) {
4433 		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
4434 		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
4435 	}
4436 
4437 }
4438 
4439 static inline bool boot_cpu_is_amd(void)
4440 {
4441 	WARN_ON_ONCE(!tdp_enabled);
4442 	return shadow_x_mask == 0;
4443 }
4444 
4445 /*
4446  * The direct page table on the host uses as many MMU features as
4447  * possible; however, KVM currently does not do execution-protection.
4448  */
4449 static void
4450 reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
4451 				struct kvm_mmu *context)
4452 {
4453 	struct rsvd_bits_validate *shadow_zero_check;
4454 	int i;
4455 
4456 	shadow_zero_check = &context->shadow_zero_check;
4457 
4458 	if (boot_cpu_is_amd())
4459 		__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
4460 					context->shadow_root_level, false,
4461 					boot_cpu_has(X86_FEATURE_GBPAGES),
4462 					false, true);
4463 	else
4464 		__reset_rsvds_bits_mask_ept(shadow_zero_check,
4465 					    reserved_hpa_bits(), false);
4466 
4467 	if (!shadow_me_mask)
4468 		return;
4469 
4470 	for (i = context->shadow_root_level; --i >= 0;) {
4471 		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
4472 		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
4473 	}
4474 }
4475 
4476 /*
4477  * Same as the comments in reset_shadow_zero_bits_mask(), except this is
4478  * the shadow page table for an Intel nested guest.
4479  */
4480 static void
4481 reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
4482 				struct kvm_mmu *context, bool execonly)
4483 {
4484 	__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
4485 				    reserved_hpa_bits(), execonly);
4486 }
4487 
4488 #define BYTE_MASK(access) \
4489 	((1 & (access) ? 2 : 0) | \
4490 	 (2 & (access) ? 4 : 0) | \
4491 	 (3 & (access) ? 8 : 0) | \
4492 	 (4 & (access) ? 16 : 0) | \
4493 	 (5 & (access) ? 32 : 0) | \
4494 	 (6 & (access) ? 64 : 0) | \
4495 	 (7 & (access) ? 128 : 0))
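/*
 * Bit i of BYTE_MASK(acc) is set iff UWX combination i includes access type
 * acc, e.g. BYTE_MASK(ACC_EXEC_MASK) == 0xAA (bits 1, 3, 5 and 7).
 */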
4496 
4497 
4498 static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
4499 {
4500 	unsigned byte;
4501 
4502 	const u8 x = BYTE_MASK(ACC_EXEC_MASK);
4503 	const u8 w = BYTE_MASK(ACC_WRITE_MASK);
4504 	const u8 u = BYTE_MASK(ACC_USER_MASK);
4505 
4506 	bool cr4_smep = is_cr4_smep(mmu);
4507 	bool cr4_smap = is_cr4_smap(mmu);
4508 	bool cr0_wp = is_cr0_wp(mmu);
4509 	bool efer_nx = is_efer_nx(mmu);
4510 
4511 	for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
4512 		unsigned pfec = byte << 1;
4513 
4514 		/*
4515 		 * Each "*f" variable has a 1 bit for each UWX value
4516 		 * that causes a fault with the given PFEC.
4517 		 */
4518 
4519 		/* Faults from writes to non-writable pages */
4520 		u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
4521 		/* Faults from user mode accesses to supervisor pages */
4522 		u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
4523 		/* Faults from fetches of non-executable pages */
4524 		u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
4525 		/* Faults from kernel mode fetches of user pages */
4526 		u8 smepf = 0;
4527 		/* Faults from kernel mode accesses of user pages */
4528 		u8 smapf = 0;
4529 
4530 		if (!ept) {
4531 			/* Faults from kernel mode accesses to user pages */
4532 			u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;
4533 
4534 			/* Not really needed: !nx will cause pte.nx to fault */
4535 			if (!efer_nx)
4536 				ff = 0;
4537 
4538 			/* Allow supervisor writes if !cr0.wp */
4539 			if (!cr0_wp)
4540 				wf = (pfec & PFERR_USER_MASK) ? wf : 0;
4541 
4542 			/* Disallow supervisor fetches of user code if cr4.smep */
4543 			if (cr4_smep)
4544 				smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;
4545 
4546 			/*
4547 			 * SMAP: kernel-mode data accesses from user-mode
4548 			 * mappings should fault. A fault is considered
4549 			 * as a SMAP violation if all of the following
4550 			 * conditions are true:
4551 			 *   - X86_CR4_SMAP is set in CR4
4552 			 *   - A user page is accessed
4553 			 *   - The access is not a fetch
4554 			 *   - Page fault in kernel mode
4555 			 *   - if CPL = 3 or X86_EFLAGS_AC is clear
4556 			 *
4557 			 * Here, we cover the first three conditions.
4558 			 * The fourth is computed dynamically in permission_fault();
4559 			 * PFERR_RSVD_MASK bit will be set in PFEC if the access is
4560 			 * *not* subject to SMAP restrictions.
4561 			 */
4562 			if (cr4_smap)
4563 				smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
4564 		}
4565 
4566 		mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
4567 	}
4568 }
4569 
4570 /*
4571 * PKU is an additional mechanism by which paging controls access to
4572 * user-mode addresses based on the value in the PKRU register.  Protection
4573 * key violations are reported through a bit in the page fault error code.
4574 * Unlike other bits of the error code, the PK bit is not known at the
4575 * call site of e.g. gva_to_gpa; it must be computed directly in
4576 * permission_fault based on two bits of PKRU, on some machine state (CR4,
4577 * CR0, EFER, CPL), and on other bits of the error code and the page tables.
4578 *
4579 * In particular the following conditions come from the error code, the
4580 * page tables and the machine state:
4581 * - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
4582 * - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
4583 * - PK is always zero if U=0 in the page tables
4584 * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
4585 *
4586 * The PKRU bitmask caches the result of these four conditions.  The error
4587 * code (minus the P bit) and the page table's U bit form an index into the
4588 * PKRU bitmask.  Two bits of the PKRU bitmask are then extracted and ANDed
4589 * with the two bits of the PKRU register corresponding to the protection key.
4590 * For the first three conditions above the bits will be 00, thus masking
4591 * away both AD and WD.  For all reads, or if the last condition holds,
4592 * only WD will be masked away.
4593 */
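/*
 * For example, with CR0.WP=1 a supervisor write to a user page computes
 * check_pkey=1 and check_write=1 below, i.e. both PKRU.AD and PKRU.WD of
 * the page's protection key can deny the access.
 */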
4594 static void update_pkru_bitmask(struct kvm_mmu *mmu)
4595 {
4596 	unsigned bit;
4597 	bool wp;
4598 
4599 	mmu->pkru_mask = 0;
4600 
4601 	if (!is_cr4_pke(mmu))
4602 		return;
4603 
4604 	wp = is_cr0_wp(mmu);
4605 
4606 	for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
4607 		unsigned pfec, pkey_bits;
4608 		bool check_pkey, check_write, ff, uf, wf, pte_user;
4609 
4610 		pfec = bit << 1;
4611 		ff = pfec & PFERR_FETCH_MASK;
4612 		uf = pfec & PFERR_USER_MASK;
4613 		wf = pfec & PFERR_WRITE_MASK;
4614 
4615 		/* PFEC.RSVD is replaced by ACC_USER_MASK. */
4616 		pte_user = pfec & PFERR_RSVD_MASK;
4617 
4618 		/*
4619 		 * Only accesses that are not instruction fetches and that
4620 		 * target a user page need a protection-key check.
4621 		 */
4622 		check_pkey = (!ff && pte_user);
4623 		/*
4624 		 * Write access is controlled by PKRU if it is a user access
4625 		 * user access or CR0.WP = 1.
4626 		 */
4627 		check_write = check_pkey && wf && (uf || wp);
4628 
4629 		/* PKRU.AD stops both read and write access. */
4630 		pkey_bits = !!check_pkey;
4631 		/* PKRU.WD stops write access. */
4632 		pkey_bits |= (!!check_write) << 1;
4633 
4634 		mmu->pkru_mask |= (pkey_bits & 3) << pfec;
4635 	}
4636 }
4637 
4638 static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu,
4639 					struct kvm_mmu *mmu)
4640 {
4641 	if (!is_cr0_pg(mmu))
4642 		return;
4643 
4644 	reset_rsvds_bits_mask(vcpu, mmu);
4645 	update_permission_bitmask(mmu, false);
4646 	update_pkru_bitmask(mmu);
4647 }
4648 
4649 static void paging64_init_context(struct kvm_mmu *context)
4650 {
4651 	context->page_fault = paging64_page_fault;
4652 	context->gva_to_gpa = paging64_gva_to_gpa;
4653 	context->sync_page = paging64_sync_page;
4654 	context->invlpg = paging64_invlpg;
4655 	context->direct_map = false;
4656 }
4657 
4658 static void paging32_init_context(struct kvm_mmu *context)
4659 {
4660 	context->page_fault = paging32_page_fault;
4661 	context->gva_to_gpa = paging32_gva_to_gpa;
4662 	context->sync_page = paging32_sync_page;
4663 	context->invlpg = paging32_invlpg;
4664 	context->direct_map = false;
4665 }
4666 
4667 static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
4668 							 struct kvm_mmu_role_regs *regs)
4669 {
4670 	union kvm_mmu_extended_role ext = {0};
4671 
4672 	if (____is_cr0_pg(regs)) {
4673 		ext.cr0_pg = 1;
4674 		ext.cr4_pae = ____is_cr4_pae(regs);
4675 		ext.cr4_smep = ____is_cr4_smep(regs);
4676 		ext.cr4_smap = ____is_cr4_smap(regs);
4677 		ext.cr4_pse = ____is_cr4_pse(regs);
4678 
4679 		/* PKEY and LA57 are active iff long mode is active. */
4680 		ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
4681 		ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
4682 	}
4683 
4684 	ext.valid = 1;
4685 
4686 	return ext;
4687 }
4688 
4689 static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
4690 						   struct kvm_mmu_role_regs *regs,
4691 						   bool base_only)
4692 {
4693 	union kvm_mmu_role role = {0};
4694 
4695 	role.base.access = ACC_ALL;
4696 	if (____is_cr0_pg(regs)) {
4697 		role.base.efer_nx = ____is_efer_nx(regs);
4698 		role.base.cr0_wp = ____is_cr0_wp(regs);
4699 	}
4700 	role.base.smm = is_smm(vcpu);
4701 	role.base.guest_mode = is_guest_mode(vcpu);
4702 
4703 	if (base_only)
4704 		return role;
4705 
4706 	role.ext = kvm_calc_mmu_role_ext(vcpu, regs);
4707 
4708 	return role;
4709 }
4710 
4711 static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
4712 {
4713 	/* tdp_root_level is the architecture-forced level; use it if nonzero. */
4714 	if (tdp_root_level)
4715 		return tdp_root_level;
4716 
4717 	/* Use 5-level TDP if and only if it's useful/necessary. */
4718 	if (max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48)
4719 		return 4;
4720 
4721 	return max_tdp_level;
4722 }
4723 
4724 static union kvm_mmu_role
4725 kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
4726 				struct kvm_mmu_role_regs *regs, bool base_only)
4727 {
4728 	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
4729 
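	/*
	 * shadow_accessed_mask is zero iff the TDP format lacks hardware A/D
	 * bits, e.g. EPT without A/D support, so disable A/D tracking in the
	 * role as well.
	 */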
4730 	role.base.ad_disabled = (shadow_accessed_mask == 0);
4731 	role.base.level = kvm_mmu_get_tdp_level(vcpu);
4732 	role.base.direct = true;
4733 	role.base.gpte_is_8_bytes = true;
4734 
4735 	return role;
4736 }
4737 
4738 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
4739 {
4740 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
4741 	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
4742 	union kvm_mmu_role new_role =
4743 		kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, false);
4744 
4745 	if (new_role.as_u64 == context->mmu_role.as_u64)
4746 		return;
4747 
4748 	context->mmu_role.as_u64 = new_role.as_u64;
4749 	context->page_fault = kvm_tdp_page_fault;
4750 	context->sync_page = nonpaging_sync_page;
4751 	context->invlpg = NULL;
4752 	context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu);
4753 	context->direct_map = true;
4754 	context->get_guest_pgd = get_cr3;
4755 	context->get_pdptr = kvm_pdptr_read;
4756 	context->inject_page_fault = kvm_inject_page_fault;
4757 	context->root_level = role_regs_to_root_level(&regs);
4758 
4759 	if (!is_cr0_pg(context))
4760 		context->gva_to_gpa = nonpaging_gva_to_gpa;
4761 	else if (is_cr4_pae(context))
4762 		context->gva_to_gpa = paging64_gva_to_gpa;
4763 	else
4764 		context->gva_to_gpa = paging32_gva_to_gpa;
4765 
4766 	reset_guest_paging_metadata(vcpu, context);
4767 	reset_tdp_shadow_zero_bits_mask(vcpu, context);
4768 }
4769 
4770 static union kvm_mmu_role
4771 kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
4772 				      struct kvm_mmu_role_regs *regs, bool base_only)
4773 {
4774 	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
4775 
4776 	role.base.smep_andnot_wp = role.ext.cr4_smep && !____is_cr0_wp(regs);
4777 	role.base.smap_andnot_wp = role.ext.cr4_smap && !____is_cr0_wp(regs);
4778 	role.base.gpte_is_8_bytes = ____is_cr0_pg(regs) && ____is_cr4_pae(regs);
4779 
4780 	return role;
4781 }
4782 
4783 static union kvm_mmu_role
4784 kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
4785 				   struct kvm_mmu_role_regs *regs, bool base_only)
4786 {
4787 	union kvm_mmu_role role =
4788 		kvm_calc_shadow_root_page_role_common(vcpu, regs, base_only);
4789 
4790 	role.base.direct = !____is_cr0_pg(regs);
4791 
4792 	if (!____is_efer_lma(regs))
4793 		role.base.level = PT32E_ROOT_LEVEL;
4794 	else if (____is_cr4_la57(regs))
4795 		role.base.level = PT64_ROOT_5LEVEL;
4796 	else
4797 		role.base.level = PT64_ROOT_4LEVEL;
4798 
4799 	return role;
4800 }
4801 
4802 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
4803 				    struct kvm_mmu_role_regs *regs,
4804 				    union kvm_mmu_role new_role)
4805 {
4806 	if (new_role.as_u64 == context->mmu_role.as_u64)
4807 		return;
4808 
4809 	context->mmu_role.as_u64 = new_role.as_u64;
4810 
4811 	if (!is_cr0_pg(context))
4812 		nonpaging_init_context(context);
4813 	else if (is_cr4_pae(context))
4814 		paging64_init_context(context);
4815 	else
4816 		paging32_init_context(context);
4817 	context->root_level = role_regs_to_root_level(regs);
4818 
4819 	reset_guest_paging_metadata(vcpu, context);
4820 	context->shadow_root_level = new_role.base.level;
4821 
4822 	reset_shadow_zero_bits_mask(vcpu, context);
4823 }
4824 
4825 static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
4826 				struct kvm_mmu_role_regs *regs)
4827 {
4828 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
4829 	union kvm_mmu_role new_role =
4830 		kvm_calc_shadow_mmu_root_page_role(vcpu, regs, false);
4831 
4832 	shadow_mmu_init_context(vcpu, context, regs, new_role);
4833 }
4834 
4835 static union kvm_mmu_role
4836 kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
4837 				   struct kvm_mmu_role_regs *regs)
4838 {
4839 	union kvm_mmu_role role =
4840 		kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
4841 
4842 	role.base.direct = false;
4843 	role.base.level = kvm_mmu_get_tdp_level(vcpu);
4844 
4845 	return role;
4846 }
4847 
4848 void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
4849 			     unsigned long cr4, u64 efer, gpa_t nested_cr3)
4850 {
4851 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4852 	struct kvm_mmu_role_regs regs = {
4853 		.cr0 = cr0,
4854 		.cr4 = cr4,
4855 		.efer = efer,
4856 	};
4857 	union kvm_mmu_role new_role;
4858 
4859 	new_role = kvm_calc_shadow_npt_root_page_role(vcpu, &regs);
4860 
4861 	__kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base);
4862 
4863 	shadow_mmu_init_context(vcpu, context, &regs, new_role);
4864 }
4865 EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
4866 
4867 static union kvm_mmu_role
4868 kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
4869 				   bool execonly, u8 level)
4870 {
4871 	union kvm_mmu_role role = {0};
4872 
4873 	/* SMM flag is inherited from root_mmu */
4874 	role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;
4875 
4876 	role.base.level = level;
4877 	role.base.gpte_is_8_bytes = true;
4878 	role.base.direct = false;
4879 	role.base.ad_disabled = !accessed_dirty;
4880 	role.base.guest_mode = true;
4881 	role.base.access = ACC_ALL;
4882 
4883 	/* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */
4884 	role.ext.word = 0;
4885 	role.ext.execonly = execonly;
4886 	role.ext.valid = 1;
4887 
4888 	return role;
4889 }
4890 
4891 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
4892 			     bool accessed_dirty, gpa_t new_eptp)
4893 {
4894 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4895 	u8 level = vmx_eptp_page_walk_level(new_eptp);
4896 	union kvm_mmu_role new_role =
4897 		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
4898 						   execonly, level);
4899 
4900 	__kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base);
4901 
4902 	if (new_role.as_u64 == context->mmu_role.as_u64)
4903 		return;
4904 
4905 	context->mmu_role.as_u64 = new_role.as_u64;
4906 
4907 	context->shadow_root_level = level;
4908 
4909 	context->ept_ad = accessed_dirty;
4910 	context->page_fault = ept_page_fault;
4911 	context->gva_to_gpa = ept_gva_to_gpa;
4912 	context->sync_page = ept_sync_page;
4913 	context->invlpg = ept_invlpg;
4914 	context->root_level = level;
4915 	context->direct_map = false;
4916 
4917 	update_permission_bitmask(context, true);
4918 	update_pkru_bitmask(context);
4919 	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
4920 	reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
4921 }
4922 EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
4923 
4924 static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
4925 {
4926 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
4927 	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
4928 
4929 	kvm_init_shadow_mmu(vcpu, &regs);
4930 
4931 	context->get_guest_pgd     = get_cr3;
4932 	context->get_pdptr         = kvm_pdptr_read;
4933 	context->inject_page_fault = kvm_inject_page_fault;
4934 }
4935 
4936 static union kvm_mmu_role
4937 kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, struct kvm_mmu_role_regs *regs)
4938 {
4939 	union kvm_mmu_role role;
4940 
4941 	role = kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
4942 
4943 	/*
4944 	 * Nested MMUs are used only for walking L2's gva->gpa, they never have
4945 	 * shadow pages of their own and so "direct" has no meaning.  Set it
4946 	 * to "true" to try to detect bogus usage of the nested MMU.
4947 	 */
4948 	role.base.direct = true;
4949 	role.base.level = role_regs_to_root_level(regs);
4950 	return role;
4951 }
4952 
4953 static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
4954 {
4955 	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
4956 	union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu, &regs);
4957 	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
4958 
4959 	if (new_role.as_u64 == g_context->mmu_role.as_u64)
4960 		return;
4961 
4962 	g_context->mmu_role.as_u64 = new_role.as_u64;
4963 	g_context->get_guest_pgd     = get_cr3;
4964 	g_context->get_pdptr         = kvm_pdptr_read;
4965 	g_context->inject_page_fault = kvm_inject_page_fault;
4966 	g_context->root_level        = new_role.base.level;
4967 
4968 	/*
4969 	 * L2 page tables are never shadowed, so there is no need to sync
4970 	 * SPTEs.
4971 	 */
4972 	g_context->invlpg            = NULL;
4973 
4974 	/*
4975 	 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
4976 	 * L1's nested page tables (e.g. EPT12). The nested translation
4977 	 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
4978 	 * L2's page tables as the first level of translation and L1's
4979 	 * nested page tables as the second level of translation. Basically
4980 	 * the gva_to_gpa functions between mmu and nested_mmu are swapped.
4981 	 */
4982 	if (!is_paging(vcpu))
4983 		g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
4984 	else if (is_long_mode(vcpu))
4985 		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
4986 	else if (is_pae(vcpu))
4987 		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
4988 	else
4989 		g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
4990 
4991 	reset_guest_paging_metadata(vcpu, g_context);
4992 }
4993 
4994 void kvm_init_mmu(struct kvm_vcpu *vcpu)
4995 {
4996 	if (mmu_is_nested(vcpu))
4997 		init_kvm_nested_mmu(vcpu);
4998 	else if (tdp_enabled)
4999 		init_kvm_tdp_mmu(vcpu);
5000 	else
5001 		init_kvm_softmmu(vcpu);
5002 }
5003 EXPORT_SYMBOL_GPL(kvm_init_mmu);
5004 
5005 static union kvm_mmu_page_role
5006 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
5007 {
5008 	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
5009 	union kvm_mmu_role role;
5010 
5011 	if (tdp_enabled)
5012 		role = kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, true);
5013 	else
5014 		role = kvm_calc_shadow_mmu_root_page_role(vcpu, &regs, true);
5015 
5016 	return role.base;
5017 }
5018 
5019 void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
5020 {
5021 	/*
5022 	 * Invalidate all MMU roles to force them to reinitialize as CPUID
5023 	 * information is factored into reserved bit calculations.
5024 	 */
5025 	vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
5026 	vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
5027 	vcpu->arch.nested_mmu.mmu_role.ext.valid = 0;
5028 	kvm_mmu_reset_context(vcpu);
5029 
5030 	/*
5031 	 * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
5032 	 * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc. aren't
5033 	 * tracked in kvm_mmu_page_role.  As a result, KVM may miss guest page
5034 	 * faults due to reusing SPs/SPTEs.  Alert userspace, but otherwise
5035 	 * sweep the problem under the rug.
5036 	 *
5037 	 * KVM's horrific CPUID ABI makes the problem all but impossible to
5038 	 * solve, as correctly handling multiple vCPU models (with respect to
5039 	 * paging and physical address properties) in a single VM would require
5040 	 * tracking all relevant CPUID information in kvm_mmu_page_role.  That
5041 	 * is very undesirable as it would double the memory requirements for
5042 	 * gfn_track (see struct kvm_mmu_page_role comments), and in practice
5043 	 * no sane VMM mucks with the core vCPU model on the fly.
5044 	 */
5045 	if (vcpu->arch.last_vmentry_cpu != -1) {
5046 		pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} after KVM_RUN may cause guest instability\n");
5047 		pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} will fail after KVM_RUN starting with Linux 5.16\n");
5048 	}
5049 }
5050 
5051 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
5052 {
5053 	kvm_mmu_unload(vcpu);
5054 	kvm_init_mmu(vcpu);
5055 }
5056 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
5057 
5058 int kvm_mmu_load(struct kvm_vcpu *vcpu)
5059 {
5060 	int r;
5061 
5062 	r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
5063 	if (r)
5064 		goto out;
5065 	r = mmu_alloc_special_roots(vcpu);
5066 	if (r)
5067 		goto out;
5068 	if (vcpu->arch.mmu->direct_map)
5069 		r = mmu_alloc_direct_roots(vcpu);
5070 	else
5071 		r = mmu_alloc_shadow_roots(vcpu);
5072 	if (r)
5073 		goto out;
5074 
5075 	kvm_mmu_sync_roots(vcpu);
5076 
5077 	kvm_mmu_load_pgd(vcpu);
5078 	static_call(kvm_x86_tlb_flush_current)(vcpu);
5079 out:
5080 	return r;
5081 }
5082 
5083 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
5084 {
5085 	kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
5086 	WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
5087 	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
5088 	WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
5089 }
5090 
5091 static bool need_remote_flush(u64 old, u64 new)
5092 {
5093 	if (!is_shadow_present_pte(old))
5094 		return false;
5095 	if (!is_shadow_present_pte(new))
5096 		return true;
5097 	if ((old ^ new) & PT64_BASE_ADDR_MASK)
5098 		return true;
5099 	old ^= shadow_nx_mask;
5100 	new ^= shadow_nx_mask;
5101 	return (old & ~new & PT64_PERM_MASK) != 0;
5102 }
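
/*
 * Worked example (hypothetical SPTE values): if the old SPTE was writable
 * and the new one is read-only, "old & ~new" has PT_WRITABLE_MASK set, so
 * a stale TLB entry could still allow writes and a remote flush is needed.
 * Conversely, clearing NX (newly allowing execution) becomes a 0 -> 1
 * transition after the XOR with shadow_nx_mask, "old & ~new" is zero, and
 * no flush is needed: granting permission is safe because the CPU will
 * fault and pick up the new SPTE.
 */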
5103 
5104 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
5105 				    int *bytes)
5106 {
5107 	u64 gentry = 0;
5108 	int r;
5109 
5110 	/*
5111 	 * Assume that the pte write is on a page table of the same type
5112 	 * as the current vcpu paging mode, since we update the sptes only
5113 	 * when they have the same mode.
5114 	 */
5115 	if (is_pae(vcpu) && *bytes == 4) {
5116 		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
5117 		*gpa &= ~(gpa_t)7;
5118 		*bytes = 8;
5119 	}
5120 
5121 	if (*bytes == 4 || *bytes == 8) {
5122 		r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
5123 		if (r)
5124 			gentry = 0;
5125 	}
5126 
5127 	return gentry;
5128 }
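
/*
 * Illustration (hypothetical write): a 32-bit PAE guest updating an 8-byte
 * gpte performs two 4-byte writes, e.g. 4 bytes at gpa 0x1004.  The code
 * above aligns that down to 0x1000 and widens *bytes to 8, so the full
 * gpte is re-read atomically rather than interpreting half of it.
 */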
5129 
5130 /*
5131  * If we're seeing too many writes to a page, it may no longer be a page table,
5132  * or we may be forking, in which case it is better to unmap the page.
5133  */
5134 static bool detect_write_flooding(struct kvm_mmu_page *sp)
5135 {
5136 	/*
5137 	 * Skip write-flooding detection for an sp whose level is 1: it can
5138 	 * become unsync, in which case the guest page is not write-protected.
5139 	 */
5140 	if (sp->role.level == PG_LEVEL_4K)
5141 		return false;
5142 
5143 	atomic_inc(&sp->write_flooding_count);
5144 	return atomic_read(&sp->write_flooding_count) >= 3;
5145 }
5146 
5147 /*
5148  * Misaligned accesses are too much trouble to fix up; also, they usually
5149  * indicate a page is not used as a page table.
5150  */
5151 static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
5152 				    int bytes)
5153 {
5154 	unsigned offset, pte_size, misaligned;
5155 
5156 	pgprintk("misaligned: gpa %llx bytes %d role %x\n",
5157 		 gpa, bytes, sp->role.word);
5158 
5159 	offset = offset_in_page(gpa);
5160 	pte_size = sp->role.gpte_is_8_bytes ? 8 : 4;
5161 
5162 	/*
5163 	 * Sometimes the OS writes only the last byte to update status
5164 	 * bits; for example, Linux's clear_bit() uses an andb instruction.
5165 	 */
5166 	if (!(offset & (pte_size - 1)) && bytes == 1)
5167 		return false;
5168 
5169 	misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
5170 	misaligned |= bytes < 4;
5171 
5172 	return misaligned;
5173 }
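
/*
 * Worked example (hypothetical access): with 8-byte gptes, a 4-byte write
 * at page offset 0x6 gives (0x6 ^ 0x9) & ~0x7 = 0x8, i.e. the write spans
 * two gptes and is flagged as misaligned.  A 4-byte write at offset 0x8
 * gives (0x8 ^ 0xb) & ~0x7 = 0, and with bytes >= 4 it is accepted as one
 * half of a PAE gpte update.
 */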
5174 
5175 static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
5176 {
5177 	unsigned page_offset, quadrant;
5178 	u64 *spte;
5179 	int level;
5180 
5181 	page_offset = offset_in_page(gpa);
5182 	level = sp->role.level;
5183 	*nspte = 1;
5184 	if (!sp->role.gpte_is_8_bytes) {
5185 		page_offset <<= 1;	/* 32->64 */
5186 		/*
5187 		 * A 32-bit pde maps 4MB while the shadow pdes map
5188 		 * only 2MB.  So we need to double the offset again
5189 		 * and zap two pdes instead of one.
5190 		 */
5191 		if (level == PT32_ROOT_LEVEL) {
5192 			page_offset &= ~7; /* kill rounding error */
5193 			page_offset <<= 1;
5194 			*nspte = 2;
5195 		}
5196 		quadrant = page_offset >> PAGE_SHIFT;
5197 		page_offset &= ~PAGE_MASK;
5198 		if (quadrant != sp->role.quadrant)
5199 			return NULL;
5200 	}
5201 
5202 	spte = &sp->spt[page_offset / sizeof(*spte)];
5203 	return spte;
5204 }
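
/*
 * Illustration (hypothetical gpa): with 4-byte gptes, a write at page
 * offset 0x800 (gpte index 512) doubles to shadow offset 0x1000, so
 * quadrant = 1 and the spte lives at index 0 of the sp that shadows the
 * upper half of the guest page; an sp with role.quadrant == 0 is skipped.
 */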
5205 
5206 static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
5207 			      const u8 *new, int bytes,
5208 			      struct kvm_page_track_notifier_node *node)
5209 {
5210 	gfn_t gfn = gpa >> PAGE_SHIFT;
5211 	struct kvm_mmu_page *sp;
5212 	LIST_HEAD(invalid_list);
5213 	u64 entry, gentry, *spte;
5214 	int npte;
5215 	bool remote_flush, local_flush;
5216 
5217 	/*
5218 	 * If we don't have indirect shadow pages, it means no page is
5219 	 * write-protected, so we can simply exit.
5220 	 */
5221 	if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
5222 		return;
5223 
5224 	remote_flush = local_flush = false;
5225 
5226 	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
5227 
5228 	/*
5229 	 * No need to check whether the memory allocation succeeded,
5230 	 * since pte prefetch is skipped if the cache does not have
5231 	 * enough objects.
5232 	 */
5233 	mmu_topup_memory_caches(vcpu, true);
5234 
5235 	write_lock(&vcpu->kvm->mmu_lock);
5236 
5237 	gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
5238 
5239 	++vcpu->kvm->stat.mmu_pte_write;
5240 	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
5241 
5242 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
5243 		if (detect_write_misaligned(sp, gpa, bytes) ||
5244 		      detect_write_flooding(sp)) {
5245 			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
5246 			++vcpu->kvm->stat.mmu_flooded;
5247 			continue;
5248 		}
5249 
5250 		spte = get_written_sptes(sp, gpa, &npte);
5251 		if (!spte)
5252 			continue;
5253 
5254 		local_flush = true;
5255 		while (npte--) {
5256 			entry = *spte;
5257 			mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
5258 			if (gentry && sp->role.level != PG_LEVEL_4K)
5259 				++vcpu->kvm->stat.mmu_pde_zapped;
5260 			if (need_remote_flush(entry, *spte))
5261 				remote_flush = true;
5262 			++spte;
5263 		}
5264 	}
5265 	kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush);
5266 	kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
5267 	write_unlock(&vcpu->kvm->mmu_lock);
5268 }
5269 
5270 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
5271 		       void *insn, int insn_len)
5272 {
5273 	int r, emulation_type = EMULTYPE_PF;
5274 	bool direct = vcpu->arch.mmu->direct_map;
5275 
5276 	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
5277 		return RET_PF_RETRY;
5278 
5279 	r = RET_PF_INVALID;
5280 	if (unlikely(error_code & PFERR_RSVD_MASK)) {
5281 		r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
5282 		if (r == RET_PF_EMULATE)
5283 			goto emulate;
5284 	}
5285 
5286 	if (r == RET_PF_INVALID) {
5287 		r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
5288 					  lower_32_bits(error_code), false);
5289 		if (KVM_BUG_ON(r == RET_PF_INVALID, vcpu->kvm))
5290 			return -EIO;
5291 	}
5292 
5293 	if (r < 0)
5294 		return r;
5295 	if (r != RET_PF_EMULATE)
5296 		return 1;
5297 
5298 	/*
5299 	 * Before emulating the instruction, check if the error code
5300 	 * was due to a RO violation while translating the guest page.
5301 	 * This can occur when using nested virtualization with nested
5302 	 * paging in both guests. If true, we simply unprotect the page
5303 	 * and resume the guest.
5304 	 */
5305 	if (vcpu->arch.mmu->direct_map &&
5306 	    (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
5307 		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
5308 		return 1;
5309 	}
5310 
5311 	/*
5312 	 * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
5313 	 * optimistically try to just unprotect the page and let the processor
5314 	 * re-execute the instruction that caused the page fault.  Do not allow
5315 	 * retrying MMIO emulation, as it's not only pointless but could also
5316 	 * cause us to enter an infinite loop because the processor will keep
5317 	 * faulting on the non-existent MMIO address.  Retrying an instruction
5318 	 * from a nested guest is also pointless and dangerous as we are only
5319 	 * explicitly shadowing L1's page tables, i.e. unprotecting something
5320 	 * for L1 isn't going to magically fix whatever issue caused L2 to fail.
5321 	 */
5322 	if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
5323 		emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
5324 emulate:
5325 	return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
5326 				       insn_len);
5327 }
5328 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
5329 
5330 void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
5331 			    gva_t gva, hpa_t root_hpa)
5332 {
5333 	int i;
5334 
5335 	/* It's actually a GPA for vcpu->arch.guest_mmu.  */
5336 	if (mmu != &vcpu->arch.guest_mmu) {
5337 		/* INVLPG on a non-canonical address is a NOP according to the SDM.  */
5338 		if (is_noncanonical_address(gva, vcpu))
5339 			return;
5340 
5341 		static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);
5342 	}
5343 
5344 	if (!mmu->invlpg)
5345 		return;
5346 
5347 	if (root_hpa == INVALID_PAGE) {
5348 		mmu->invlpg(vcpu, gva, mmu->root_hpa);
5349 
5350 		/*
5351 		 * INVLPG is required to invalidate any global mappings for the VA,
5352 		 * irrespective of PCID.  Determining whether any of the prev_root
5353 		 * mappings of the VA is marked global would take roughly as much
5354 		 * work as syncing it blindly, so we might as well always sync it.
5356 		 *
5357 		 * Mappings not reachable via the current cr3 or the prev_roots will be
5358 		 * synced when switching to that cr3, so nothing needs to be done here
5359 		 * for them.
5360 		 */
5361 		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5362 			if (VALID_PAGE(mmu->prev_roots[i].hpa))
5363 				mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
5364 	} else {
5365 		mmu->invlpg(vcpu, gva, root_hpa);
5366 	}
5367 }
5368 
5369 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
5370 {
5371 	kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE);
5372 	++vcpu->stat.invlpg;
5373 }
5374 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
5375 
5376 
5377 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
5378 {
5379 	struct kvm_mmu *mmu = vcpu->arch.mmu;
5380 	bool tlb_flush = false;
5381 	uint i;
5382 
5383 	if (pcid == kvm_get_active_pcid(vcpu)) {
5384 		mmu->invlpg(vcpu, gva, mmu->root_hpa);
5385 		tlb_flush = true;
5386 	}
5387 
5388 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5389 		if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
5390 		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
5391 			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
5392 			tlb_flush = true;
5393 		}
5394 	}
5395 
5396 	if (tlb_flush)
5397 		static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);
5398 
5399 	++vcpu->stat.invlpg;
5400 
5401 	/*
5402 	 * Mappings not reachable via the current cr3 or the prev_roots will be
5403 	 * synced when switching to that cr3, so nothing needs to be done here
5404 	 * for them.
5405 	 */
5406 }
5407 
5408 void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
5409 		       int tdp_max_root_level, int tdp_huge_page_level)
5410 {
5411 	tdp_enabled = enable_tdp;
5412 	tdp_root_level = tdp_forced_root_level;
5413 	max_tdp_level = tdp_max_root_level;
5414 
5415 	/*
5416 	 * max_huge_page_level reflects KVM's MMU capabilities irrespective
5417 	 * of kernel support, e.g. KVM may be capable of using 1GB pages when
5418 	 * the kernel is not.  But, KVM never creates a page size greater than
5419 	 * what is used by the kernel for any given HVA, i.e. the kernel's
5420 	 * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust().
5421 	 */
5422 	if (tdp_enabled)
5423 		max_huge_page_level = tdp_huge_page_level;
5424 	else if (boot_cpu_has(X86_FEATURE_GBPAGES))
5425 		max_huge_page_level = PG_LEVEL_1G;
5426 	else
5427 		max_huge_page_level = PG_LEVEL_2M;
5428 }
5429 EXPORT_SYMBOL_GPL(kvm_configure_mmu);
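
/*
 * Usage sketch (illustrative values, not a verbatim call site): a vendor
 * module with TDP enabled, no forced root level, 4-level paging and 2M
 * huge page support would configure the MMU as:
 *
 *	kvm_configure_mmu(true, 0, 4, PG_LEVEL_2M);
 */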
5430 
5431 /* The return value indicates if tlb flush on all vcpus is needed. */
5432 typedef bool (*slot_level_handler) (struct kvm *kvm,
5433 				    struct kvm_rmap_head *rmap_head,
5434 				    const struct kvm_memory_slot *slot);
5435 
5436 /* The caller should hold mmu-lock before calling this function. */
5437 static __always_inline bool
5438 slot_handle_level_range(struct kvm *kvm, const struct kvm_memory_slot *memslot,
5439 			slot_level_handler fn, int start_level, int end_level,
5440 			gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield,
5441 			bool flush)
5442 {
5443 	struct slot_rmap_walk_iterator iterator;
5444 
5445 	for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
5446 			end_gfn, &iterator) {
5447 		if (iterator.rmap)
5448 			flush |= fn(kvm, iterator.rmap, memslot);
5449 
5450 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
5451 			if (flush && flush_on_yield) {
5452 				kvm_flush_remote_tlbs_with_address(kvm,
5453 						start_gfn,
5454 						iterator.gfn - start_gfn + 1);
5455 				flush = false;
5456 			}
5457 			cond_resched_rwlock_write(&kvm->mmu_lock);
5458 		}
5459 	}
5460 
5461 	return flush;
5462 }
5463 
5464 static __always_inline bool
5465 slot_handle_level(struct kvm *kvm, const struct kvm_memory_slot *memslot,
5466 		  slot_level_handler fn, int start_level, int end_level,
5467 		  bool flush_on_yield)
5468 {
5469 	return slot_handle_level_range(kvm, memslot, fn, start_level,
5470 			end_level, memslot->base_gfn,
5471 			memslot->base_gfn + memslot->npages - 1,
5472 			flush_on_yield, false);
5473 }
5474 
5475 static __always_inline bool
5476 slot_handle_leaf(struct kvm *kvm, const struct kvm_memory_slot *memslot,
5477 		 slot_level_handler fn, bool flush_on_yield)
5478 {
5479 	return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
5480 				 PG_LEVEL_4K, flush_on_yield);
5481 }
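
/*
 * Typical usage, as in kvm_mmu_slot_remove_write_access() below: walk
 * every rmap of a slot across all page sizes with
 *
 *	slot_handle_level(kvm, memslot, slot_rmap_write_protect,
 *			  PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL, false);
 */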
5482 
5483 static void free_mmu_pages(struct kvm_mmu *mmu)
5484 {
5485 	if (!tdp_enabled && mmu->pae_root)
5486 		set_memory_encrypted((unsigned long)mmu->pae_root, 1);
5487 	free_page((unsigned long)mmu->pae_root);
5488 	free_page((unsigned long)mmu->pml4_root);
5489 	free_page((unsigned long)mmu->pml5_root);
5490 }
5491 
5492 static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
5493 {
5494 	struct page *page;
5495 	int i;
5496 
5497 	mmu->root_hpa = INVALID_PAGE;
5498 	mmu->root_pgd = 0;
5499 	mmu->translate_gpa = translate_gpa;
5500 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5501 		mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
5502 
5503 	/*
5504 	 * When using PAE paging, the four PDPTEs are treated as 'root' pages,
5505 	 * while the PDP table is a per-vCPU construct that's allocated at MMU
5506 	 * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
5507 	 * x86_64.  Therefore we need to allocate the PDP table in the first
5508 	 * 4GB of memory, which happens to fit the DMA32 zone.  TDP paging
5509 	 * generally doesn't use PAE paging and can skip allocating the PDP
5510 	 * table.  The main exception, handled here, is SVM's 32-bit NPT.  The
5511 	 * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
5512 	 * KVM; that horror is handled on-demand by mmu_alloc_shadow_roots().
5513 	 */
5514 	if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
5515 		return 0;
5516 
5517 	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
5518 	if (!page)
5519 		return -ENOMEM;
5520 
5521 	mmu->pae_root = page_address(page);
5522 
5523 	/*
5524 	 * CR3 is only 32 bits when PAE paging is used, thus it's impossible to
5525 	 * get the CPU to treat the PDPTEs as encrypted.  Decrypt the page so
5526 	 * that KVM's writes and the CPU's reads get along.  Note, this is
5527 	 * only necessary when using shadow paging, as 64-bit NPT can get at
5528 	 * the C-bit even when shadowing 32-bit NPT, and SME isn't supported
5529 	 * by 32-bit kernels (when KVM itself uses 32-bit NPT).
5530 	 */
5531 	if (!tdp_enabled)
5532 		set_memory_decrypted((unsigned long)mmu->pae_root, 1);
5533 	else
5534 		WARN_ON_ONCE(shadow_me_mask);
5535 
5536 	for (i = 0; i < 4; ++i)
5537 		mmu->pae_root[i] = INVALID_PAE_ROOT;
5538 
5539 	return 0;
5540 }
5541 
5542 int kvm_mmu_create(struct kvm_vcpu *vcpu)
5543 {
5544 	int ret;
5545 
5546 	vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
5547 	vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
5548 
5549 	vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
5550 	vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
5551 
5552 	vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
5553 
5554 	vcpu->arch.mmu = &vcpu->arch.root_mmu;
5555 	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
5556 
5557 	vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
5558 
5559 	ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
5560 	if (ret)
5561 		return ret;
5562 
5563 	ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu);
5564 	if (ret)
5565 		goto fail_allocate_root;
5566 
5567 	return ret;
5568  fail_allocate_root:
5569 	free_mmu_pages(&vcpu->arch.guest_mmu);
5570 	return ret;
5571 }
5572 
5573 #define BATCH_ZAP_PAGES	10
5574 static void kvm_zap_obsolete_pages(struct kvm *kvm)
5575 {
5576 	struct kvm_mmu_page *sp, *node;
5577 	int nr_zapped, batch = 0;
5578 
5579 restart:
5580 	list_for_each_entry_safe_reverse(sp, node,
5581 	      &kvm->arch.active_mmu_pages, link) {
5582 		/*
5583 		 * No obsolete valid page exists before a newly created page
5584 		 * since active_mmu_pages is a FIFO list.
5585 		 */
5586 		if (!is_obsolete_sp(kvm, sp))
5587 			break;
5588 
5589 		/*
5590 		 * Invalid pages should never land back on the list of active
5591 		 * pages.  Skip the bogus page, otherwise we'll get stuck in an
5592 		 * infinite loop if the page gets put back on the list (again).
5593 		 */
5594 		if (WARN_ON(sp->role.invalid))
5595 			continue;
5596 
5597 		/*
5598 		 * No need to flush the TLB since we're only zapping shadow
5599 		 * pages with an obsolete generation number and all vCPUs have
5600 		 * loaded a new root, i.e. the shadow pages being zapped cannot
5601 		 * be in active use by the guest.
5602 		 */
5603 		if (batch >= BATCH_ZAP_PAGES &&
5604 		    cond_resched_rwlock_write(&kvm->mmu_lock)) {
5605 			batch = 0;
5606 			goto restart;
5607 		}
5608 
5609 		if (__kvm_mmu_prepare_zap_page(kvm, sp,
5610 				&kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
5611 			batch += nr_zapped;
5612 			goto restart;
5613 		}
5614 	}
5615 
5616 	/*
5617 	 * Trigger a remote TLB flush before freeing the page tables to ensure
5618 	 * KVM is not in the middle of a lockless shadow page table walk, which
5619 	 * may reference the pages.
5620 	 */
5621 	kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
5622 }
5623 
5624 /*
5625  * Fast invalidate all shadow pages and use lock-break technique
5626  * to zap obsolete pages.
5627  *
5628  * It's required when memslot is being deleted or VM is being
5629  * destroyed, in these cases, we should ensure that KVM MMU does
5630  * not use any resource of the being-deleted slot or all slots
5631  * after calling the function.
5632  */
5633 static void kvm_mmu_zap_all_fast(struct kvm *kvm)
5634 {
5635 	lockdep_assert_held(&kvm->slots_lock);
5636 
5637 	write_lock(&kvm->mmu_lock);
5638 	trace_kvm_mmu_zap_all_fast(kvm);
5639 
5640 	/*
5641 	 * Toggle mmu_valid_gen between '0' and '1'.  Because slots_lock is
5642 	 * held for the entire duration of zapping obsolete pages, it's
5643 	 * impossible for there to be multiple invalid generations associated
5644 	 * with *valid* shadow pages at any given time, i.e. there is exactly
5645 	 * one valid generation and (at most) one invalid generation.
5646 	 */
5647 	kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
5648 
5649 	/*
5650 	 * To ensure all threads see this change when handling the MMU
5651 	 * reload signal, it must happen in the same critical section as
5652 	 * kvm_reload_remote_mmus(), and before kvm_zap_obsolete_pages(),
5653 	 * which could drop the MMU lock and yield.
5654 	 */
5655 	if (is_tdp_mmu_enabled(kvm))
5656 		kvm_tdp_mmu_invalidate_all_roots(kvm);
5657 
5658 	/*
5659 	 * Notify all vcpus to reload their shadow page tables and flush TLBs.
5660 	 * All vcpus will then switch to a new shadow page table with the new
5661 	 * mmu_valid_gen.
5662 	 *
5663 	 * Note: we need to do this under the protection of mmu_lock,
5664 	 * otherwise a vcpu could purge a shadow page but miss the tlb flush.
5665 	 */
5666 	kvm_reload_remote_mmus(kvm);
5667 
5668 	kvm_zap_obsolete_pages(kvm);
5669 
5670 	write_unlock(&kvm->mmu_lock);
5671 
5672 	if (is_tdp_mmu_enabled(kvm)) {
5673 		read_lock(&kvm->mmu_lock);
5674 		kvm_tdp_mmu_zap_invalidated_roots(kvm);
5675 		read_unlock(&kvm->mmu_lock);
5676 	}
5677 }
5678 
5679 static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
5680 {
5681 	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
5682 }
5683 
5684 static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
5685 			struct kvm_memory_slot *slot,
5686 			struct kvm_page_track_notifier_node *node)
5687 {
5688 	kvm_mmu_zap_all_fast(kvm);
5689 }
5690 
5691 void kvm_mmu_init_vm(struct kvm *kvm)
5692 {
5693 	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5694 
5695 	spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
5696 
5697 	if (!kvm_mmu_init_tdp_mmu(kvm))
5698 		/*
5699 		 * No smp_load/store wrappers needed here as we are in
5700 		 * VM init and there cannot be any memslots / other threads
5701 		 * accessing this struct kvm yet.
5702 		 */
5703 		kvm->arch.memslots_have_rmaps = true;
5704 
5705 	node->track_write = kvm_mmu_pte_write;
5706 	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
5707 	kvm_page_track_register_notifier(kvm, node);
5708 }
5709 
5710 void kvm_mmu_uninit_vm(struct kvm *kvm)
5711 {
5712 	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5713 
5714 	kvm_page_track_unregister_notifier(kvm, node);
5715 
5716 	kvm_mmu_uninit_tdp_mmu(kvm);
5717 }
5718 
5719 /*
5720  * Invalidate (zap) SPTEs that cover GFNs from gfn_start up to, but not
5721  * including, gfn_end.
5722  */
5723 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
5724 {
5725 	struct kvm_memslots *slots;
5726 	struct kvm_memory_slot *memslot;
5727 	int i;
5728 	bool flush = false;
5729 
5730 	write_lock(&kvm->mmu_lock);
5731 
5732 	kvm_inc_notifier_count(kvm, gfn_start, gfn_end);
5733 
5734 	if (kvm_memslots_have_rmaps(kvm)) {
5735 		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
5736 			slots = __kvm_memslots(kvm, i);
5737 			kvm_for_each_memslot(memslot, slots) {
5738 				gfn_t start, end;
5739 
5740 				start = max(gfn_start, memslot->base_gfn);
5741 				end = min(gfn_end, memslot->base_gfn + memslot->npages);
5742 				if (start >= end)
5743 					continue;
5744 
5745 				flush = slot_handle_level_range(kvm, memslot,
5746 						kvm_zap_rmapp, PG_LEVEL_4K,
5747 						KVM_MAX_HUGEPAGE_LEVEL, start,
5748 						end - 1, true, flush);
5750 			}
5751 		}
5752 		if (flush)
5753 			kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
5754 							   gfn_end - gfn_start);
5755 	}
5756 
5757 	if (is_tdp_mmu_enabled(kvm)) {
5758 		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
5759 			flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start,
5760 							  gfn_end, flush);
5761 		if (flush)
5762 			kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
5763 							   gfn_end - gfn_start);
5764 	}
5765 
5766 	if (flush)
5767 		kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end - gfn_start);
5768 
5769 	kvm_dec_notifier_count(kvm, gfn_start, gfn_end);
5770 
5771 	write_unlock(&kvm->mmu_lock);
5772 }
5773 
5774 static bool slot_rmap_write_protect(struct kvm *kvm,
5775 				    struct kvm_rmap_head *rmap_head,
5776 				    const struct kvm_memory_slot *slot)
5777 {
5778 	return __rmap_write_protect(kvm, rmap_head, false);
5779 }
5780 
5781 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
5782 				      const struct kvm_memory_slot *memslot,
5783 				      int start_level)
5784 {
5785 	bool flush = false;
5786 
5787 	if (kvm_memslots_have_rmaps(kvm)) {
5788 		write_lock(&kvm->mmu_lock);
5789 		flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
5790 					  start_level, KVM_MAX_HUGEPAGE_LEVEL,
5791 					  false);
5792 		write_unlock(&kvm->mmu_lock);
5793 	}
5794 
5795 	if (is_tdp_mmu_enabled(kvm)) {
5796 		read_lock(&kvm->mmu_lock);
5797 		flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
5798 		read_unlock(&kvm->mmu_lock);
5799 	}
5800 
5801 	/*
5802 	 * We can flush all the TLBs out of the mmu lock without TLB
5803 	 * corruption since we just change the spte from writable to
5804 	 * readonly, so we only need to care about the case of changing
5805 	 * an spte from present to present (changing an spte from present
5806 	 * to nonpresent flushes all the TLBs immediately).  In other
5807 	 * words, the only case we care about is mmu_spte_update(), which
5808 	 * checks Host-writable | MMU-writable instead of PT_WRITABLE_MASK
5809 	 * and thus no longer depends on PT_WRITABLE_MASK.
5811 	 */
5812 	if (flush)
5813 		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5814 }
5815 
5816 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
5817 					 struct kvm_rmap_head *rmap_head,
5818 					 const struct kvm_memory_slot *slot)
5819 {
5820 	u64 *sptep;
5821 	struct rmap_iterator iter;
5822 	int need_tlb_flush = 0;
5823 	kvm_pfn_t pfn;
5824 	struct kvm_mmu_page *sp;
5825 
5826 restart:
5827 	for_each_rmap_spte(rmap_head, &iter, sptep) {
5828 		sp = sptep_to_sp(sptep);
5829 		pfn = spte_to_pfn(*sptep);
5830 
5831 		/*
5832 		 * We cannot do huge page mapping for indirect shadow pages,
5833 		 * which are found on the last rmap (level = 1) when not using
5834 		 * tdp; such shadow pages are synced with the page table in
5835 		 * the guest, and the guest page table uses 4K page-size
5836 		 * mappings if the indirect sp has level = 1.
5837 		 */
5838 		if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
5839 		    sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
5840 							       pfn, PG_LEVEL_NUM)) {
5841 			pte_list_remove(kvm, rmap_head, sptep);
5842 
5843 			if (kvm_available_flush_tlb_with_range())
5844 				kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
5845 					KVM_PAGES_PER_HPAGE(sp->role.level));
5846 			else
5847 				need_tlb_flush = 1;
5848 
5849 			goto restart;
5850 		}
5851 	}
5852 
5853 	return need_tlb_flush;
5854 }
5855 
5856 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
5857 				   const struct kvm_memory_slot *slot)
5858 {
5859 	bool flush = false;
5860 
5861 	if (kvm_memslots_have_rmaps(kvm)) {
5862 		write_lock(&kvm->mmu_lock);
5863 		flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
5864 		if (flush)
5865 			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
5866 		write_unlock(&kvm->mmu_lock);
5867 	}
5868 
5869 	if (is_tdp_mmu_enabled(kvm)) {
5870 		read_lock(&kvm->mmu_lock);
5871 		flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
5872 		if (flush)
5873 			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
5874 		read_unlock(&kvm->mmu_lock);
5875 	}
5876 }
5877 
5878 void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
5879 					const struct kvm_memory_slot *memslot)
5880 {
5881 	/*
5882 	 * All current use cases for flushing the TLBs for a specific memslot are
5883 	 * related to dirty logging, and many do the TLB flush out of mmu_lock.
5884 	 * The interactions between the various operations on a memslot must be
5885 	 * serialized by slots_lock to ensure the TLB flush from one operation
5886 	 * is observed by any other operation on the same memslot.
5887 	 */
5888 	lockdep_assert_held(&kvm->slots_lock);
5889 	kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
5890 					   memslot->npages);
5891 }
5892 
5893 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
5894 				   const struct kvm_memory_slot *memslot)
5895 {
5896 	bool flush = false;
5897 
5898 	if (kvm_memslots_have_rmaps(kvm)) {
5899 		write_lock(&kvm->mmu_lock);
5900 		flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty,
5901 					 false);
5902 		write_unlock(&kvm->mmu_lock);
5903 	}
5904 
5905 	if (is_tdp_mmu_enabled(kvm)) {
5906 		read_lock(&kvm->mmu_lock);
5907 		flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
5908 		read_unlock(&kvm->mmu_lock);
5909 	}
5910 
5911 	/*
5912 	 * It's also safe to flush TLBs out of mmu lock here as currently this
5913 	 * function is only used for dirty logging, in which case flushing TLB
5914 	 * out of mmu lock also guarantees no dirty pages will be lost in
5915 	 * dirty_bitmap.
5916 	 */
5917 	if (flush)
5918 		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5919 }
5920 
5921 void kvm_mmu_zap_all(struct kvm *kvm)
5922 {
5923 	struct kvm_mmu_page *sp, *node;
5924 	LIST_HEAD(invalid_list);
5925 	int ign;
5926 
5927 	write_lock(&kvm->mmu_lock);
5928 restart:
5929 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
5930 		if (WARN_ON(sp->role.invalid))
5931 			continue;
5932 		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
5933 			goto restart;
5934 		if (cond_resched_rwlock_write(&kvm->mmu_lock))
5935 			goto restart;
5936 	}
5937 
5938 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
5939 
5940 	if (is_tdp_mmu_enabled(kvm))
5941 		kvm_tdp_mmu_zap_all(kvm);
5942 
5943 	write_unlock(&kvm->mmu_lock);
5944 }
5945 
5946 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
5947 {
5948 	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
5949 
5950 	gen &= MMIO_SPTE_GEN_MASK;
5951 
5952 	/*
5953 	 * Generation numbers are incremented in multiples of the number of
5954 	 * address spaces in order to provide unique generations across all
5955 	 * address spaces.  Strip what is effectively the address space
5956 	 * modifier prior to checking for a wrap of the MMIO generation so
5957 	 * that a wrap in any address space is detected.
5958 	 */
5959 	gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);
5960 
5961 	/*
5962 	 * The very rare case: if the MMIO generation number has wrapped,
5963 	 * zap all shadow pages.
5964 	 */
5965 	if (unlikely(gen == 0)) {
5966 		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
5967 		kvm_mmu_zap_all_fast(kvm);
5968 	}
5969 }
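
/*
 * Worked example (assuming KVM_ADDRESS_SPACE_NUM == 2, as on x86): memslot
 * generations advance in steps of two, one per address space, so bit 0 is
 * the address-space modifier.  Generations 6 and 7 both strip to 6 here;
 * only when the masked value wraps all the way to 0 are all shadow pages
 * zapped, guaranteeing a stale MMIO SPTE can never match a live generation.
 */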
5970 
5971 static unsigned long
5972 mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
5973 {
5974 	struct kvm *kvm;
5975 	int nr_to_scan = sc->nr_to_scan;
5976 	unsigned long freed = 0;
5977 
5978 	mutex_lock(&kvm_lock);
5979 
5980 	list_for_each_entry(kvm, &vm_list, vm_list) {
5981 		int idx;
5982 		LIST_HEAD(invalid_list);
5983 
5984 		/*
5985 		 * Never scan more than sc->nr_to_scan VM instances.
5986 		 * In practice we will not hit this condition, since we do not
5987 		 * try to shrink more than one VM and it is very unlikely to
5988 		 * see !n_used_mmu_pages so many times.
5989 		 */
5990 		if (!nr_to_scan--)
5991 			break;
5992 		/*
5993 		 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
5994 		 * here. We may skip a VM instance erroneously, but we do not
5995 		 * want to shrink a VM that only started to populate its MMU
5996 		 * anyway.
5997 		 */
5998 		if (!kvm->arch.n_used_mmu_pages &&
5999 		    !kvm_has_zapped_obsolete_pages(kvm))
6000 			continue;
6001 
6002 		idx = srcu_read_lock(&kvm->srcu);
6003 		write_lock(&kvm->mmu_lock);
6004 
6005 		if (kvm_has_zapped_obsolete_pages(kvm)) {
6006 			kvm_mmu_commit_zap_page(kvm,
6007 			      &kvm->arch.zapped_obsolete_pages);
6008 			goto unlock;
6009 		}
6010 
6011 		freed = kvm_mmu_zap_oldest_mmu_pages(kvm, sc->nr_to_scan);
6012 
6013 unlock:
6014 		write_unlock(&kvm->mmu_lock);
6015 		srcu_read_unlock(&kvm->srcu, idx);
6016 
6017 		/*
6018 		 * unfair on small ones
6019 		 * per-vm shrinkers cry out
6020 		 * sadness comes quickly
6021 		 */
6022 		list_move_tail(&kvm->vm_list, &vm_list);
6023 		break;
6024 	}
6025 
6026 	mutex_unlock(&kvm_lock);
6027 	return freed;
6028 }
6029 
6030 static unsigned long
6031 mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
6032 {
6033 	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
6034 }
6035 
6036 static struct shrinker mmu_shrinker = {
6037 	.count_objects = mmu_shrink_count,
6038 	.scan_objects = mmu_shrink_scan,
6039 	.seeks = DEFAULT_SEEKS * 10,
6040 };
6041 
6042 static void mmu_destroy_caches(void)
6043 {
6044 	kmem_cache_destroy(pte_list_desc_cache);
6045 	kmem_cache_destroy(mmu_page_header_cache);
6046 }
6047 
6048 static bool get_nx_auto_mode(void)
6049 {
6050 	/* Return true when CPU has the bug, and mitigations are ON */
6051 	return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
6052 }
6053 
6054 static void __set_nx_huge_pages(bool val)
6055 {
6056 	nx_huge_pages = itlb_multihit_kvm_mitigation = val;
6057 }
6058 
6059 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
6060 {
6061 	bool old_val = nx_huge_pages;
6062 	bool new_val;
6063 
6064 	/* In "auto" mode deploy workaround only if CPU has the bug. */
6065 	if (sysfs_streq(val, "off"))
6066 		new_val = 0;
6067 	else if (sysfs_streq(val, "force"))
6068 		new_val = 1;
6069 	else if (sysfs_streq(val, "auto"))
6070 		new_val = get_nx_auto_mode();
6071 	else if (strtobool(val, &new_val) < 0)
6072 		return -EINVAL;
6073 
6074 	__set_nx_huge_pages(new_val);
6075 
6076 	if (new_val != old_val) {
6077 		struct kvm *kvm;
6078 
6079 		mutex_lock(&kvm_lock);
6080 
6081 		list_for_each_entry(kvm, &vm_list, vm_list) {
6082 			mutex_lock(&kvm->slots_lock);
6083 			kvm_mmu_zap_all_fast(kvm);
6084 			mutex_unlock(&kvm->slots_lock);
6085 
6086 			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
6087 		}
6088 		mutex_unlock(&kvm_lock);
6089 	}
6090 
6091 	return 0;
6092 }
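
/*
 * Usage sketch: the parameter accepts "off", "force", "auto" or the usual
 * boolean strings, and may be toggled at runtime, e.g. (assuming the
 * standard module parameter sysfs layout):
 *
 *	echo force > /sys/module/kvm/parameters/nx_huge_pages
 */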
6093 
6094 int kvm_mmu_module_init(void)
6095 {
6096 	int ret = -ENOMEM;
6097 
6098 	if (nx_huge_pages == -1)
6099 		__set_nx_huge_pages(get_nx_auto_mode());
6100 
6101 	/*
6102 	 * MMU roles use union aliasing which is, generally speaking,
6103 	 * undefined behavior. However, we supposedly know how compilers behave
6104 	 * and the current status quo is unlikely to change. The BUILD_BUG_ON()s
6105 	 * below are supposed to let us know if the assumption becomes false.
6106 	 */
6107 	BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
6108 	BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
6109 	BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64));
6110 
6111 	kvm_mmu_reset_all_pte_masks();
6112 
6113 	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
6114 					    sizeof(struct pte_list_desc),
6115 					    0, SLAB_ACCOUNT, NULL);
6116 	if (!pte_list_desc_cache)
6117 		goto out;
6118 
6119 	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
6120 						  sizeof(struct kvm_mmu_page),
6121 						  0, SLAB_ACCOUNT, NULL);
6122 	if (!mmu_page_header_cache)
6123 		goto out;
6124 
6125 	if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
6126 		goto out;
6127 
6128 	ret = register_shrinker(&mmu_shrinker);
6129 	if (ret)
6130 		goto out;
6131 
6132 	return 0;
6133 
6134 out:
6135 	mmu_destroy_caches();
6136 	return ret;
6137 }
6138 
6139 /*
6140  * Calculate mmu pages needed for kvm.
6141  */
6142 unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
6143 {
6144 	unsigned long nr_mmu_pages;
6145 	unsigned long nr_pages = 0;
6146 	struct kvm_memslots *slots;
6147 	struct kvm_memory_slot *memslot;
6148 	int i;
6149 
6150 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
6151 		slots = __kvm_memslots(kvm, i);
6152 
6153 		kvm_for_each_memslot(memslot, slots)
6154 			nr_pages += memslot->npages;
6155 	}
6156 
6157 	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
6158 	nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
6159 
6160 	return nr_mmu_pages;
6161 }
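
/*
 * Worked example (assuming the x86 defaults KVM_PERMILLE_MMU_PAGES == 20
 * and KVM_MIN_ALLOC_MMU_PAGES == 64): a guest with 4GiB of memslots has
 * 1048576 4K pages, so the default cap is 1048576 * 20 / 1000 = 20971
 * shadow pages; a tiny guest is still allowed the 64-page minimum.
 */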
6162 
6163 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
6164 {
6165 	kvm_mmu_unload(vcpu);
6166 	free_mmu_pages(&vcpu->arch.root_mmu);
6167 	free_mmu_pages(&vcpu->arch.guest_mmu);
6168 	mmu_free_memory_caches(vcpu);
6169 }
6170 
6171 void kvm_mmu_module_exit(void)
6172 {
6173 	mmu_destroy_caches();
6174 	percpu_counter_destroy(&kvm_total_used_mmu_pages);
6175 	unregister_shrinker(&mmu_shrinker);
6176 	mmu_audit_disable();
6177 }
6178 
6179 static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp)
6180 {
6181 	unsigned int old_val;
6182 	int err;
6183 
6184 	old_val = nx_huge_pages_recovery_ratio;
6185 	err = param_set_uint(val, kp);
6186 	if (err)
6187 		return err;
6188 
6189 	if (READ_ONCE(nx_huge_pages) &&
6190 	    !old_val && nx_huge_pages_recovery_ratio) {
6191 		struct kvm *kvm;
6192 
6193 		mutex_lock(&kvm_lock);
6194 
6195 		list_for_each_entry(kvm, &vm_list, vm_list)
6196 			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
6197 
6198 		mutex_unlock(&kvm_lock);
6199 	}
6200 
6201 	return err;
6202 }
6203 
6204 static void kvm_recover_nx_lpages(struct kvm *kvm)
6205 {
6206 	unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits;
6207 	int rcu_idx;
6208 	struct kvm_mmu_page *sp;
6209 	unsigned int ratio;
6210 	LIST_HEAD(invalid_list);
6211 	bool flush = false;
6212 	ulong to_zap;
6213 
6214 	rcu_idx = srcu_read_lock(&kvm->srcu);
6215 	write_lock(&kvm->mmu_lock);
6216 
6217 	ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
6218 	to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0;
6219 	for ( ; to_zap; --to_zap) {
6220 		if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages))
6221 			break;
6222 
6223 		/*
6224 		 * We use a separate list instead of just using active_mmu_pages
6225 		 * because the number of lpage_disallowed pages is expected to
6226 		 * be relatively small compared to the total.
6227 		 */
6228 		sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
6229 				      struct kvm_mmu_page,
6230 				      lpage_disallowed_link);
6231 		WARN_ON_ONCE(!sp->lpage_disallowed);
6232 		if (is_tdp_mmu_page(sp)) {
6233 			flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
6234 		} else {
6235 			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
6236 			WARN_ON_ONCE(sp->lpage_disallowed);
6237 		}
6238 
6239 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
6240 			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
6241 			cond_resched_rwlock_write(&kvm->mmu_lock);
6242 			flush = false;
6243 		}
6244 	}
6245 	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
6246 
6247 	write_unlock(&kvm->mmu_lock);
6248 	srcu_read_unlock(&kvm->srcu, rcu_idx);
6249 }
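
/*
 * Worked example (default ratio of 60, hypothetical counts): with 600
 * huge pages split due to the NX workaround, each pass zaps
 * DIV_ROUND_UP(600, 60) = 10 of them; since the worker below wakes up
 * every 60 seconds, the whole backlog is recycled in roughly an hour.
 */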
6250 
6251 static long get_nx_lpage_recovery_timeout(u64 start_time)
6252 {
6253 	return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio)
6254 		? start_time + 60 * HZ - get_jiffies_64()
6255 		: MAX_SCHEDULE_TIMEOUT;
6256 }
6257 
6258 static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
6259 {
6260 	u64 start_time;
6261 	long remaining_time;
6262 
6263 	while (true) {
6264 		start_time = get_jiffies_64();
6265 		remaining_time = get_nx_lpage_recovery_timeout(start_time);
6266 
6267 		set_current_state(TASK_INTERRUPTIBLE);
6268 		while (!kthread_should_stop() && remaining_time > 0) {
6269 			schedule_timeout(remaining_time);
6270 			remaining_time = get_nx_lpage_recovery_timeout(start_time);
6271 			set_current_state(TASK_INTERRUPTIBLE);
6272 		}
6273 
6274 		set_current_state(TASK_RUNNING);
6275 
6276 		if (kthread_should_stop())
6277 			return 0;
6278 
6279 		kvm_recover_nx_lpages(kvm);
6280 	}
6281 }
6282 
6283 int kvm_mmu_post_init_vm(struct kvm *kvm)
6284 {
6285 	int err;
6286 
6287 	err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
6288 					  "kvm-nx-lpage-recovery",
6289 					  &kvm->arch.nx_lpage_recovery_thread);
6290 	if (!err)
6291 		kthread_unpark(kvm->arch.nx_lpage_recovery_thread);
6292 
6293 	return err;
6294 }
6295 
6296 void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
6297 {
6298 	if (kvm->arch.nx_lpage_recovery_thread)
6299 		kthread_stop(kvm->arch.nx_lpage_recovery_thread);
6300 }
6301