// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);

__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm *kvm,
						     struct kvm_mmu_page *root)
{
	if (root->role.invalid)
		return false;

	return refcount_inc_not_zero(&root->tdp_mmu_root_count);
}

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared);
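
/*
 * Usage sketch (illustrative only, not a real caller): code that wants
 * to operate on a root outside of a root-iterator helper must pin the
 * root with kvm_tdp_mmu_get_root() and release it when done.  "shared"
 * tells kvm_tdp_mmu_put_root() whether mmu_lock is held for read (true)
 * or write (false).
 *
 *	if (!kvm_tdp_mmu_get_root(kvm, root))
 *		return;		// root is invalid or its refcount hit zero
 *	// ... walk or modify the paging structure rooted at @root ...
 *	kvm_tdp_mmu_put_root(kvm, root, true);	// mmu_lock held for read
 */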

bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
				 gfn_t end, bool can_yield, bool flush);
static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
					     gfn_t start, gfn_t end, bool flush)
{
	return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush);
}
static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level + 1);

	/*
	 * Don't allow yielding, as the caller may have a flush pending.  Note,
	 * if mmu_lock is held for write, zapping will never yield in this case,
	 * but explicitly disallow it for safety.  The TDP MMU does not yield
	 * until it has made forward progress (steps sideways), and when zapping
	 * a single shadow page that it's guaranteed to see (thus the mmu_lock
	 * requirement), its "step sideways" will always step beyond the bounds
	 * of the shadow page's gfn range and stop iterating before yielding.
	 */
	lockdep_assert_held_write(&kvm->mmu_lock);
	return __kvm_tdp_mmu_zap_gfn_range(kvm, kvm_mmu_page_as_id(sp),
					   sp->gfn, end, false, false);
}
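
/*
 * Usage sketch (illustrative, not a real caller): because zap_sp neither
 * yields nor flushes, the caller owns the TLB flush.  A caller would look
 * roughly like:
 *
 *	bool flush = false;
 *
 *	write_lock(&kvm->mmu_lock);
 *	if (is_tdp_mmu_page(sp))
 *		flush = kvm_tdp_mmu_zap_sp(kvm, sp);
 *	write_unlock(&kvm->mmu_lock);
 *
 *	if (flush)
 *		kvm_flush_remote_tlbs(kvm);
 */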

void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);

int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		    int map_writable, int max_level, kvm_pfn_t pfn,
		    bool prefault);

bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush);
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);

bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
			     const struct kvm_memory_slot *slot, int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  const struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot,
				       bool flush);

bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level);

static inline void kvm_tdp_mmu_walk_lockless_begin(void)
{
	rcu_read_lock();
}

static inline void kvm_tdp_mmu_walk_lockless_end(void)
{
	rcu_read_unlock();
}
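
/*
 * Usage sketch (illustrative): lockless walks rely on RCU rather than
 * mmu_lock, so every walk must be bracketed by the begin/end helpers,
 * e.g. (local variable names are hypothetical):
 *
 *	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
 *	int root_level, leaf;
 *
 *	kvm_tdp_mmu_walk_lockless_begin();
 *	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
 *	kvm_tdp_mmu_walk_lockless_end();
 */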

int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);
u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
					u64 *spte);

#ifdef CONFIG_X86_64
bool kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }

static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
{
	struct kvm_mmu_page *sp;
	hpa_t hpa = mmu->root_hpa;

	if (WARN_ON(!VALID_PAGE(hpa)))
		return false;

	/*
	 * A NULL shadow page is legal when shadowing a non-paging guest with
	 * PAE paging, as the MMU will be direct with root_hpa pointing at the
	 * pae_root page, not a shadow page.
	 */
	sp = to_shadow_page(hpa);
	return sp && is_tdp_mmu_page(sp) && sp->root_count;
}
#else
static inline bool kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return false; }
static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }
#endif
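
/*
 * Usage sketch (hypothetical call site; the real routing lives in mmu.c):
 * fault handlers dispatch to the TDP MMU based on the active root, e.g.:
 *
 *	if (is_tdp_mmu(vcpu->arch.mmu))
 *		r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable,
 *				    max_level, pfn, prefault);
 */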

#endif /* __KVM_X86_MMU_TDP_MMU_H */