// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

#include <nvhe/mem_protect.h>

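/*
 * Host context saved across a TLB invalidation: only TCR_EL1 needs
 * stashing, and only for the ARM erratum 1319367 workaround below.
 */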
struct tlb_inv_context {
	u64 tcr;
};

static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
				  struct tlb_inv_context *cxt,
				  bool nsh)
{
	/*
	 * We have two requirements:
	 *
	 * - ensure that the page table updates are visible to all
	 *   CPUs, for which a dsb(DOMAIN-st) is what we need, DOMAIN
	 *   being either ish or nsh, depending on the invalidation
	 *   type.
	 *
	 * - complete any speculative page table walk started before
	 *   we trapped to EL2 so that we can mess with the MM
	 *   registers out of context, for which dsb(nsh) is enough
	 *
	 * The composition of these two barriers is a dsb(DOMAIN), and
	 * the 'nsh' parameter tracks the distinction between
	 * Inner-Shareable and Non-Shareable, as specified by the
	 * callers.
	 */
	if (nsh)
		dsb(nsh);
	else
		dsb(ish);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * For CPUs that are affected by ARM erratum 1319367, we
		 * need to avoid a host Stage-1 walk while we have the
		 * guest's VMID set in the VTTBR in order to invalidate
		 * TLBs. We're guaranteed that the S1 MMU is enabled, so
		 * we can simply set the EPD bits to avoid any further
		 * TLB fill.
		 */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);
		isb();
	}

	/*
	 * __load_stage2() includes an ISB only when the AT
	 * workaround is applied. Take care of the opposite condition,
	 * ensuring that we always have an ISB, but not two ISBs back
	 * to back.
	 */
	__load_stage2(mmu, kern_hyp_va(mmu->arch));
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}

static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
	__load_host_stage2();

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* Ensure write of the host VMID */
		isb();
		/* Restore the host's TCR_EL1 */
		write_sysreg_el1(cxt->tcr, SYS_TCR);
	}
}
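
/*
 * Illustrative sketch, not part of the original file: every flush helper
 * below follows the same pairing of __tlb_switch_to_guest() and
 * __tlb_switch_to_host() around the actual TLBI sequence. The helper name
 * and the __maybe_unused annotation are hypothetical, for illustration
 * only.
 */
static void __maybe_unused __hyp_example_flush_all(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Install the guest's VMID (Inner-Shareable barrier variant) */
	__tlb_switch_to_guest(mmu, &cxt, false);

	/* Invalidate all S1+S2 entries for this VMID, then synchronize */
	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	/* Restore the host's Stage-2 configuration */
	__tlb_switch_to_host(&cxt);
}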

void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt, false);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
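	/*
	 * The TLBI IPA operand is expressed in units of 4KiB pages, hence
	 * the shift below; on FEAT_TTL-capable CPUs, __tlbi_level() also
	 * encodes the level as a translation-level hint.
	 */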
	ipa >>= 12;
	__tlbi_level(ipas2e1is, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	/*
	 * If the host is running at EL1 and we have a VPIPT I-cache,
	 * then we must perform I-cache maintenance at EL2 in order for
	 * it to have an effect on the guest. Since the guest cannot hit
	 * I-cache lines allocated with a different VMID, we don't need
	 * to worry about junk out of guest reset (we nuke the I-cache on
	 * VMID rollover), but we do need to be careful when remapping
	 * executable pages for the same guest. This can happen when KSM
	 * takes a CoW fault on an executable page, copies the page into
	 * a page that was previously mapped in the guest and then needs
	 * to invalidate the guest view of the I-cache for that page
	 * from EL1. To solve this, we invalidate the entire I-cache when
	 * unmapping a page from a guest if we have a VPIPT I-cache but
	 * the host is running at EL1. As above, we could do better if
	 * we had the VA.
	 *
	 * The moral of this story is: if you have a VPIPT I-cache, then
	 * you should be running with VHE enabled.
	 */
	if (icache_is_vpipt())
		icache_inval_all_pou();

	__tlb_switch_to_host(&cxt);
}

void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
				  phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt, true);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	ipa >>= 12;
	__tlbi_level(ipas2e1, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(nsh);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();

	/* See the comment in __kvm_tlb_flush_vmid_ipa() */
	if (icache_is_vpipt())
		icache_inval_all_pou();

	__tlb_switch_to_host(&cxt);
}

void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
				phys_addr_t start, unsigned long pages)
{
	struct tlb_inv_context cxt;
	unsigned long stride;

	/*
	 * Since the range of addresses may not be mapped at
	 * the same level, assume the worst-case stride of PAGE_SIZE.
	 */
	stride = PAGE_SIZE;
	start = round_down(start, stride);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt, false);

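	/*
	 * __flush_s2_tlb_range_op() uses the range-based TLBI
	 * (RIPAS2E1IS) when the CPU implements FEAT_TLBIRANGE, and
	 * otherwise falls back to one TLBI per stride.
	 */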
	__flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);

	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	/* See the comment in __kvm_tlb_flush_vmid_ipa() */
	if (icache_is_vpipt())
		icache_inval_all_pou();

	__tlb_switch_to_host(&cxt);
}
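
/*
 * Illustrative, hypothetical usage sketch, not part of the original file:
 * invalidating the range [addr, addr + size) for a guest, assuming size
 * is a multiple of PAGE_SIZE. The helper name is made up for illustration.
 */
static void __maybe_unused __hyp_example_flush_range(struct kvm_s2_mmu *mmu,
						     phys_addr_t addr,
						     size_t size)
{
	/* The interface takes a page count, not a byte length */
	__kvm_tlb_flush_vmid_range(mmu, addr, size >> PAGE_SHIFT);
}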

void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt, false);

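	/* Invalidate all Stage-1 and Stage-2 entries for this VMID (IS) */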
	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt, false);

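	/*
	 * Non-shareable forms: this only needs to take effect on the
	 * local CPU, so both the TLB and the I-cache invalidation are
	 * kept local and completed with a dsb(nsh).
	 */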
	__tlbi(vmalle1);
	asm volatile("ic iallu");
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_flush_vm_context(void)
{
	/* Same remark as in __tlb_switch_to_guest() */
	dsb(ish);
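	/* Invalidate EL1&0 TLB entries for all VMIDs, Inner Shareable */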
	__tlbi(alle1is);

	/*
	 * VIPT and PIPT caches are not affected by VMID, so no maintenance
	 * is necessary across a VMID rollover.
	 *
	 * VPIPT caches constrain lookup and maintenance to the active VMID,
	 * so we need to invalidate lines with a stale VMID to avoid an ABA
	 * race after multiple rollovers.
	 */
	if (icache_is_vpipt())
		asm volatile("ic ialluis");

	dsb(ish);
}