// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/insn-def.h>

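/*
 * The Svinval extension provides fine-grained invalidation instructions
 * (HINVAL.GVMA/HINVAL.VVMA) which must be bracketed by SFENCE.W.INVAL and
 * SFENCE.INVAL.IR. When the extension is available (detected via the ISA
 * extension static key below), the local fence helpers prefer these over
 * plain HFENCE.GVMA/HFENCE.VVMA loops.
 */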
#define has_svinval()	\
	static_branch_unlikely(&riscv_isa_ext_keys[RISCV_ISA_EXT_KEY_SVINVAL])

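/*
 * Flush G-stage (guest-physical to host-physical) TLB entries on the local
 * hart for the given VMID and GPA range. If the range spans more than
 * PTRS_PER_PTE entries of the given order, fall back to flushing everything
 * for the VMID. HFENCE.GVMA/HINVAL.GVMA take the guest physical address
 * shifted right by 2, hence the "pos >> 2" below.
 */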
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order)
{
	gpa_t pos;

	if (PTRS_PER_PTE < (gpsz >> order)) {
		kvm_riscv_local_hfence_gvma_vmid_all(vmid);
		return;
	}

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile (HINVAL_GVMA(%0, %1)
			: : "r" (pos >> 2), "r" (vmid) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile (HFENCE_GVMA(%0, %1)
			: : "r" (pos >> 2), "r" (vmid) : "memory");
	}
}

void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid)
{
	asm volatile(HFENCE_GVMA(zero, %0) : : "r" (vmid) : "memory");
}

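/*
 * Same as above but with rs2 = zero (no VMID operand), so the flush applies
 * to the GPA range across all VMIDs.
 */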
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order)
{
	gpa_t pos;

	if (PTRS_PER_PTE < (gpsz >> order)) {
		kvm_riscv_local_hfence_gvma_all();
		return;
	}

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile(HINVAL_GVMA(%0, zero)
			: : "r" (pos >> 2) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile(HFENCE_GVMA(%0, zero)
			: : "r" (pos >> 2) : "memory");
	}
}

void kvm_riscv_local_hfence_gvma_all(void)
{
	asm volatile(HFENCE_GVMA(zero, zero) : : : "memory");
}

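/*
 * Flush VS-stage (guest-virtual to guest-physical) TLB entries on the local
 * hart. HFENCE.VVMA/HINVAL.VVMA always operate on the VMID currently
 * programmed in hgatp, so these helpers temporarily swap the target VMID
 * into CSR_HGATP and restore the previous value afterwards.
 */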
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order)
{
	unsigned long pos, hgatp;

	if (PTRS_PER_PTE < (gvsz >> order)) {
		kvm_riscv_local_hfence_vvma_asid_all(vmid, asid);
		return;
	}

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HINVAL_VVMA(%0, %1)
			: : "r" (pos), "r" (asid) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HFENCE_VVMA(%0, %1)
			: : "r" (pos), "r" (asid) : "memory");
	}

	csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid)
{
	unsigned long hgatp;

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	asm volatile(HFENCE_VVMA(zero, %0) : : "r" (asid) : "memory");

	csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order)
{
	unsigned long pos, hgatp;

	if (PTRS_PER_PTE < (gvsz >> order)) {
		kvm_riscv_local_hfence_vvma_all(vmid);
		return;
	}

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HINVAL_VVMA(%0, zero)
			: : "r" (pos) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HFENCE_VVMA(%0, zero)
			: : "r" (pos) : "memory");
	}

	csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_hfence_vvma_all(unsigned long vmid)
{
	unsigned long hgatp;

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	asm volatile(HFENCE_VVMA(zero, zero) : : : "memory");

	csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu)
{
	unsigned long vmid;

	if (!kvm_riscv_gstage_vmid_bits() ||
	    vcpu->arch.last_exit_cpu == vcpu->cpu)
		return;

	/*
	 * On RISC-V platforms with hardware VMID support, we share the same
	 * VMID for all VCPUs of a particular Guest/VM. This means we might
	 * have stale G-stage TLB entries on the current Host CPU due to
	 * some other VCPU of the same Guest which ran previously on the
	 * current Host CPU.
	 *
	 * To clean up stale TLB entries, we simply flush all G-stage TLB
	 * entries by VMID whenever the underlying Host CPU changes for a VCPU.
	 */

	vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
	kvm_riscv_local_hfence_gvma_vmid_all(vmid);
}

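/*
 * The *_process() handlers below perform the requested flush locally on the
 * host CPU running the VCPU; they are intended to run in VCPU context in
 * response to the corresponding KVM_REQ_* request issued by
 * make_xfence_request() further down.
 */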
void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
{
	local_flush_icache_all();
}

void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu)
{
	struct kvm_vmid *vmid;

	vmid = &vcpu->kvm->arch.vmid;
	kvm_riscv_local_hfence_gvma_vmid_all(READ_ONCE(vmid->vmid));
}

void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu)
{
	struct kvm_vmid *vmid;

	vmid = &vcpu->kvm->arch.vmid;
	kvm_riscv_local_hfence_vvma_all(READ_ONCE(vmid->vmid));
}

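/*
 * Per-VCPU hfence queue: a fixed-size ring of KVM_RISCV_VCPU_MAX_HFENCE
 * entries protected by hfence_lock. A zero 'type' field marks a free slot,
 * so dequeue clears the slot it consumes and enqueue only writes into
 * empty slots.
 */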
static bool vcpu_hfence_dequeue(struct kvm_vcpu *vcpu,
				struct kvm_riscv_hfence *out_data)
{
	bool ret = false;
	struct kvm_vcpu_arch *varch = &vcpu->arch;

	spin_lock(&varch->hfence_lock);

	if (varch->hfence_queue[varch->hfence_head].type) {
		memcpy(out_data, &varch->hfence_queue[varch->hfence_head],
		       sizeof(*out_data));
		varch->hfence_queue[varch->hfence_head].type = 0;

		varch->hfence_head++;
		if (varch->hfence_head == KVM_RISCV_VCPU_MAX_HFENCE)
			varch->hfence_head = 0;

		ret = true;
	}

	spin_unlock(&varch->hfence_lock);

	return ret;
}

static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,
				const struct kvm_riscv_hfence *data)
{
	bool ret = false;
	struct kvm_vcpu_arch *varch = &vcpu->arch;

	spin_lock(&varch->hfence_lock);

	if (!varch->hfence_queue[varch->hfence_tail].type) {
		memcpy(&varch->hfence_queue[varch->hfence_tail],
		       data, sizeof(*data));

		varch->hfence_tail++;
		if (varch->hfence_tail == KVM_RISCV_VCPU_MAX_HFENCE)
			varch->hfence_tail = 0;

		ret = true;
	}

	spin_unlock(&varch->hfence_lock);

	return ret;
}

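/*
 * Drain this VCPU's hfence queue and perform each queued flush locally
 * using the VM's current VMID.
 */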
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
{
	struct kvm_riscv_hfence d = { 0 };
	struct kvm_vmid *v = &vcpu->kvm->arch.vmid;

	while (vcpu_hfence_dequeue(vcpu, &d)) {
		switch (d.type) {
		case KVM_RISCV_HFENCE_UNKNOWN:
			break;
		case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
			kvm_riscv_local_hfence_gvma_vmid_gpa(
						READ_ONCE(v->vmid),
						d.addr, d.size, d.order);
			break;
		case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
			kvm_riscv_local_hfence_vvma_asid_gva(
						READ_ONCE(v->vmid), d.asid,
						d.addr, d.size, d.order);
			break;
		case KVM_RISCV_HFENCE_VVMA_ASID_ALL:
			kvm_riscv_local_hfence_vvma_asid_all(
						READ_ONCE(v->vmid), d.asid);
			break;
		case KVM_RISCV_HFENCE_VVMA_GVA:
			kvm_riscv_local_hfence_vvma_gva(
						READ_ONCE(v->vmid),
						d.addr, d.size, d.order);
			break;
		default:
			break;
		}
	}
}

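/*
 * Build a mask of target VCPUs from hbase/hmask (hbase == -1UL means all
 * VCPUs), optionally enqueue range data on each target's hfence queue, and
 * then kick the targets with a KVM request. If any target's queue is full,
 * the request is downgraded to the more conservative fallback_req.
 */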
static void make_xfence_request(struct kvm *kvm,
				unsigned long hbase, unsigned long hmask,
				unsigned int req, unsigned int fallback_req,
				const struct kvm_riscv_hfence *data)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;
	unsigned int actual_req = req;
	DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);

	bitmap_clear(vcpu_mask, 0, KVM_MAX_VCPUS);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (hbase != -1UL) {
			if (vcpu->vcpu_id < hbase)
				continue;
			if (!(hmask & (1UL << (vcpu->vcpu_id - hbase))))
				continue;
		}

		bitmap_set(vcpu_mask, i, 1);

		if (!data || !data->type)
			continue;

		/*
		 * Enqueue hfence data to VCPU hfence queue. If we don't
		 * have space in the VCPU hfence queue then fall back to
		 * a more conservative hfence request.
		 */
		if (!vcpu_hfence_enqueue(vcpu, data))
			actual_req = fallback_req;
	}

	kvm_make_vcpus_request_mask(kvm, actual_req, vcpu_mask);
}

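/*
 * Remote fence API used by the rest of KVM RISC-V (e.g. the SBI remote-fence
 * and G-stage MMU paths): each helper targets the VCPUs selected by
 * hbase/hmask and issues the appropriate request via make_xfence_request().
 */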
void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask)
{
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_FENCE_I,
			    KVM_REQ_FENCE_I, NULL);
}

void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_GVMA_VMID_GPA;
	data.asid = 0;
	data.addr = gpa;
	data.size = gpsz;
	data.order = order;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_GVMA_VMID_ALL, &data);
}

void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask)
{
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_GVMA_VMID_ALL,
			    KVM_REQ_HFENCE_GVMA_VMID_ALL, NULL);
}

void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_VVMA_ASID_GVA;
	data.asid = asid;
	data.addr = gva;
	data.size = gvsz;
	data.order = order;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_VVMA_ASID_ALL;
	data.asid = asid;
	data.addr = data.size = data.order = 0;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_VVMA_GVA;
	data.asid = 0;
	data.addr = gva;
	data.size = gvsz;
	data.order = order;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask)
{
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_VVMA_ALL,
			    KVM_REQ_HFENCE_VVMA_ALL, NULL);
}