// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kvm_host.h>
#include <linux/irqchip/arm-gic-v3.h>

#include "vgic.h"
/*
 * How KVM uses GICv4 (insert rude comments here):
 *
 * The vgic-v4 layer acts as a bridge between several entities:
 * - The GICv4 ITS representation offered by the ITS driver
 * - VFIO, which is in charge of the PCI endpoint
 * - The virtual ITS, which is the only thing the guest sees
 *
 * The configuration of VLPIs is triggered by a callback from VFIO,
 * instructing KVM that a PCI device has been configured to deliver
 * MSIs to a vITS.
 *
 * kvm_vgic_v4_set_forwarding() is thus called with the routing entry,
 * and this is used to find the corresponding vITS data structures
 * (ITS instance, device, event and irq) using a process that is
 * extremely similar to the injection of an MSI.
 *
 * At this stage, we can link the guest's view of an LPI (uniquely
 * identified by the routing entry) and the host irq, using the GICv4
 * driver mapping operation. Should the mapping succeed, we've then
 * successfully upgraded the guest's LPI to a VLPI. We can then start
 * with updating GICv4's view of the property table and generating an
 * INValidation in order to kickstart the delivery of this VLPI to the
 * guest directly, without software intervention. Well, almost.
 *
 * When the PCI endpoint is deconfigured, this operation is reversed
 * with VFIO calling kvm_vgic_v4_unset_forwarding().
 *
 * Once the VLPI has been mapped, it needs to follow any change the
 * guest performs on its LPI through the vITS. For that, a number of
 * command handlers have hooks to communicate these changes to the HW:
 * - Any invalidation triggers a call to its_prop_update_vlpi()
 * - The INT command results in an irq_set_irqchip_state(), which
 *   generates an INT on the corresponding VLPI (see the sketch after
 *   this list).
 * - The CLEAR command results in an irq_set_irqchip_state(), which
 *   generates a CLEAR on the corresponding VLPI.
 * - DISCARD translates into an unmap, similar to a call to
 *   kvm_vgic_v4_unset_forwarding().
 * - MOVI is translated by an update of the existing mapping, changing
 *   the target vcpu, resulting in a VMOVI being generated.
 * - MOVALL is translated by a string of mapping updates (similar to
 *   the handling of MOVI). MOVALL is horrible.
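 *
 * As a sketch only (host_irq is a hypothetical variable standing for
 * the VLPI's host interrupt; irq_set_irqchip_state() is the genuine
 * kernel API), the INT and CLEAR hooks boil down to:
 *
 *	// INT: make the VLPI pending
 *	irq_set_irqchip_state(host_irq, IRQCHIP_STATE_PENDING, true);
 *	// CLEAR: retire the pending state
 *	irq_set_irqchip_state(host_irq, IRQCHIP_STATE_PENDING, false);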
 *
 * Note that a DISCARD/MAPTI sequence emitted from the guest without
 * reprogramming the PCI endpoint after MAPTI does not result in a
 * VLPI being mapped, as there is no callback from VFIO (the guest
 * will get the interrupt via the normal SW injection). Fixing this is
 * not trivial, and requires some horrible messing with the VFIO
 * internals. Not fun. Don't do that.
 *
 * Then there is the scheduling. Each time a vcpu is about to run on a
 * physical CPU, KVM must tell the corresponding redistributor about
 * it. And if we've migrated our vcpu from one CPU to another, we must
 * tell the ITS (so that the messages reach the right redistributor).
 * This is done in two steps: first issue an irq_set_affinity() on the
 * irq corresponding to the vcpu, then call its_make_vpe_resident().
 * You must be in a non-preemptible context. On exit, a call to
 * its_make_vpe_non_resident() tells the redistributor that we're done
 * with the vcpu.
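 *
 * A minimal sketch of this sequence (it mirrors what vgic_v4_load()
 * and vgic_v4_put() below actually do; error handling elided):
 *
 *	// about to run, preemption already disabled:
 *	irq_set_affinity(vpe->irq, cpumask_of(smp_processor_id()));
 *	its_make_vpe_resident(vpe, false, vgic_enabled);
 *	// done running:
 *	its_make_vpe_non_resident(vpe, need_db);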
 *
 * Finally, the doorbell handling: Each vcpu is allocated an interrupt
 * which will fire each time a VLPI is made pending whilst the vcpu is
 * not running. Each time the vcpu gets blocked, the doorbell
 * interrupt gets enabled. When the vcpu is unblocked (for whatever
 * reason), the doorbell interrupt is disabled.
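 *
 * As a sketch (on GICv4.0 the arming is driven by the need_db
 * argument to vgic_v4_put(); GICv4.1 manages the doorbell in HW
 * instead):
 *
 *	vgic_v4_put(vcpu, true);	// vcpu blocks: arm the doorbell
 *	vgic_v4_load(vcpu);		// vcpu resumes: doorbell quiesced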
 */

#define DB_IRQ_FLAGS (IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY | IRQ_NO_BALANCING)

static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
{
	struct kvm_vcpu *vcpu = info;

	/* We got the message, no need to fire again */
	if (!kvm_vgic_global_state.has_gicv4_1 &&
	    !irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
		disable_irq_nosync(irq);

	/*
	 * The v4.1 doorbell can fire concurrently with the vPE being
	 * made non-resident. Ensure we only update pending_last
	 * *after* the non-residency sequence has completed.
	 */
	raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
	vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
	raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);

	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return IRQ_HANDLED;
}

static void vgic_v4_sync_sgi_config(struct its_vpe *vpe, struct vgic_irq *irq)
{
	vpe->sgi_config[irq->intid].enabled = irq->enabled;
	vpe->sgi_config[irq->intid].group = irq->group;
	vpe->sgi_config[irq->intid].priority = irq->priority;
}

static void vgic_v4_enable_vsgis(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
	int i;

	/*
	 * With GICv4.1, every virtual SGI can be directly injected. So
	 * let's pretend that they are HW interrupts, tied to a host
	 * IRQ. The SGI code will do its magic.
	 */
	for (i = 0; i < VGIC_NR_SGIS; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
		struct irq_desc *desc;
		unsigned long flags;
		int ret;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw)
			goto unlock;

		irq->hw = true;
		irq->host_irq = irq_find_mapping(vpe->sgi_domain, i);

		/* Transfer the full irq state to the vPE */
		vgic_v4_sync_sgi_config(vpe, irq);
		desc = irq_to_desc(irq->host_irq);
		ret = irq_domain_activate_irq(irq_desc_get_irq_data(desc),
					      false);
		if (!WARN_ON(ret)) {
			/* Transfer pending state */
			ret = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    irq->pending_latch);
			WARN_ON(ret);
			irq->pending_latch = false;
		}
	unlock:
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

static void vgic_v4_disable_vsgis(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < VGIC_NR_SGIS; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
		struct irq_desc *desc;
		unsigned long flags;
		int ret;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (!irq->hw)
			goto unlock;

		irq->hw = false;
		ret = irq_get_irqchip_state(irq->host_irq,
					    IRQCHIP_STATE_PENDING,
					    &irq->pending_latch);
		WARN_ON(ret);

		desc = irq_to_desc(irq->host_irq);
		irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
	unlock:
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

/* Must be called with the kvm lock held */
void vgic_v4_configure_vsgis(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i;

	kvm_arm_halt_guest(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (dist->nassgireq)
			vgic_v4_enable_vsgis(vcpu);
		else
			vgic_v4_disable_vsgis(vcpu);
	}

	kvm_arm_resume_guest(kvm);
}

/*
 * Must be called with GICv4.1 and the vPE unmapped: unmapping
 * guarantees that any VPT caches associated with the vPE have been
 * invalidated, so we can get the VLPI state by peeking at the VPT.
 */
void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val)
{
	struct its_vpe *vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
	int mask = BIT(irq->intid % BITS_PER_BYTE);
	void *va;
	u8 *ptr;

	va = page_address(vpe->vpt_page);
	ptr = va + irq->intid / BITS_PER_BYTE;

	*val = !!(*ptr & mask);
}
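
/*
 * A sketch of a caller honouring the constraint above (hypothetical
 * variables; the vITS table-save path does something very similar):
 *
 *	bool is_pending = false;
 *
 *	if (irq->hw && kvm_vgic_global_state.has_gicv4_1)
 *		vgic_v4_get_vlpi_state(irq, &is_pending);
 */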

/**
 * vgic_v4_init - Initialize the GICv4 data structures
 * @kvm: Pointer to the VM being initialized
 *
 * We may be called each time a vITS is created, or when the
 * vgic is initialized. This relies on kvm->lock to be
 * held. In both cases, the number of vcpus should now be
 * fixed.
 */
int vgic_v4_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, nr_vcpus, ret;

	if (!kvm_vgic_global_state.has_gicv4)
		return 0; /* Nothing to see here... move along. */

	if (dist->its_vm.vpes)
		return 0;

	nr_vcpus = atomic_read(&kvm->online_vcpus);

	dist->its_vm.vpes = kcalloc(nr_vcpus, sizeof(*dist->its_vm.vpes),
				    GFP_KERNEL);
	if (!dist->its_vm.vpes)
		return -ENOMEM;

	dist->its_vm.nr_vpes = nr_vcpus;

	kvm_for_each_vcpu(i, vcpu, kvm)
		dist->its_vm.vpes[i] = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

	ret = its_alloc_vcpu_irqs(&dist->its_vm);
	if (ret < 0) {
		kvm_err("VPE IRQ allocation failure\n");
		kfree(dist->its_vm.vpes);
		dist->its_vm.nr_vpes = 0;
		dist->its_vm.vpes = NULL;
		return ret;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		int irq = dist->its_vm.vpes[i]->irq;
		unsigned long irq_flags = DB_IRQ_FLAGS;

		/*
		 * Don't automatically enable the doorbell, as we're
		 * flipping it back and forth when the vcpu gets
		 * blocked. Also disable the lazy disabling, as the
		 * doorbell could kick us out of the guest too
		 * early...
		 *
		 * On GICv4.1, the doorbell is managed in HW and must
		 * be left enabled.
		 */
		if (kvm_vgic_global_state.has_gicv4_1)
			irq_flags &= ~IRQ_NOAUTOEN;
		irq_set_status_flags(irq, irq_flags);

		ret = request_irq(irq, vgic_v4_doorbell_handler,
				  0, "vcpu", vcpu);
		if (ret) {
			kvm_err("failed to allocate vcpu IRQ%d\n", irq);
			/*
			 * Trick: adjust the number of vpes so we know
			 * how many to nuke on teardown...
			 */
			dist->its_vm.nr_vpes = i;
			break;
		}
	}

	if (ret)
		vgic_v4_teardown(kvm);

	return ret;
}
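
/*
 * A minimal usage sketch (hypothetical call site; as the kernel-doc
 * above notes, kvm->lock must be held around the call):
 *
 *	mutex_lock(&kvm->lock);
 *	ret = vgic_v4_init(kvm);
 *	mutex_unlock(&kvm->lock);
 */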

/**
 * vgic_v4_teardown - Free the GICv4 data structures
 * @kvm: Pointer to the VM being destroyed
 *
 * Relies on kvm->lock to be held.
 */
void vgic_v4_teardown(struct kvm *kvm)
{
	struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
	int i;

	if (!its_vm->vpes)
		return;

	for (i = 0; i < its_vm->nr_vpes; i++) {
		struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, i);
		int irq = its_vm->vpes[i]->irq;

		irq_clear_status_flags(irq, DB_IRQ_FLAGS);
		free_irq(irq, vcpu);
	}

	its_free_vcpu_irqs(its_vm);
	kfree(its_vm->vpes);
	its_vm->nr_vpes = 0;
	its_vm->vpes = NULL;
}

int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

	if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
		return 0;

	return its_make_vpe_non_resident(vpe, need_db);
}

int vgic_v4_load(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
	int err;

	if (!vgic_supports_direct_msis(vcpu->kvm) || vpe->resident)
		return 0;

	/*
	 * Before making the VPE resident, make sure the redistributor
	 * corresponding to our current CPU expects us here. See the
	 * doc in drivers/irqchip/irq-gic-v4.c to understand how this
	 * turns into a VMOVP command at the ITS level.
	 */
	err = irq_set_affinity(vpe->irq, cpumask_of(smp_processor_id()));
	if (err)
		return err;

	err = its_make_vpe_resident(vpe, false, vcpu->kvm->arch.vgic.enabled);
	if (err)
		return err;

	/*
	 * Now that the VPE is resident, let's get rid of a potential
	 * doorbell interrupt that would still be pending. This is a
	 * GICv4.0 only "feature"...
	 */
	if (!kvm_vgic_global_state.has_gicv4_1)
		err = irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);

	return err;
}

void vgic_v4_commit(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

	/*
	 * No need to wait for the vPE to be ready across a shallow guest
	 * exit, as only a vcpu_put will invalidate it.
	 */
	if (!vpe->ready)
		its_commit_vpe(vpe);
}

static struct vgic_its *vgic_get_its(struct kvm *kvm,
				     struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct kvm_msi msi = (struct kvm_msi) {
		.address_lo = irq_entry->msi.address_lo,
		.address_hi = irq_entry->msi.address_hi,
		.data = irq_entry->msi.data,
		.flags = irq_entry->msi.flags,
		.devid = irq_entry->msi.devid,
	};

	return vgic_msi_to_its(kvm, &msi);
}

int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
			       struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct vgic_its *its;
	struct vgic_irq *irq;
	struct its_vlpi_map map;
	unsigned long flags;
	int ret;

	if (!vgic_supports_direct_msis(kvm))
		return 0;

	/*
	 * Get the ITS, and escape early on error (not a valid
	 * doorbell for any of our vITSs).
	 */
	its = vgic_get_its(kvm, irq_entry);
	if (IS_ERR(its))
		return 0;

	mutex_lock(&its->its_lock);

	/* Perform the actual DevID/EventID -> LPI translation. */
	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
				   irq_entry->msi.data, &irq);
	if (ret)
		goto out;

	/*
	 * Emit the mapping request. If it fails, the ITS probably
	 * isn't v4 compatible, so let's silently bail out. Holding
	 * the ITS lock should ensure that nothing can modify the
	 * target vcpu.
	 */
	map = (struct its_vlpi_map) {
		.vm = &kvm->arch.vgic.its_vm,
		.vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe,
		.vintid = irq->intid,
		.properties = ((irq->priority & 0xfc) |
			       (irq->enabled ? LPI_PROP_ENABLED : 0) |
			       LPI_PROP_GROUP1),
		.db_enabled = true,
	};

	ret = its_map_vlpi(virq, &map);
	if (ret)
		goto out;

	irq->hw = true;
	irq->host_irq = virq;
	atomic_inc(&map.vpe->vlpi_count);

	/* Transfer pending state */
	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->pending_latch) {
		ret = irq_set_irqchip_state(irq->host_irq,
					    IRQCHIP_STATE_PENDING,
					    irq->pending_latch);
		WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);

		/*
		 * Clear pending_latch and communicate this state
		 * change via vgic_queue_irq_unlock.
		 */
		irq->pending_latch = false;
		vgic_queue_irq_unlock(kvm, irq, flags);
	} else {
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	}

out:
	mutex_unlock(&its->its_lock);
	return ret;
}
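
/*
 * For reference, a sketch of the irqbypass glue that invokes the
 * function above (it lives in the arch code, not in this file;
 * assuming the usual kvm_kernel_irqfd producer/consumer pairing):
 *
 *	struct kvm_kernel_irqfd *irqfd;
 *
 *	irqfd = container_of(cons, struct kvm_kernel_irqfd, consumer);
 *	return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
 *					  &irqfd->irq_entry);
 */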

int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
				 struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct vgic_its *its;
	struct vgic_irq *irq;
	int ret;

	if (!vgic_supports_direct_msis(kvm))
		return 0;

	/*
	 * Get the ITS, and escape early on error (not a valid
	 * doorbell for any of our vITSs).
	 */
	its = vgic_get_its(kvm, irq_entry);
	if (IS_ERR(its))
		return 0;

	mutex_lock(&its->its_lock);

	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
				   irq_entry->msi.data, &irq);
	if (ret)
		goto out;

	WARN_ON(!(irq->hw && irq->host_irq == virq));
	if (irq->hw) {
		atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
		irq->hw = false;
		ret = its_unmap_vlpi(virq);
	}

out:
	mutex_unlock(&its->its_lock);
	return ret;
}