Lines matching "redistributor-stride"
1 // SPDX-License-Identifier: GPL-2.0-only
3 #include <linux/irqchip/arm-gic-v3.h>
23 struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3; in vgic_v3_set_underflow()
25 cpuif->vgic_hcr |= ICH_HCR_UIE; in vgic_v3_set_underflow()
36 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; in vgic_v3_fold_lr_state()
37 struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3; in vgic_v3_fold_lr_state()
38 u32 model = vcpu->kvm->arch.vgic.vgic_model; in vgic_v3_fold_lr_state()
43 cpuif->vgic_hcr &= ~ICH_HCR_UIE; in vgic_v3_fold_lr_state()
45 for (lr = 0; lr < cpuif->used_lrs; lr++) { in vgic_v3_fold_lr_state()
46 u64 val = cpuif->vgic_lr[lr]; in vgic_v3_fold_lr_state()
62 /* Notify fds when the guest EOI'ed a level-triggered IRQ */ in vgic_v3_fold_lr_state()
63 if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid)) in vgic_v3_fold_lr_state()
64 kvm_notify_acked_irq(vcpu->kvm, 0, in vgic_v3_fold_lr_state()
65 intid - VGIC_NR_PRIVATE_IRQS); in vgic_v3_fold_lr_state()
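/*
 * Editor's note (not part of the file): kvm_notify_acked_irq() takes a
 * zero-based SPI pin, so the vgic INTID has the 32 private interrupts
 * (16 SGIs + 16 PPIs, i.e. VGIC_NR_PRIVATE_IRQS) subtracted first.
 * For example, an EOI of INTID 42 notifies the resampler for SPI pin 10.
 */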
67 irq = vgic_get_irq(vcpu->kvm, vcpu, intid); in vgic_v3_fold_lr_state()
71 raw_spin_lock(&irq->irq_lock); in vgic_v3_fold_lr_state()
74 deactivated = irq->active && !(val & ICH_LR_ACTIVE_BIT); in vgic_v3_fold_lr_state()
75 irq->active = !!(val & ICH_LR_ACTIVE_BIT); in vgic_v3_fold_lr_state()
77 if (irq->active && is_v2_sgi) in vgic_v3_fold_lr_state()
78 irq->active_source = cpuid; in vgic_v3_fold_lr_state()
81 if (irq->config == VGIC_CONFIG_EDGE && in vgic_v3_fold_lr_state()
83 irq->pending_latch = true; in vgic_v3_fold_lr_state()
86 irq->source |= (1 << cpuid); in vgic_v3_fold_lr_state()
92 if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE)) in vgic_v3_fold_lr_state()
93 irq->pending_latch = false; in vgic_v3_fold_lr_state()
98 raw_spin_unlock(&irq->irq_lock); in vgic_v3_fold_lr_state()
99 vgic_put_irq(vcpu->kvm, irq); in vgic_v3_fold_lr_state()
102 cpuif->used_lrs = 0; in vgic_v3_fold_lr_state()
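/*
 * Illustrative sketch (editor's addition, not in the file): the fields the
 * fold loop above reads back out of a saved ICH_LR_EL2 value, using the
 * macros from <linux/irqchip/arm-gic-v3.h>.  The helper name is made up.
 */
static inline void example_decode_lr(u64 val)
{
	u32 intid    = val & ICH_LR_VIRTUAL_ID_MASK;	/* vINTID, bits [31:0] */
	bool pending = val & ICH_LR_PENDING_BIT;	/* bit 62 */
	bool active  = val & ICH_LR_ACTIVE_BIT;		/* bit 63 */
	u8 prio      = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

	pr_debug("LR: intid %u P:%d A:%d prio %d\n", intid, pending, active, prio);
}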
108 u32 model = vcpu->kvm->arch.vgic.vgic_model; in vgic_v3_populate_lr()
109 u64 val = irq->intid; in vgic_v3_populate_lr()
112 is_v2_sgi = (vgic_irq_is_sgi(irq->intid) && in vgic_v3_populate_lr()
115 if (irq->active) { in vgic_v3_populate_lr()
118 val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT; in vgic_v3_populate_lr()
125 if (irq->hw && !vgic_irq_needs_resampling(irq)) { in vgic_v3_populate_lr()
127 val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT; in vgic_v3_populate_lr()
133 if (irq->active) in vgic_v3_populate_lr()
136 if (irq->config == VGIC_CONFIG_LEVEL) { in vgic_v3_populate_lr()
143 if (irq->active) in vgic_v3_populate_lr()
151 if (irq->config == VGIC_CONFIG_EDGE) in vgic_v3_populate_lr()
152 irq->pending_latch = false; in vgic_v3_populate_lr()
154 if (vgic_irq_is_sgi(irq->intid) && in vgic_v3_populate_lr()
156 u32 src = ffs(irq->source); in vgic_v3_populate_lr()
159 irq->intid)) in vgic_v3_populate_lr()
162 val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT; in vgic_v3_populate_lr()
163 irq->source &= ~(1 << (src - 1)); in vgic_v3_populate_lr()
164 if (irq->source) { in vgic_v3_populate_lr()
165 irq->pending_latch = true; in vgic_v3_populate_lr()
172 * Level-triggered mapped IRQs are special because we only observe in vgic_v3_populate_lr()
178 irq->line_level = false; in vgic_v3_populate_lr()
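/*
 * Editor's note (hedged, not in the file): only rising edges are observed
 * for mapped level-triggered interrupts, so the cached line_level is
 * dropped once the pending state has been transferred into the LR; a new
 * edge on the physical line is then needed to make the interrupt pending
 * again.
 */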
180 if (irq->group) in vgic_v3_populate_lr()
183 val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT; in vgic_v3_populate_lr()
185 vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val; in vgic_v3_populate_lr()
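/*
 * Illustrative sketch (editor's addition, not in the file): composing a
 * minimal LR value the way vgic_v3_populate_lr() does for a pending,
 * group-1 interrupt.  INTID 42 and priority 0xa0 are example values only.
 */
static inline u64 example_encode_lr(void)
{
	u64 val = 42;					/* vINTID in bits [31:0] */

	val |= ICH_LR_PENDING_BIT;			/* pending, not yet active */
	val |= ICH_LR_GROUP;				/* irq->group == 1 */
	val |= (u64)0xa0 << ICH_LR_PRIORITY_SHIFT;	/* irq->priority */
	return val;
}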
190 vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0; in vgic_v3_clear_lr()
195 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; in vgic_v3_set_vmcr()
196 u32 model = vcpu->kvm->arch.vgic.vgic_model; in vgic_v3_set_vmcr()
200 vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) & in vgic_v3_set_vmcr()
202 vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) & in vgic_v3_set_vmcr()
212 vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK; in vgic_v3_set_vmcr()
213 vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK; in vgic_v3_set_vmcr()
214 vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK; in vgic_v3_set_vmcr()
215 vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK; in vgic_v3_set_vmcr()
216 vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK; in vgic_v3_set_vmcr()
217 vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK; in vgic_v3_set_vmcr()
218 vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK; in vgic_v3_set_vmcr()
220 cpu_if->vgic_vmcr = vmcr; in vgic_v3_set_vmcr()
225 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; in vgic_v3_get_vmcr()
226 u32 model = vcpu->kvm->arch.vgic.vgic_model; in vgic_v3_get_vmcr()
229 vmcr = cpu_if->vgic_vmcr; in vgic_v3_get_vmcr()
232 vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >> in vgic_v3_get_vmcr()
234 vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >> in vgic_v3_get_vmcr()
241 vmcrp->fiqen = 1; in vgic_v3_get_vmcr()
242 vmcrp->ackctl = 0; in vgic_v3_get_vmcr()
245 vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT; in vgic_v3_get_vmcr()
246 vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT; in vgic_v3_get_vmcr()
247 vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT; in vgic_v3_get_vmcr()
248 vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT; in vgic_v3_get_vmcr()
249 vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT; in vgic_v3_get_vmcr()
250 vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT; in vgic_v3_get_vmcr()
251 vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT; in vgic_v3_get_vmcr()
261 struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3; in vgic_v3_enable()
268 vgic_v3->vgic_vmcr = 0; in vgic_v3_enable()
271 * If we are emulating a GICv3, we do it in a non-GICv2-compatible in vgic_v3_enable()
276 if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) { in vgic_v3_enable()
277 vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB | in vgic_v3_enable()
280 vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE; in vgic_v3_enable()
282 vgic_v3->vgic_sre = 0; in vgic_v3_enable()
285 vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 & in vgic_v3_enable()
288 vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 & in vgic_v3_enable()
293 vgic_v3->vgic_hcr = ICH_HCR_EN; in vgic_v3_enable()
295 vgic_v3->vgic_hcr |= ICH_HCR_TALL0; in vgic_v3_enable()
297 vgic_v3->vgic_hcr |= ICH_HCR_TALL1; in vgic_v3_enable()
299 vgic_v3->vgic_hcr |= ICH_HCR_TC; in vgic_v3_enable()
301 vgic_v3->vgic_hcr |= ICH_HCR_TDIR; in vgic_v3_enable()
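/*
 * Editor's note (hedged, not in the file): the ICH_HCR_EL2 bits set above
 * make the guest's ICC_* system-register accesses trap to EL2 so KVM can
 * emulate them: TALL0/TALL1 trap all Group-0/Group-1 accesses, TC traps the
 * common registers (e.g. ICC_SGI1R_EL1, ICC_DIR_EL1), and TDIR traps
 * ICC_DIR_EL1 alone.  The first three can be forced on with the
 * kvm-arm.vgic_v3_*_trap command-line options listed further down.
 */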
315 vcpu = irq->target_vcpu; in vgic_v3_lpi_sync_pending_status()
319 pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser); in vgic_v3_lpi_sync_pending_status()
321 byte_offset = irq->intid / BITS_PER_BYTE; in vgic_v3_lpi_sync_pending_status()
322 bit_nr = irq->intid % BITS_PER_BYTE; in vgic_v3_lpi_sync_pending_status()
331 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_v3_lpi_sync_pending_status()
332 if (irq->target_vcpu != vcpu) { in vgic_v3_lpi_sync_pending_status()
333 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_v3_lpi_sync_pending_status()
336 irq->pending_latch = status; in vgic_v3_lpi_sync_pending_status()
337 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); in vgic_v3_lpi_sync_pending_status()
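/*
 * Illustrative sketch (editor's addition, not in the file): the LPI pending
 * table is a plain bitmap in guest RAM, one bit per INTID, based at the
 * address programmed into GICR_PENDBASER.  Worked example for LPI 8195:
 *
 *	byte_offset = 8195 / 8 = 1024;
 *	bit_nr      = 8195 % 8 = 3;
 *	guest PA    = pendbase + 1024, then test bit 3 of that byte.
 */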
358 for (i = 0; i < dist->its_vm.nr_vpes; i++) { in unmap_all_vpes()
359 desc = irq_to_desc(dist->its_vm.vpes[i]->irq); in unmap_all_vpes()
369 for (i = 0; i < dist->its_vm.nr_vpes; i++) { in map_all_vpes()
370 desc = irq_to_desc(dist->its_vm.vpes[i]->irq); in map_all_vpes()
376 * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
381 struct vgic_dist *dist = &kvm->arch.vgic; in vgic_v3_save_pending_tables()
389 return -ENXIO; in vgic_v3_save_pending_tables()
401 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { in vgic_v3_save_pending_tables()
408 vcpu = irq->target_vcpu; in vgic_v3_save_pending_tables()
412 pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser); in vgic_v3_save_pending_tables()
414 byte_offset = irq->intid / BITS_PER_BYTE; in vgic_v3_save_pending_tables()
415 bit_nr = irq->intid % BITS_PER_BYTE; in vgic_v3_save_pending_tables()
427 is_pending = irq->pending_latch; in vgic_v3_save_pending_tables()
429 if (irq->hw && vlpi_avail) in vgic_v3_save_pending_tables()
453 * vgic_v3_rdist_overlap - check if a region overlaps with any
454 * existing redistributor region
464 struct vgic_dist *d = &kvm->arch.vgic; in vgic_v3_rdist_overlap()
467 list_for_each_entry(rdreg, &d->rd_regions, list) { in vgic_v3_rdist_overlap()
468 if ((base + size > rdreg->base) && in vgic_v3_rdist_overlap()
469 (base < rdreg->base + vgic_v3_rd_region_size(kvm, rdreg))) in vgic_v3_rdist_overlap()
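/*
 * Illustrative sketch (editor's addition, not in the file): the condition
 * above is the usual half-open interval overlap test.  Two ranges
 * [a, a + sa) and [b, b + sb) overlap iff (a + sa > b) && (a < b + sb).
 * For example, a region at 0x8f100000 of size 0x20000 overlaps an existing
 * one at 0x8f110000 of size 0x20000, but not one at 0x8f120000.
 */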
481 struct vgic_dist *d = &kvm->arch.vgic; in vgic_v3_check_base()
484 if (!IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) && in vgic_v3_check_base()
485 d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base) in vgic_v3_check_base()
488 list_for_each_entry(rdreg, &d->rd_regions, list) { in vgic_v3_check_base()
492 rdreg->base, SZ_64K, sz)) in vgic_v3_check_base()
496 if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base)) in vgic_v3_check_base()
499 return !vgic_v3_rdist_overlap(kvm, d->vgic_dist_base, in vgic_v3_check_base()
504 * vgic_v3_rdist_free_slot - Look up registered rdist regions and identify one
507 * @rd_regions: redistributor region list head
509 * A redistributor region maps n redistributors, n = region size / (2 x 64kB).
510 * The stride between redistributors is 0 and regions are filled in index order.
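/*
 * Illustrative sketch (editor's addition, not in the file): each
 * redistributor occupies two contiguous 64kB frames (RD_base + SGI_base),
 * so a region of size sz holds sz / (2 * SZ_64K) redistributors.  A 1MB
 * region at index 0 therefore maps vCPUs 0..7, with the region at the next
 * index continuing from vCPU 8.
 */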
529 struct list_head *rd_regions = &kvm->arch.vgic.rd_regions; in vgic_v3_rdist_region_from_index()
533 if (rdreg->index == index) in vgic_v3_rdist_region_from_index()
542 struct vgic_dist *dist = &kvm->arch.vgic; in vgic_v3_map_resources()
548 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; in vgic_v3_map_resources()
550 if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) { in vgic_v3_map_resources()
551 kvm_debug("vcpu %ld redistributor base not set\n", c); in vgic_v3_map_resources()
552 return -ENXIO; in vgic_v3_map_resources()
556 if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) { in vgic_v3_map_resources()
558 return -ENXIO; in vgic_v3_map_resources()
563 return -EINVAL; in vgic_v3_map_resources()
571 return -EBUSY; in vgic_v3_map_resources()
574 ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3); in vgic_v3_map_resources()
592 early_param("kvm-arm.vgic_v3_group0_trap", early_group0_trap_cfg);
598 early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);
604 early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);
610 early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);
629 * vgic_v3_probe - probe for a VGICv3 compatible interrupt controller
653 if (info->has_v4) { in vgic_v3_probe()
655 kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable; in vgic_v3_probe()
663 if (!info->vcpu.start) { in vgic_v3_probe()
667 } else if (!PAGE_ALIGNED(info->vcpu.start)) { in vgic_v3_probe()
669 (unsigned long long)info->vcpu.start); in vgic_v3_probe()
671 kvm_vgic_global_state.vcpu_base = info->vcpu.start; in vgic_v3_probe()
678 kvm_info("vgic-v2@%llx\n", info->vcpu.start); in vgic_v3_probe()
725 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; in vgic_v3_load()
732 if (likely(cpu_if->vgic_sre)) in vgic_v3_load()
733 kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr); in vgic_v3_load()
745 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; in vgic_v3_vmcr_sync()
747 if (likely(cpu_if->vgic_sre)) in vgic_v3_vmcr_sync()
748 cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr); in vgic_v3_vmcr_sync()
753 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; in vgic_v3_put()