Lines Matching +full:cluster +full:- +full:mode

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Kernel-based Virtual Machine driver for Linux
19 #include <linux/amd-iommu.h>
32 #define AVIC_VCPU_ID_MASK ((1 << AVIC_VCPU_ID_BITS) - 1)
36 #define AVIC_VM_ID_MASK ((1 << AVIC_VM_ID_BITS) - 1)
62 struct list_head node; /* Used by SVM for per-vcpu ir_list */
68 struct vmcb *vmcb = svm->vmcb01.ptr; in avic_activate_vmcb()
70 vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK); in avic_activate_vmcb()
71 vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK; in avic_activate_vmcb()
73 vmcb->control.int_ctl |= AVIC_ENABLE_MASK; in avic_activate_vmcb()
76 * KVM can support hybrid-AVIC mode, where KVM emulates x2APIC in avic_activate_vmcb()
82 if (apic_x2apic_mode(svm->vcpu.arch.apic) && in avic_activate_vmcb()
84 vmcb->control.int_ctl |= X2APIC_MODE_MASK; in avic_activate_vmcb()
85 vmcb->control.avic_physical_id |= X2AVIC_MAX_PHYSICAL_ID; in avic_activate_vmcb()
89 /* For xAVIC and hybrid-xAVIC modes */ in avic_activate_vmcb()
90 vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID; in avic_activate_vmcb()
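The fragments from avic_activate_vmcb() above are discontiguous; consolidated, the mode selection reduces to the sketch below. It is a sketch only: the x2avic_enabled predicate stands in for whatever enablement check the elided code uses, and the real function also toggles x2APIC MSR interception.

	/* Sketch: AVIC mode selection in avic_activate_vmcb(); x2avic_enabled
	 * is a placeholder for the elided enablement check. */
	vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK);
	vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK;
	vmcb->control.int_ctl |= AVIC_ENABLE_MASK;

	if (apic_x2apic_mode(svm->vcpu.arch.apic) && x2avic_enabled) {
		/* Full x2AVIC: hardware accelerates x2APIC MSR accesses. */
		vmcb->control.int_ctl |= X2APIC_MODE_MASK;
		vmcb->control.avic_physical_id |= X2AVIC_MAX_PHYSICAL_ID;
	} else {
		/* xAVIC and hybrid-xAVIC: KVM emulates the x2APIC MSRs. */
		vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID;
	}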
98 struct vmcb *vmcb = svm->vmcb01.ptr; in avic_deactivate_vmcb()
100 vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK); in avic_deactivate_vmcb()
101 vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK; in avic_deactivate_vmcb()
107 if (is_guest_mode(&svm->vcpu) && in avic_deactivate_vmcb()
108 vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)) in avic_deactivate_vmcb()
132 if (kvm_svm->avic_vm_id != vm_id) in avic_ga_log_notifier()
134 vcpu = kvm_get_vcpu_by_id(&kvm_svm->kvm, vcpu_id); in avic_ga_log_notifier()
158 if (kvm_svm->avic_logical_id_table_page) in avic_vm_destroy()
159 __free_page(kvm_svm->avic_logical_id_table_page); in avic_vm_destroy()
160 if (kvm_svm->avic_physical_id_table_page) in avic_vm_destroy()
161 __free_page(kvm_svm->avic_physical_id_table_page); in avic_vm_destroy()
164 hash_del(&kvm_svm->hnode); in avic_vm_destroy()
171 int err = -ENOMEM; in avic_vm_init()
186 kvm_svm->avic_physical_id_table_page = p_page; in avic_vm_init()
193 kvm_svm->avic_logical_id_table_page = l_page; in avic_vm_init()
198 if (vm_id == 0) { /* id is 1-based, zero is not okay */ in avic_vm_init()
205 if (k2->avic_vm_id == vm_id) in avic_vm_init()
209 kvm_svm->avic_vm_id = vm_id; in avic_vm_init()
210 hash_add(svm_vm_data_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id); in avic_vm_init()
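Between lines 198 and 210 the elided code allocates a non-zero vm_id and only scans the hash table for collisions once the counter has wrapped. A minimal sketch under that assumption (next_vm_id, next_vm_id_wrapped, and the helper name are hypothetical; the caller is assumed to hold the hash lock):

	/* Hypothetical consolidation of the vm_id allocation fragments above. */
	static u32 avic_alloc_vm_id_sketch(struct kvm_svm *kvm_svm)
	{
		static u32 next_vm_id;
		static bool next_vm_id_wrapped;
		struct kvm_svm *k2;
		u32 vm_id;

	again:
		vm_id = next_vm_id = (next_vm_id + 1) & AVIC_VM_ID_MASK;
		if (vm_id == 0) {	/* id is 1-based, zero is not okay */
			next_vm_id_wrapped = true;
			goto again;
		}
		/* A collision is only possible after the counter has wrapped. */
		if (next_vm_id_wrapped) {
			hash_for_each_possible(svm_vm_data_hash, k2, hnode, vm_id) {
				if (k2->avic_vm_id == vm_id)
					goto again;
			}
		}
		kvm_svm->avic_vm_id = vm_id;
		hash_add(svm_vm_data_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id);
		return vm_id;
	}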
222 struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm); in avic_init_vmcb()
223 phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page)); in avic_init_vmcb()
224 phys_addr_t lpa = __sme_set(page_to_phys(kvm_svm->avic_logical_id_table_page)); in avic_init_vmcb()
225 phys_addr_t ppa = __sme_set(page_to_phys(kvm_svm->avic_physical_id_table_page)); in avic_init_vmcb()
227 vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK; in avic_init_vmcb()
228 vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK; in avic_init_vmcb()
229 vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK; in avic_init_vmcb()
230 vmcb->control.avic_vapic_bar = APIC_DEFAULT_PHYS_BASE & VMCB_AVIC_APIC_BAR_MASK; in avic_init_vmcb()
232 if (kvm_apicv_activated(svm->vcpu.kvm)) in avic_init_vmcb()
242 struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); in avic_get_physical_id_entry()
248 avic_physical_id_table = page_address(kvm_svm->avic_physical_id_table_page); in avic_get_physical_id_entry()
266 mutex_lock(&kvm->slots_lock); in avic_alloc_access_page()
268 if (kvm->arch.apic_access_memslot_enabled) in avic_alloc_access_page()
280 kvm->arch.apic_access_memslot_enabled = true; in avic_alloc_access_page()
282 mutex_unlock(&kvm->slots_lock); in avic_alloc_access_page()
289 int id = vcpu->vcpu_id; in avic_init_backing_page()
294 return -EINVAL; in avic_init_backing_page()
296 if (!vcpu->arch.apic->regs) in avic_init_backing_page()
297 return -EINVAL; in avic_init_backing_page()
299 if (kvm_apicv_activated(vcpu->kvm)) { in avic_init_backing_page()
302 ret = avic_alloc_access_page(vcpu->kvm); in avic_init_backing_page()
307 svm->avic_backing_page = virt_to_page(vcpu->arch.apic->regs); in avic_init_backing_page()
312 return -EINVAL; in avic_init_backing_page()
314 new_entry = __sme_set((page_to_phys(svm->avic_backing_page) & in avic_init_backing_page()
319 svm->avic_physical_id_cache = entry; in avic_init_backing_page()
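The expression at line 314 is cut off by the match window; it builds a physical ID table entry from the backing-page address plus a valid bit. A hedged reconstruction, assuming the AVIC_PHYSICAL_ID_ENTRY_* masks from asm/svm.h:

	/* Reconstruction (not verbatim) of the truncated entry setup above. */
	new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
			       AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
			      AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
	WRITE_ONCE(*entry, new_entry);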
333 int cpu = READ_ONCE(vcpu->cpu); in avic_ring_doorbell()
337 trace_kvm_avic_doorbell(vcpu->vcpu_id, kvm_cpu_get_apicid(cpu)); in avic_ring_doorbell()
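avic_ring_doorbell() nudges the physical CPU that is currently running the target vCPU so the hardware notices the newly set IRR bit. A sketch, assuming MSR_AMD64_SVM_AVIC_DOORBELL from msr-index.h; if the vCPU migrates in the meantime, the next VMRUN picks up the pending interrupt anyway, so a stale doorbell is harmless:

	/* Sketch: ring the AVIC doorbell on the pCPU running the vCPU. */
	int cpu = READ_ONCE(vcpu->cpu);

	if (cpu != get_cpu())
		wrmsrl(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
	put_cpu();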
343 * A fast-path version of avic_kick_target_vcpus(), which attempts to match
356 return -EINVAL; in avic_kick_target_vcpus_fast()
366 return -EINVAL; in avic_kick_target_vcpus_fast()
368 return -EINVAL; in avic_kick_target_vcpus_fast()
373 return -EINVAL; in avic_kick_target_vcpus_fast()
376 u32 bitmap, cluster; in avic_kick_target_vcpus_fast() local
380 /* 16 bit dest mask, 16 bit cluster id */ in avic_kick_target_vcpus_fast()
382 cluster = (dest >> 16) << 4; in avic_kick_target_vcpus_fast()
386 cluster = 0; in avic_kick_target_vcpus_fast()
388 /* 4 bit dest mask, 4 bit cluster id */ in avic_kick_target_vcpus_fast()
390 cluster = (dest >> 4) << 2; in avic_kick_target_vcpus_fast()
399 return -EINVAL; in avic_kick_target_vcpus_fast()
401 logid_index = cluster + __ffs(bitmap); in avic_kick_target_vcpus_fast()
405 page_address(kvm_svm->avic_logical_id_table_page); in avic_kick_target_vcpus_fast()
410 return -EINVAL; in avic_kick_target_vcpus_fast()
420 * For x2APIC logical mode, the index cannot be leveraged. in avic_kick_target_vcpus_fast()
423 int cluster = (icrh & 0xffff0000) >> 16; in avic_kick_target_vcpus_fast() local
424 int apic = ffs(icrh & 0xffff) - 1; in avic_kick_target_vcpus_fast()
427 * If the x2APIC logical ID sub-field (i.e. icrh[15:0]) in avic_kick_target_vcpus_fast()
432 return -EINVAL; in avic_kick_target_vcpus_fast()
434 l1_physical_id = (cluster << 4) + apic; in avic_kick_target_vcpus_fast()
443 target_vcpu->arch.apic->irr_pending = true; in avic_kick_target_vcpus_fast()
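To make the fast-path decode above concrete, here is a worked example of the x2APIC logical branch (values are hypothetical):

	/*
	 * icrh = 0x00030008:
	 *   cluster = (icrh & 0xffff0000) >> 16 = 3
	 *   apic    = ffs(icrh & 0xffff) - 1    = ffs(0x0008) - 1 = 3
	 *   l1_physical_id = (cluster << 4) + apic = 0x33
	 * i.e. the vCPU whose x2APIC ID is 0x33. If more than one bit were set
	 * in icrh[15:0] the IPI would be multicast, which the fast path cannot
	 * resolve to a single vCPU, hence the -EINVAL bail-out above.
	 */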
471 if (apic_x2apic_mode(vcpu->arch.apic)) in avic_kick_target_vcpus()
478 vcpu->arch.apic->irr_pending = true; in avic_kick_target_vcpus()
490 u32 icrh = svm->vmcb->control.exit_info_1 >> 32; in avic_incomplete_ipi_interception()
491 u32 icrl = svm->vmcb->control.exit_info_1; in avic_incomplete_ipi_interception()
492 u32 id = svm->vmcb->control.exit_info_2 >> 32; in avic_incomplete_ipi_interception()
493 u32 index = svm->vmcb->control.exit_info_2 & 0x1FF; in avic_incomplete_ipi_interception()
494 struct kvm_lapic *apic = vcpu->arch.apic; in avic_incomplete_ipi_interception()
496 trace_kvm_avic_incomplete_ipi(vcpu->vcpu_id, icrh, icrl, id, index); in avic_incomplete_ipi_interception()
502 * only virtualizes Fixed, Edge-Triggered INTRs. The exit is in avic_incomplete_ipi_interception()
520 avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh, index); in avic_incomplete_ipi_interception()
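The fields decoded from exit_info_1/2 above feed a dispatch on the failure cause: AVIC only accelerates fixed, edge-triggered IPIs, so other encodings are emulated, and "target not running" merely kicks the destination vCPUs. A sketch using the avic_ipi_failure_cause values from asm/svm.h (not a verbatim copy):

	switch (id) {
	case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
		/* Not a Fixed/Edge IPI the hardware can virtualize: emulate it. */
		kvm_apic_send_ipi(apic, icrl, icrh);
		break;
	case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING:
		/* IRR bits are already set; just kick the destination vCPUs. */
		avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh, index);
		break;
	default:
		/* Other causes (invalid target/backing page) are warned about
		 * or ignored in the real function. */
		break;
	}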
543 struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); in avic_get_logical_id_entry()
552 index = ffs(dlid) - 1; in avic_get_logical_id_entry()
555 } else { /* cluster */ in avic_get_logical_id_entry()
556 int cluster = (dlid & 0xf0) >> 4; in avic_get_logical_id_entry() local
557 int apic = ffs(dlid & 0x0f) - 1; in avic_get_logical_id_entry()
560 (cluster >= 0xf)) in avic_get_logical_id_entry()
562 index = (cluster << 2) + apic; in avic_get_logical_id_entry()
565 logical_apic_id_table = (u32 *) page_address(kvm_svm->avic_logical_id_table_page); in avic_get_logical_id_entry()
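The index computation in avic_get_logical_id_entry() appears above in pieces; reconstructed (xAPIC only, as the fragments suggest), flat mode uses one 8-entry group while cluster mode packs four entries per cluster:

	/* Reconstruction of the fragments above; not a verbatim copy. */
	if (flat) {		/* flat: dlid is an 8-bit bitmap */
		index = ffs(dlid) - 1;
		if (index > 7)
			return NULL;
	} else {		/* cluster: dlid[7:4] = cluster, dlid[3:0] = bitmap */
		int cluster = (dlid & 0xf0) >> 4;
		int apic = ffs(dlid & 0x0f) - 1;

		if ((apic < 0) || (apic > 7) || (cluster >= 0xf))
			return NULL;
		index = (cluster << 2) + apic;
	}
	logical_apic_id_table = (u32 *) page_address(kvm_svm->avic_logical_id_table_page);
	return &logical_apic_id_table[index];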
575 flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT; in avic_ldr_write()
578 return -EINVAL; in avic_ldr_write()
592 bool flat = svm->dfr_reg == APIC_DFR_FLAT; in avic_invalidate_logical_id_entry()
596 if (apic_x2apic_mode(vcpu->arch.apic)) in avic_invalidate_logical_id_entry()
599 entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat); in avic_invalidate_logical_id_entry()
608 u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR); in avic_handle_ldr_update()
609 u32 id = kvm_xapic_id(vcpu->arch.apic); in avic_handle_ldr_update()
612 if (apic_x2apic_mode(vcpu->arch.apic)) in avic_handle_ldr_update()
615 if (ldr == svm->ldr_reg) in avic_handle_ldr_update()
624 svm->ldr_reg = ldr; in avic_handle_ldr_update()
632 u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR); in avic_handle_dfr_update()
634 if (svm->dfr_reg == dfr) in avic_handle_dfr_update()
638 svm->dfr_reg = dfr; in avic_handle_dfr_update()
643 u32 offset = to_svm(vcpu)->vmcb->control.exit_info_1 & in avic_unaccel_trap_write()
695 u32 offset = svm->vmcb->control.exit_info_1 & in avic_unaccelerated_access_interception()
697 u32 vector = svm->vmcb->control.exit_info_2 & in avic_unaccelerated_access_interception()
699 bool write = (svm->vmcb->control.exit_info_1 >> 32) & in avic_unaccelerated_access_interception()
703 trace_kvm_avic_unaccelerated_access(vcpu->vcpu_id, offset, in avic_unaccelerated_access_interception()
720 struct kvm_vcpu *vcpu = &svm->vcpu; in avic_init_vcpu()
722 if (!enable_apicv || !irqchip_in_kernel(vcpu->kvm)) in avic_init_vcpu()
729 INIT_LIST_HEAD(&svm->ir_list); in avic_init_vcpu()
730 spin_lock_init(&svm->ir_list_lock); in avic_init_vcpu()
731 svm->dfr_reg = APIC_DFR_FLAT; in avic_init_vcpu()
748 WARN_ONCE(true, "Invalid local APIC state (vcpu_id=%d)", vcpu->vcpu_id); in avic_set_virtual_apic_mode()
761 if (!kvm_arch_has_assigned_device(vcpu->kvm)) in avic_set_pi_irte_mode()
765 * Here, we go through the per-vcpu ir_list to update all existing in avic_set_pi_irte_mode()
768 spin_lock_irqsave(&svm->ir_list_lock, flags); in avic_set_pi_irte_mode()
770 if (list_empty(&svm->ir_list)) in avic_set_pi_irte_mode()
773 list_for_each_entry(ir, &svm->ir_list, node) { in avic_set_pi_irte_mode()
775 ret = amd_iommu_activate_guest_mode(ir->data); in avic_set_pi_irte_mode()
777 ret = amd_iommu_deactivate_guest_mode(ir->data); in avic_set_pi_irte_mode()
782 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in avic_set_pi_irte_mode()
791 spin_lock_irqsave(&svm->ir_list_lock, flags); in svm_ir_list_del()
792 list_for_each_entry(cur, &svm->ir_list, node) { in svm_ir_list_del()
793 if (cur->data != pi->ir_data) in svm_ir_list_del()
795 list_del(&cur->node); in svm_ir_list_del()
799 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in svm_ir_list_del()
809 * In some cases, the existing irte is updated and re-set, in svm_ir_list_add()
813 if (pi->ir_data && (pi->prev_ga_tag != 0)) { in svm_ir_list_add()
814 struct kvm *kvm = svm->vcpu.kvm; in svm_ir_list_add()
815 u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag); in svm_ir_list_add()
820 ret = -EINVAL; in svm_ir_list_add()
830 * add to the per-vcpu ir_list. in svm_ir_list_add()
834 ret = -ENOMEM; in svm_ir_list_add()
837 ir->data = pi->ir_data; in svm_ir_list_add()
839 spin_lock_irqsave(&svm->ir_list_lock, flags); in svm_ir_list_add()
840 list_add(&ir->node, &svm->ir_list); in svm_ir_list_add()
841 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in svm_ir_list_add()
852 * For lowest-priority interrupts, we only support
855 * irqbalance to make the interrupts single-CPU.
868 pr_debug("SVM: %s: use legacy intr remap mode for irq %u\n", in get_pi_vcpu_info()
870 return -1; in get_pi_vcpu_info()
873 pr_debug("SVM: %s: use GA mode for irq %u\n", __func__, in get_pi_vcpu_info()
876 vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page)); in get_pi_vcpu_info()
877 vcpu_info->vector = irq.vector; in get_pi_vcpu_info()
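get_pi_vcpu_info() decides whether an MSI can be posted at all: it resolves the routing entry to a single, postable vCPU and otherwise falls back to legacy interrupt remapping (the pr_debug lines above). A sketch assuming the x86 KVM helpers kvm_set_msi_irq(), kvm_intr_is_single_vcpu() and kvm_irq_is_postable():

	struct kvm_lapic_irq irq;
	struct kvm_vcpu *vcpu = NULL;

	kvm_set_msi_irq(kvm, e, &irq);

	if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
	    !kvm_irq_is_postable(&irq)) {
		/* Multicast/broadcast or non-fixed delivery: legacy remapping. */
		return -1;
	}

	/* Single postable destination: point the IRTE at that vCPU's AVIC
	 * backing page and deliver the guest vector directly. */
	*svm = to_svm(vcpu);
	vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page));
	vcpu_info->vector = irq.vector;
	return 0;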
883 * avic_pi_update_irte - set IRTE for Posted-Interrupts
905 idx = srcu_read_lock(&kvm->irq_srcu); in avic_pi_update_irte()
906 irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); in avic_pi_update_irte()
908 if (guest_irq >= irq_rt->nr_rt_entries || in avic_pi_update_irte()
909 hlist_empty(&irq_rt->map[guest_irq])) { in avic_pi_update_irte()
911 guest_irq, irq_rt->nr_rt_entries); in avic_pi_update_irte()
915 hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) { in avic_pi_update_irte()
919 if (e->type != KVM_IRQ_ROUTING_MSI) in avic_pi_update_irte()
923 * Here, we set up legacy mode in the following cases: in avic_pi_update_irte()
927 * 4. IRQ has incompatible delivery mode (SMI, INIT, etc) in avic_pi_update_irte()
930 kvm_vcpu_apicv_active(&svm->vcpu)) { in avic_pi_update_irte()
934 pi.base = __sme_set(page_to_phys(svm->avic_backing_page) & in avic_pi_update_irte()
936 pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id, in avic_pi_update_irte()
937 svm->vcpu.vcpu_id); in avic_pi_update_irte()
944 * IOMMU guest mode. Now, we need to store the posted in avic_pi_update_irte()
945 * interrupt information in a per-vcpu ir_list so that in avic_pi_update_irte()
952 /* Use legacy mode in IRTE */ in avic_pi_update_irte()
957 * - Tell IOMMU to use legacy mode for this interrupt. in avic_pi_update_irte()
958 * - Retrieve ga_tag of prior interrupt remapping data. in avic_pi_update_irte()
967 * was cached. If so, we need to clean up the per-vcpu in avic_pi_update_irte()
981 trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id, in avic_pi_update_irte()
982 e->gsi, vcpu_info.vector, in avic_pi_update_irte()
994 srcu_read_unlock(&kvm->irq_srcu, idx); in avic_pi_update_irte()
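When the routing entry is an MSI, posting is being enabled, and the target vCPU has APICv active, avic_pi_update_irte() programs the IOMMU IRTE for guest (posted) mode and records the mapping on the vCPU's ir_list so it can follow the vCPU across pCPUs. A sketch of that branch, assuming the amd_iommu_pi_data layout from <linux/amd-iommu.h>:

	struct amd_iommu_pi_data pi;

	/* Point the IRTE at the vCPU's AVIC backing page and tag it with the
	 * (vm_id, vcpu_id) pair so GA-log events can be routed back. */
	pi.base = __sme_set(page_to_phys(svm->avic_backing_page) & AVIC_HPA_MASK);
	pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id, svm->vcpu.vcpu_id);
	pi.is_guest_mode = true;
	pi.vcpu_data = &vcpu_info;

	ret = irq_set_vcpu_affinity(host_irq, &pi);
	if (!ret && pi.is_guest_mode)
		/* Remember the IRTE so later vCPU migrations can update it. */
		ret = svm_ir_list_add(svm, &pi);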
1023 if (!kvm_arch_has_assigned_device(vcpu->kvm)) in avic_update_iommu_vcpu_affinity()
1027 * Here, we go through the per-vcpu ir_list to update all existing in avic_update_iommu_vcpu_affinity()
1030 spin_lock_irqsave(&svm->ir_list_lock, flags); in avic_update_iommu_vcpu_affinity()
1032 if (list_empty(&svm->ir_list)) in avic_update_iommu_vcpu_affinity()
1035 list_for_each_entry(ir, &svm->ir_list, node) { in avic_update_iommu_vcpu_affinity()
1036 ret = amd_iommu_update_ga(cpu, r, ir->data); in avic_update_iommu_vcpu_affinity()
1041 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in avic_update_iommu_vcpu_affinity()
1066 entry = READ_ONCE(*(svm->avic_physical_id_cache)); in avic_vcpu_load()
1072 WRITE_ONCE(*(svm->avic_physical_id_cache), entry); in avic_vcpu_load()
1083 entry = READ_ONCE(*(svm->avic_physical_id_cache)); in avic_vcpu_put()
1089 avic_update_iommu_vcpu_affinity(vcpu, -1, 0); in avic_vcpu_put()
1092 WRITE_ONCE(*(svm->avic_physical_id_cache), entry); in avic_vcpu_put()
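Taken together, the READ_ONCE/WRITE_ONCE pairs above implement the running/not-running handshake: avic_vcpu_load() stamps the destination pCPU's APIC ID and sets IS_RUNNING in the cached physical ID entry, while avic_vcpu_put() clears IS_RUNNING so senders fall back to the slower kick path. A sketch using the AVIC_PHYSICAL_ID_ENTRY_* masks from asm/svm.h:

	/* avic_vcpu_load(vcpu, cpu): mark the entry as running on this pCPU. */
	entry = READ_ONCE(*(svm->avic_physical_id_cache));
	entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
	entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
	entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);

	/* avic_vcpu_put(vcpu): clear IS_RUNNING so senders take the slow path. */
	entry = READ_ONCE(*(svm->avic_physical_id_cache));
	entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);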
1099 struct vmcb *vmcb = svm->vmcb01.ptr; in avic_refresh_apicv_exec_ctrl()
1111 * accordingly before re-activating. in avic_refresh_apicv_exec_ctrl()
1121 avic_vcpu_load(vcpu, vcpu->cpu); in avic_refresh_apicv_exec_ctrl()
1154 avic_vcpu_load(vcpu, vcpu->cpu); in avic_vcpu_unblocking()
1159 * - The module param avic enables both xAPIC and x2APIC modes.
1160 * - Hypervisor can support both xAVIC and x2AVIC in the same guest.
1161 * - The mode can be switched at run-time.
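Because the guest can switch its APIC between xAPIC and x2APIC at run-time, avic_set_virtual_apic_mode() (line 748 above) re-evaluates the AVIC configuration when that happens. A plausible sketch, assuming it simply guards on an in-kernel LAPIC plus enable_apicv and then funnels into avic_refresh_apicv_exec_ctrl() (not a verbatim copy):

	void avic_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
	{
		if (!lapic_in_kernel(vcpu) || !enable_apicv)
			return;

		if (kvm_get_apic_mode(vcpu) == LAPIC_MODE_INVALID)
			WARN_ONCE(true, "Invalid local APIC state (vcpu_id=%d)",
				  vcpu->vcpu_id);
		else
			avic_refresh_apicv_exec_ctrl(vcpu);
	}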