Lines matching the full word "iommu"
27 #include "iommu.h"
28 #include "../dma-iommu.h"
30 #include "../iommu-sva-lib.h"
218 static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn) in context_copied() argument
220 if (!iommu->copied_tables) in context_copied()
223 return test_bit(((long)bus << 8) | devfn, iommu->copied_tables); in context_copied()
227 set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn) in set_context_copied() argument
229 set_bit(((long)bus << 8) | devfn, iommu->copied_tables); in set_context_copied()
233 clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn) in clear_context_copied() argument
235 clear_bit(((long)bus << 8) | devfn, iommu->copied_tables); in clear_context_copied()
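The copied_tables bitmap used above is indexed by the 16-bit source-id formed by packing the bus into the high byte and the devfn into the low byte, so one bit exists for every possible PCI function behind the IOMMU. A minimal user-space sketch of the same packing and bit bookkeeping (standard C only; helper names are illustrative, not the driver's):

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* One bit per 16-bit source-id (bus << 8 | devfn), like copied_tables. */
    #define SRCID_BITS  (1UL << 16)
    #define WORD_BITS   (sizeof(unsigned long) * CHAR_BIT)

    static unsigned long copied[SRCID_BITS / WORD_BITS];

    static unsigned long srcid(unsigned char bus, unsigned char devfn)
    {
        return ((unsigned long)bus << 8) | devfn;   /* same packing as above */
    }

    static void set_copied(unsigned char bus, unsigned char devfn)
    {
        unsigned long i = srcid(bus, devfn);

        copied[i / WORD_BITS] |= 1UL << (i % WORD_BITS);
    }

    static bool is_copied(unsigned char bus, unsigned char devfn)
    {
        unsigned long i = srcid(bus, devfn);

        return copied[i / WORD_BITS] & (1UL << (i % WORD_BITS));
    }

    int main(void)
    {
        set_copied(0x3a, 0x10);                          /* bus 0x3a, devfn 0x10 */
        printf("copied: %d\n", is_copied(0x3a, 0x10));   /* prints 1 */
        printf("copied: %d\n", is_copied(0x3a, 0x11));   /* prints 0 */
        return 0;
    }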
241 * 2. It maps to each iommu if successful.
242 * 3. Each iommu maps to this domain if successful.
268 struct intel_iommu *iommu; /* the corresponding iommu */ member
298 static bool translation_pre_enabled(struct intel_iommu *iommu) in translation_pre_enabled() argument
300 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED); in translation_pre_enabled()
303 static void clear_translation_pre_enabled(struct intel_iommu *iommu) in clear_translation_pre_enabled() argument
305 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED; in clear_translation_pre_enabled()
308 static void init_translation_status(struct intel_iommu *iommu) in init_translation_status() argument
312 gsts = readl(iommu->reg + DMAR_GSTS_REG); in init_translation_status()
314 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED; in init_translation_status()
325 pr_info("IOMMU enabled\n"); in intel_iommu_setup()
329 pr_info("IOMMU disabled\n"); in intel_iommu_setup()
334 pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n"); in intel_iommu_setup()
337 pr_warn("intel_iommu=strict deprecated; use iommu.strict=1 instead\n"); in intel_iommu_setup()
349 pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); in intel_iommu_setup()
399 * Calculate the Supported Adjusted Guest Address Widths of an IOMMU.
403 static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu) in __iommu_calculate_sagaw() argument
407 fl_sagaw = BIT(2) | (cap_fl5lp_support(iommu->cap) ? BIT(3) : 0); in __iommu_calculate_sagaw()
408 sl_sagaw = cap_sagaw(iommu->cap); in __iommu_calculate_sagaw()
411 if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) in __iommu_calculate_sagaw()
415 if (!ecap_slts(iommu->ecap)) in __iommu_calculate_sagaw()
421 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) in __iommu_calculate_agaw() argument
426 sagaw = __iommu_calculate_sagaw(iommu); in __iommu_calculate_agaw()
436 * Calculate max SAGAW for each iommu.
438 int iommu_calculate_max_sagaw(struct intel_iommu *iommu) in iommu_calculate_max_sagaw() argument
440 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH); in iommu_calculate_max_sagaw()
444 * Calculate agaw for each iommu. in iommu_calculate_agaw()
448 int iommu_calculate_agaw(struct intel_iommu *iommu) in iommu_calculate_agaw() argument
450 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH); in iommu_calculate_agaw()
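cap_sagaw()/cap_fl5lp_support() yield a bitmask of supported adjusted guest address widths, and __iommu_calculate_agaw() then picks the widest advertised value that still fits the requested width. A stand-alone sketch of that selection, assuming the usual VT-d convention that AGAW value n corresponds to a 30 + 9*n bit width (helper names are illustrative):

    #include <stdio.h>

    #define BIT(n)       (1UL << (n))
    #define LEVEL_STRIDE 9   /* each extra page-table level adds 9 address bits */

    /* AGAW value n corresponds to a 30 + 9 * n bit width (39, 48, 57, ...). */
    static int width_to_agaw(int width)
    {
        return (width - 30 + LEVEL_STRIDE - 1) / LEVEL_STRIDE;
    }

    /* Widest AGAW that is both advertised in sagaw and no wider than max_gaw. */
    static int calculate_agaw(unsigned long sagaw, int max_gaw)
    {
        int agaw;

        for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--)
            if (sagaw & BIT(agaw))
                break;

        return agaw;   /* -1 if nothing in the mask fits */
    }

    int main(void)
    {
        unsigned long sagaw = BIT(2) | BIT(3);   /* 4- and 5-level tables */

        printf("agaw for 48-bit gaw: %d\n", calculate_agaw(sagaw, 48));   /* 2 */
        printf("agaw for 57-bit gaw: %d\n", calculate_agaw(sagaw, 57));   /* 3 */
        return 0;
    }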
453 static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu) in iommu_paging_structure_coherency() argument
455 return sm_supported(iommu) ? in iommu_paging_structure_coherency()
456 ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap); in iommu_paging_structure_coherency()
463 struct intel_iommu *iommu; in domain_update_iommu_coherency() local
470 if (!iommu_paging_structure_coherency(info->iommu)) { in domain_update_iommu_coherency()
480 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_coherency()
481 if (!iommu_paging_structure_coherency(iommu)) { in domain_update_iommu_coherency()
493 struct intel_iommu *iommu; in domain_update_iommu_superpage() local
501 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_superpage()
502 if (iommu != skip) { in domain_update_iommu_superpage()
504 if (!cap_fl1gp_support(iommu->cap)) in domain_update_iommu_superpage()
507 mask &= cap_super_page_val(iommu->cap); in domain_update_iommu_superpage()
590 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, in iommu_context_addr() argument
593 struct root_entry *root = &iommu->root_entry[bus]; in iommu_context_addr()
601 if (!alloc && context_copied(iommu, bus, devfn)) in iommu_context_addr()
605 if (sm_supported(iommu)) { in iommu_context_addr()
619 context = alloc_pgtable_page(iommu->node); in iommu_context_addr()
623 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE); in iommu_context_addr()
626 __iommu_flush_cache(iommu, entry, sizeof(*entry)); in iommu_context_addr()
664 /* We know that this device on this chipset has its own IOMMU. in quirk_ioat_snb_local_iommu()
665 * If we find it under a different IOMMU, then the BIOS is lying in quirk_ioat_snb_local_iommu()
666 * to us. Hope that the IOMMU for this device is actually in quirk_ioat_snb_local_iommu()
677 /* we know that this iommu should be at offset 0xa000 from vtbar */ in quirk_ioat_snb_local_iommu()
688 static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev) in iommu_is_dummy() argument
690 if (!iommu || iommu->drhd->ignored) in iommu_is_dummy()
709 struct intel_iommu *iommu; in device_to_iommu() local
723 * the PF instead to find the IOMMU. */ in device_to_iommu()
731 for_each_iommu(iommu, drhd) { in device_to_iommu()
739 * which we used for the IOMMU lookup. Strictly speaking in device_to_iommu()
765 iommu = NULL; in device_to_iommu()
767 if (iommu_is_dummy(iommu, dev)) in device_to_iommu()
768 iommu = NULL; in device_to_iommu()
772 return iommu; in device_to_iommu()
782 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn) in device_context_mapped() argument
787 spin_lock(&iommu->lock); in device_context_mapped()
788 context = iommu_context_addr(iommu, bus, devfn, 0); in device_context_mapped()
791 spin_unlock(&iommu->lock); in device_context_mapped()
795 static void free_context_table(struct intel_iommu *iommu) in free_context_table() argument
800 if (!iommu->root_entry) in free_context_table()
804 context = iommu_context_addr(iommu, i, 0, 0); in free_context_table()
808 if (!sm_supported(iommu)) in free_context_table()
811 context = iommu_context_addr(iommu, i, 0x80, 0); in free_context_table()
816 free_pgtable_page(iommu->root_entry); in free_context_table()
817 iommu->root_entry = NULL; in free_context_table()
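free_context_table() visits devfn 0 and, on scalable-mode hardware, devfn 0x80 for every bus because each root entry then carries two context-table pointers: one for functions 0x00-0x7f and one for 0x80-0xff. A conceptual sketch of that split (simplified; the driver itself doubles the index instead, since scalable-mode context entries are twice the legacy entry size):

    #include <stdio.h>

    /*
     * Legacy mode: one 256-entry context table per bus, indexed directly by
     * devfn. Scalable mode: two tables per bus, split at devfn 0x80.
     */
    static void locate(unsigned int devfn, int scalable_mode)
    {
        if (!scalable_mode)
            printf("devfn 0x%02x -> context table, slot %u\n", devfn, devfn);
        else
            printf("devfn 0x%02x -> %s context table, slot %u\n", devfn,
                   devfn & 0x80 ? "upper" : "lower", devfn & 0x7f);
    }

    int main(void)
    {
        locate(0x10, 0);   /* legacy: single table, slot 0x10 */
        locate(0x10, 1);   /* scalable: lower table, slot 0x10 */
        locate(0x90, 1);   /* scalable: upper table, slot 0x10 */
        return 0;
    }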
821 static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn, in pgtable_walk() argument
845 void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id, in dmar_fault_dump_ptes() argument
857 pr_info("Dump %s table entries for IOVA 0x%llx\n", iommu->name, addr); in dmar_fault_dump_ptes()
860 rt_entry = &iommu->root_entry[bus]; in dmar_fault_dump_ptes()
866 if (sm_supported(iommu)) in dmar_fault_dump_ptes()
873 ctx_entry = iommu_context_addr(iommu, bus, devfn, 0); in dmar_fault_dump_ptes()
883 if (!sm_supported(iommu)) { in dmar_fault_dump_ptes()
923 pgtable_walk(iommu, addr >> VTD_PAGE_SHIFT, bus, devfn, pgtable, level); in dmar_fault_dump_ptes()
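pgtable_walk() and pfn_to_dma_pte() descend the I/O page table one 9-bit index per level, starting from the top level recorded for the domain. A sketch of the per-level index computation (constants as in the VT-d page-table format; mirrors the driver's per-level offset arithmetic):

    #include <stdio.h>

    #define VTD_PAGE_SHIFT 12
    #define LEVEL_STRIDE   9
    #define LEVEL_MASK     ((1UL << LEVEL_STRIDE) - 1)

    /* Index into the 512-entry table at 'level' (1 = leaf) for a page frame. */
    static unsigned int level_offset(unsigned long long pfn, int level)
    {
        return (pfn >> ((level - 1) * LEVEL_STRIDE)) & LEVEL_MASK;
    }

    int main(void)
    {
        unsigned long long iova = 0x12345678000ULL;
        unsigned long long pfn = iova >> VTD_PAGE_SHIFT;

        for (int level = 4; level >= 1; level--)   /* 4-level walk, top down */
            printf("level %d index %u\n", level, level_offset(pfn, level));
        return 0;
    }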
937 /* Address beyond IOMMU's addressing capabilities. */ in pfn_to_dma_pte()
1179 /* We can't just free the pages because the IOMMU may still be walking
1201 /* iommu handling */
1202 static int iommu_alloc_root_entry(struct intel_iommu *iommu) in iommu_alloc_root_entry() argument
1206 root = (struct root_entry *)alloc_pgtable_page(iommu->node); in iommu_alloc_root_entry()
1209 iommu->name); in iommu_alloc_root_entry()
1213 __iommu_flush_cache(iommu, root, ROOT_SIZE); in iommu_alloc_root_entry()
1214 iommu->root_entry = root; in iommu_alloc_root_entry()
1219 static void iommu_set_root_entry(struct intel_iommu *iommu) in iommu_set_root_entry() argument
1225 addr = virt_to_phys(iommu->root_entry); in iommu_set_root_entry()
1226 if (sm_supported(iommu)) in iommu_set_root_entry()
1229 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_set_root_entry()
1230 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr); in iommu_set_root_entry()
1232 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); in iommu_set_root_entry()
1235 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_set_root_entry()
1238 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_set_root_entry()
1244 if (cap_esrtps(iommu->cap)) in iommu_set_root_entry()
1247 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); in iommu_set_root_entry()
1248 if (sm_supported(iommu)) in iommu_set_root_entry()
1249 qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0); in iommu_set_root_entry()
1250 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in iommu_set_root_entry()
1253 void iommu_flush_write_buffer(struct intel_iommu *iommu) in iommu_flush_write_buffer() argument
1258 if (!rwbf_quirk && !cap_rwbf(iommu->cap)) in iommu_flush_write_buffer()
1261 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1262 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG); in iommu_flush_write_buffer()
1265 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_flush_write_buffer()
1268 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1272 static void __iommu_flush_context(struct intel_iommu *iommu, in __iommu_flush_context() argument
1295 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_context()
1296 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); in __iommu_flush_context()
1299 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, in __iommu_flush_context()
1302 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_context()
1306 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, in __iommu_flush_iotlb() argument
1309 int tlb_offset = ecap_iotlb_offset(iommu->ecap); in __iommu_flush_iotlb()
1335 if (cap_read_drain(iommu->cap)) in __iommu_flush_iotlb()
1338 if (cap_write_drain(iommu->cap)) in __iommu_flush_iotlb()
1341 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1344 dmar_writeq(iommu->reg + tlb_offset, val_iva); in __iommu_flush_iotlb()
1345 dmar_writeq(iommu->reg + tlb_offset + 8, val); in __iommu_flush_iotlb()
1348 IOMMU_WAIT_OP(iommu, tlb_offset + 8, in __iommu_flush_iotlb()
1351 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_iotlb()
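__iommu_flush_context() and __iommu_flush_iotlb() follow the same register-level protocol: take register_lock, write the command into the relevant register, then spin via IOMMU_WAIT_OP() until the hardware reports completion (some operations set a status bit, others clear one, and the real macro adds cpu_relax() and a hard timeout). A toy model of the write-then-poll pattern only, with the hardware modelled as an ordinary variable so completion is immediate:

    #include <stdint.h>
    #include <stdio.h>

    /* Pretend status register; real hardware completes asynchronously. */
    static volatile uint32_t status_reg;

    static void issue_command(void)
    {
        status_reg |= 1U;   /* hardware would set/clear this on its own */
    }

    int main(void)
    {
        /* "take the register lock", write the command register ... */
        issue_command();

        /* ... then poll the status register until completion is observed. */
        while (!(status_reg & 1U))
            ;

        printf("command completed\n");
        return 0;
    }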
1364 struct intel_iommu *iommu, u8 bus, u8 devfn) in domain_lookup_dev_info() argument
1371 if (info->iommu == iommu && info->bus == bus && in domain_lookup_dev_info()
1425 /* For an IOMMU that supports device IOTLB throttling (DIT), we assign in iommu_enable_pci_caps()
1426 * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge in iommu_enable_pci_caps()
1430 if (!ecap_dit(info->iommu->ecap)) in iommu_enable_pci_caps()
1497 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, in __iommu_flush_dev_iotlb()
1517 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, in iommu_flush_iotlb_psi() argument
1525 u16 did = domain_id_iommu(domain, iommu); in iommu_flush_iotlb_psi()
1533 qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, pages, ih); in iommu_flush_iotlb_psi()
1560 if (!cap_pgsel_inv(iommu->cap) || in iommu_flush_iotlb_psi()
1561 mask > cap_max_amask_val(iommu->cap)) in iommu_flush_iotlb_psi()
1562 iommu->flush.flush_iotlb(iommu, did, 0, 0, in iommu_flush_iotlb_psi()
1565 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask, in iommu_flush_iotlb_psi()
1573 if (!cap_caching_mode(iommu->cap) || !map) in iommu_flush_iotlb_psi()
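Page-selective invalidation (PSI) describes its target as a base address plus an address mask, i.e. a naturally aligned block of 2^mask pages; as the fragments above show, the driver falls back to a domain-selective flush when the hardware lacks PSI support or the mask exceeds cap_max_amask_val(). A simplified sketch of fitting an arbitrary (pfn, pages) range into such a block -- not the driver's exact computation, and it may invalidate a little more than requested, which is always safe:

    #include <stdio.h>

    /*
     * Fit a (pfn, pages) range, pages >= 1, into a naturally aligned block
     * of 2^mask pages by growing the block until it covers the whole range.
     */
    static unsigned int region_mask(unsigned long pfn, unsigned long pages)
    {
        unsigned int mask = 0;

        while ((pfn >> mask) != ((pfn + pages - 1) >> mask))
            mask++;

        return mask;
    }

    int main(void)
    {
        unsigned long pfn = 0x1002, pages = 3;
        unsigned int mask = region_mask(pfn, pages);

        /* prints: base pfn 0x1000, mask 3 (8 pages) */
        printf("base pfn 0x%lx, mask %u (%lu pages)\n",
               pfn & ~((1UL << mask) - 1), mask, 1UL << mask);
        return 0;
    }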
1578 static inline void __mapping_notify_one(struct intel_iommu *iommu, in __mapping_notify_one() argument
1586 if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain)) in __mapping_notify_one()
1587 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1); in __mapping_notify_one()
1589 iommu_flush_write_buffer(iommu); in __mapping_notify_one()
1599 struct intel_iommu *iommu = info->iommu; in intel_flush_iotlb_all() local
1600 u16 did = domain_id_iommu(dmar_domain, iommu); in intel_flush_iotlb_all()
1603 qi_flush_piotlb(iommu, did, PASID_RID2PASID, 0, -1, 0); in intel_flush_iotlb_all()
1605 iommu->flush.flush_iotlb(iommu, did, 0, 0, in intel_flush_iotlb_all()
1608 if (!cap_caching_mode(iommu->cap)) in intel_flush_iotlb_all()
1613 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) in iommu_disable_protect_mem_regions() argument
1618 if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap)) in iommu_disable_protect_mem_regions()
1621 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1622 pmen = readl(iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1624 writel(pmen, iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1627 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, in iommu_disable_protect_mem_regions()
1630 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1633 static void iommu_enable_translation(struct intel_iommu *iommu) in iommu_enable_translation() argument
1638 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_enable_translation()
1639 iommu->gcmd |= DMA_GCMD_TE; in iommu_enable_translation()
1640 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_enable_translation()
1643 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_enable_translation()
1646 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_enable_translation()
1649 static void iommu_disable_translation(struct intel_iommu *iommu) in iommu_disable_translation() argument
1654 if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated && in iommu_disable_translation()
1655 (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap))) in iommu_disable_translation()
1658 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_disable_translation()
1659 iommu->gcmd &= ~DMA_GCMD_TE; in iommu_disable_translation()
1660 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_disable_translation()
1663 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_disable_translation()
1666 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_disable_translation()
1669 static int iommu_init_domains(struct intel_iommu *iommu) in iommu_init_domains() argument
1673 ndomains = cap_ndoms(iommu->cap); in iommu_init_domains()
1675 iommu->name, ndomains); in iommu_init_domains()
1677 spin_lock_init(&iommu->lock); in iommu_init_domains()
1679 iommu->domain_ids = bitmap_zalloc(ndomains, GFP_KERNEL); in iommu_init_domains()
1680 if (!iommu->domain_ids) in iommu_init_domains()
1689 set_bit(0, iommu->domain_ids); in iommu_init_domains()
1698 if (sm_supported(iommu)) in iommu_init_domains()
1699 set_bit(FLPT_DEFAULT_DID, iommu->domain_ids); in iommu_init_domains()
1704 static void disable_dmar_iommu(struct intel_iommu *iommu) in disable_dmar_iommu() argument
1706 if (!iommu->domain_ids) in disable_dmar_iommu()
1710 * All iommu domains must have been detached from the devices, in disable_dmar_iommu()
1713 if (WARN_ON(bitmap_weight(iommu->domain_ids, cap_ndoms(iommu->cap)) in disable_dmar_iommu()
1717 if (iommu->gcmd & DMA_GCMD_TE) in disable_dmar_iommu()
1718 iommu_disable_translation(iommu); in disable_dmar_iommu()
1721 static void free_dmar_iommu(struct intel_iommu *iommu) in free_dmar_iommu() argument
1723 if (iommu->domain_ids) { in free_dmar_iommu()
1724 bitmap_free(iommu->domain_ids); in free_dmar_iommu()
1725 iommu->domain_ids = NULL; in free_dmar_iommu()
1728 if (iommu->copied_tables) { in free_dmar_iommu()
1729 bitmap_free(iommu->copied_tables); in free_dmar_iommu()
1730 iommu->copied_tables = NULL; in free_dmar_iommu()
1734 free_context_table(iommu); in free_dmar_iommu()
1737 if (pasid_supported(iommu)) { in free_dmar_iommu()
1738 if (ecap_prs(iommu->ecap)) in free_dmar_iommu()
1739 intel_svm_finish_prq(iommu); in free_dmar_iommu()
1741 if (vccap_pasid(iommu->vccap)) in free_dmar_iommu()
1742 ioasid_unregister_allocator(&iommu->pasid_allocator); in free_dmar_iommu()
1785 struct intel_iommu *iommu) in domain_attach_iommu() argument
1795 spin_lock(&iommu->lock); in domain_attach_iommu()
1796 curr = xa_load(&domain->iommu_array, iommu->seq_id); in domain_attach_iommu()
1799 spin_unlock(&iommu->lock); in domain_attach_iommu()
1804 ndomains = cap_ndoms(iommu->cap); in domain_attach_iommu()
1805 num = find_first_zero_bit(iommu->domain_ids, ndomains); in domain_attach_iommu()
1807 pr_err("%s: No free domain ids\n", iommu->name); in domain_attach_iommu()
1811 set_bit(num, iommu->domain_ids); in domain_attach_iommu()
1814 info->iommu = iommu; in domain_attach_iommu()
1815 curr = xa_cmpxchg(&domain->iommu_array, iommu->seq_id, in domain_attach_iommu()
1823 spin_unlock(&iommu->lock); in domain_attach_iommu()
1827 clear_bit(info->did, iommu->domain_ids); in domain_attach_iommu()
1829 spin_unlock(&iommu->lock); in domain_attach_iommu()
1835 struct intel_iommu *iommu) in domain_detach_iommu() argument
1839 spin_lock(&iommu->lock); in domain_detach_iommu()
1840 info = xa_load(&domain->iommu_array, iommu->seq_id); in domain_detach_iommu()
1842 clear_bit(info->did, iommu->domain_ids); in domain_detach_iommu()
1843 xa_erase(&domain->iommu_array, iommu->seq_id); in domain_detach_iommu()
1848 spin_unlock(&iommu->lock); in domain_detach_iommu()
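domain_attach_iommu()/domain_detach_iommu() maintain a per-(domain, IOMMU) pairing: the first attach claims a free domain ID from the IOMMU's domain_ids bitmap, later attaches only bump a refcount, and the last detach returns the ID. A user-space sketch of that allocation scheme (names and sizes are illustrative; the real code serializes under iommu->lock and keeps the pairing in an xarray keyed by the IOMMU's seq_id):

    #include <stdbool.h>
    #include <stdio.h>

    #define NDOMAINS 16   /* the real limit comes from cap_ndoms() */

    struct iommu_unit {
        bool domain_ids[NDOMAINS];   /* stand-in for the domain_ids bitmap */
    };

    /* First attach claims a free ID (ID 0 stays reserved); later attaches
     * just bump the refcount of the existing (domain, iommu) pairing. */
    static int attach_domain(struct iommu_unit *iommu, int *did, int *refcount)
    {
        if (*refcount) {
            (*refcount)++;
            return *did;
        }

        for (int i = 1; i < NDOMAINS; i++) {
            if (!iommu->domain_ids[i]) {
                iommu->domain_ids[i] = true;
                *did = i;
                *refcount = 1;
                return i;
            }
        }
        return -1;   /* no free domain IDs */
    }

    static void detach_domain(struct iommu_unit *iommu, int *did, int *refcount)
    {
        if (--(*refcount) == 0) {
            iommu->domain_ids[*did] = false;   /* last user returns the ID */
            *did = -1;
        }
    }

    int main(void)
    {
        struct iommu_unit iommu = { .domain_ids = { [0] = true } };
        int did = -1, ref = 0;

        printf("attach #1 -> did %d\n", attach_domain(&iommu, &did, &ref));
        printf("attach #2 -> did %d, refcount %d\n",
               attach_domain(&iommu, &did, &ref), ref);
        detach_domain(&iommu, &did, &ref);
        detach_domain(&iommu, &did, &ref);
        printf("after detach: refcount %d, did %d\n", ref, did);
        return 0;
    }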
1899 * IOMMU hardware will use the PASID value set in this field for
1930 struct intel_iommu *iommu, in domain_context_mapping_one() argument
1935 domain_lookup_dev_info(domain, iommu, bus, devfn); in domain_context_mapping_one()
1936 u16 did = domain_id_iommu(domain, iommu); in domain_context_mapping_one()
1951 spin_lock(&iommu->lock); in domain_context_mapping_one()
1953 context = iommu_context_addr(iommu, bus, devfn, 1); in domain_context_mapping_one()
1958 if (context_present(context) && !context_copied(iommu, bus, devfn)) in domain_context_mapping_one()
1970 if (context_copied(iommu, bus, devfn)) { in domain_context_mapping_one()
1973 if (did_old < cap_ndoms(iommu->cap)) { in domain_context_mapping_one()
1974 iommu->flush.flush_context(iommu, did_old, in domain_context_mapping_one()
1978 iommu->flush.flush_iotlb(iommu, did_old, 0, 0, in domain_context_mapping_one()
1982 clear_context_copied(iommu, bus, devfn); in domain_context_mapping_one()
1987 if (sm_supported(iommu)) { in domain_context_mapping_one()
2018 * Skip top levels of page tables for iommu which has in domain_context_mapping_one()
2021 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { in domain_context_mapping_one()
2041 context_set_address_width(context, iommu->msagaw); in domain_context_mapping_one()
2049 if (!ecap_coherent(iommu->ecap)) in domain_context_mapping_one()
2058 if (cap_caching_mode(iommu->cap)) { in domain_context_mapping_one()
2059 iommu->flush.flush_context(iommu, 0, in domain_context_mapping_one()
2063 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in domain_context_mapping_one()
2065 iommu_flush_write_buffer(iommu); in domain_context_mapping_one()
2072 spin_unlock(&iommu->lock); in domain_context_mapping_one()
2079 struct intel_iommu *iommu; member
2088 return domain_context_mapping_one(data->domain, data->iommu, in domain_context_mapping_cb()
2098 struct intel_iommu *iommu; in domain_context_mapping() local
2101 iommu = device_to_iommu(dev, &bus, &devfn); in domain_context_mapping()
2102 if (!iommu) in domain_context_mapping()
2108 return domain_context_mapping_one(domain, iommu, table, in domain_context_mapping()
2112 data.iommu = iommu; in domain_context_mapping()
2122 struct intel_iommu *iommu = opaque; in domain_context_mapped_cb() local
2124 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff); in domain_context_mapped_cb()
2129 struct intel_iommu *iommu; in domain_context_mapped() local
2132 iommu = device_to_iommu(dev, &bus, &devfn); in domain_context_mapped()
2133 if (!iommu) in domain_context_mapped()
2137 return device_context_mapped(iommu, bus, devfn); in domain_context_mapped()
2140 domain_context_mapped_cb, iommu); in domain_context_mapped()
2203 iommu_flush_iotlb_psi(info->iommu, domain, in switch_to_super_page()
2315 struct intel_iommu *iommu = info->iommu; in domain_context_clear_one() local
2319 if (!iommu) in domain_context_clear_one()
2322 spin_lock(&iommu->lock); in domain_context_clear_one()
2323 context = iommu_context_addr(iommu, bus, devfn, 0); in domain_context_clear_one()
2325 spin_unlock(&iommu->lock); in domain_context_clear_one()
2329 if (sm_supported(iommu)) { in domain_context_clear_one()
2333 did_old = domain_id_iommu(info->domain, iommu); in domain_context_clear_one()
2339 __iommu_flush_cache(iommu, context, sizeof(*context)); in domain_context_clear_one()
2340 spin_unlock(&iommu->lock); in domain_context_clear_one()
2341 iommu->flush.flush_context(iommu, in domain_context_clear_one()
2347 if (sm_supported(iommu)) in domain_context_clear_one()
2348 qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0); in domain_context_clear_one()
2350 iommu->flush.flush_iotlb(iommu, in domain_context_clear_one()
2359 static int domain_setup_first_level(struct intel_iommu *iommu, in domain_setup_first_level() argument
2369 * Skip top levels of page tables for iommu which has in domain_setup_first_level()
2372 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { in domain_setup_first_level()
2390 return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid, in domain_setup_first_level()
2391 domain_id_iommu(domain, iommu), in domain_setup_first_level()
2478 struct intel_iommu *iommu; in domain_add_dev_info() local
2483 iommu = device_to_iommu(dev, &bus, &devfn); in domain_add_dev_info()
2484 if (!iommu) in domain_add_dev_info()
2487 ret = domain_attach_iommu(domain, iommu); in domain_add_dev_info()
2496 if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) { in domain_add_dev_info()
2506 ret = intel_pasid_setup_pass_through(iommu, domain, in domain_add_dev_info()
2509 ret = domain_setup_first_level(iommu, domain, dev, in domain_add_dev_info()
2512 ret = intel_pasid_setup_second_level(iommu, domain, in domain_add_dev_info()
2590 * The second is use of the device through the IOMMU API. This interface
2594 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2596 * the IOMMU API, which eliminates them from device assignment.
2638 static void intel_iommu_init_qi(struct intel_iommu *iommu) in intel_iommu_init_qi() argument
2641 * Start from the sane iommu hardware state. in intel_iommu_init_qi()
2646 if (!iommu->qi) { in intel_iommu_init_qi()
2650 dmar_fault(-1, iommu); in intel_iommu_init_qi()
2655 dmar_disable_qi(iommu); in intel_iommu_init_qi()
2658 if (dmar_enable_qi(iommu)) { in intel_iommu_init_qi()
2662 iommu->flush.flush_context = __iommu_flush_context; in intel_iommu_init_qi()
2663 iommu->flush.flush_iotlb = __iommu_flush_iotlb; in intel_iommu_init_qi()
2665 iommu->name); in intel_iommu_init_qi()
2667 iommu->flush.flush_context = qi_flush_context; in intel_iommu_init_qi()
2668 iommu->flush.flush_iotlb = qi_flush_iotlb; in intel_iommu_init_qi()
2669 pr_info("%s: Using Queued invalidation\n", iommu->name); in intel_iommu_init_qi()
2673 static int copy_context_table(struct intel_iommu *iommu, in copy_context_table() argument
2695 __iommu_flush_cache(iommu, new_ce, in copy_context_table()
2725 new_ce = alloc_pgtable_page(iommu->node); in copy_context_table()
2739 if (did >= 0 && did < cap_ndoms(iommu->cap)) in copy_context_table()
2740 set_bit(did, iommu->domain_ids); in copy_context_table()
2742 set_context_copied(iommu, bus, devfn); in copy_context_table()
2748 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE); in copy_context_table()
2757 static int copy_translation_tables(struct intel_iommu *iommu) in copy_translation_tables() argument
2767 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG); in copy_translation_tables()
2769 new_ext = !!sm_supported(iommu); in copy_translation_tables()
2780 iommu->copied_tables = bitmap_zalloc(BIT_ULL(16), GFP_KERNEL); in copy_translation_tables()
2781 if (!iommu->copied_tables) in copy_translation_tables()
2800 ret = copy_context_table(iommu, &old_rt[bus], in copy_translation_tables()
2804 iommu->name, bus); in copy_translation_tables()
2809 spin_lock(&iommu->lock); in copy_translation_tables()
2818 iommu->root_entry[bus].lo = val; in copy_translation_tables()
2825 iommu->root_entry[bus].hi = val; in copy_translation_tables()
2828 spin_unlock(&iommu->lock); in copy_translation_tables()
2832 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE); in copy_translation_tables()
2845 struct intel_iommu *iommu = data; in intel_vcmd_ioasid_alloc() local
2848 if (!iommu) in intel_vcmd_ioasid_alloc()
2858 if (vcmd_alloc_pasid(iommu, &ioasid)) in intel_vcmd_ioasid_alloc()
2866 struct intel_iommu *iommu = data; in intel_vcmd_ioasid_free() local
2868 if (!iommu) in intel_vcmd_ioasid_free()
2878 vcmd_free_pasid(iommu, ioasid); in intel_vcmd_ioasid_free()
2881 static void register_pasid_allocator(struct intel_iommu *iommu) in register_pasid_allocator() argument
2887 if (!cap_caching_mode(iommu->cap)) in register_pasid_allocator()
2890 if (!sm_supported(iommu)) { in register_pasid_allocator()
2902 if (!vccap_pasid(iommu->vccap)) in register_pasid_allocator()
2906 iommu->pasid_allocator.alloc = intel_vcmd_ioasid_alloc; in register_pasid_allocator()
2907 iommu->pasid_allocator.free = intel_vcmd_ioasid_free; in register_pasid_allocator()
2908 iommu->pasid_allocator.pdata = (void *)iommu; in register_pasid_allocator()
2909 if (ioasid_register_allocator(&iommu->pasid_allocator)) { in register_pasid_allocator()
2912 * Disable scalable mode on this IOMMU if there in register_pasid_allocator()
2924 struct intel_iommu *iommu; in init_dmars() local
2931 for_each_iommu(iommu, drhd) { in init_dmars()
2933 iommu_disable_translation(iommu); in init_dmars()
2938 * Find the max pasid size of all IOMMUs in the system. in init_dmars()
2942 if (pasid_supported(iommu)) { in init_dmars()
2943 u32 temp = 2 << ecap_pss(iommu->ecap); in init_dmars()
2949 intel_iommu_init_qi(iommu); in init_dmars()
2951 ret = iommu_init_domains(iommu); in init_dmars()
2955 init_translation_status(iommu); in init_dmars()
2957 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) { in init_dmars()
2958 iommu_disable_translation(iommu); in init_dmars()
2959 clear_translation_pre_enabled(iommu); in init_dmars()
2961 iommu->name); in init_dmars()
2967 * among all IOMMUs. Need to split it later. in init_dmars()
2969 ret = iommu_alloc_root_entry(iommu); in init_dmars()
2973 if (translation_pre_enabled(iommu)) { in init_dmars()
2976 ret = copy_translation_tables(iommu); in init_dmars()
2979 * We found the IOMMU with translation in init_dmars()
2988 iommu->name); in init_dmars()
2989 iommu_disable_translation(iommu); in init_dmars()
2990 clear_translation_pre_enabled(iommu); in init_dmars()
2993 iommu->name); in init_dmars()
2997 if (!ecap_pass_through(iommu->ecap)) in init_dmars()
2999 intel_svm_check(iommu); in init_dmars()
3007 for_each_active_iommu(iommu, drhd) { in init_dmars()
3008 iommu_flush_write_buffer(iommu); in init_dmars()
3010 register_pasid_allocator(iommu); in init_dmars()
3012 iommu_set_root_entry(iommu); in init_dmars()
3035 for_each_iommu(iommu, drhd) { in init_dmars()
3042 iommu_disable_protect_mem_regions(iommu); in init_dmars()
3046 iommu_flush_write_buffer(iommu); in init_dmars()
3049 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) { in init_dmars()
3055 ret = intel_svm_enable_prq(iommu); in init_dmars()
3061 ret = dmar_set_interrupt(iommu); in init_dmars()
3069 for_each_active_iommu(iommu, drhd) { in init_dmars()
3070 disable_dmar_iommu(iommu); in init_dmars()
3071 free_dmar_iommu(iommu); in init_dmars()
3109 /* This IOMMU has *only* gfx devices. Either bypass it or in init_no_remapping_devices()
3121 struct intel_iommu *iommu = NULL; in init_iommu_hw() local
3123 for_each_active_iommu(iommu, drhd) in init_iommu_hw()
3124 if (iommu->qi) in init_iommu_hw()
3125 dmar_reenable_qi(iommu); in init_iommu_hw()
3127 for_each_iommu(iommu, drhd) { in init_iommu_hw()
3134 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
3138 iommu_flush_write_buffer(iommu); in init_iommu_hw()
3139 iommu_set_root_entry(iommu); in init_iommu_hw()
3140 iommu_enable_translation(iommu); in init_iommu_hw()
3141 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
3150 struct intel_iommu *iommu; in iommu_flush_all() local
3152 for_each_active_iommu(iommu, drhd) { in iommu_flush_all()
3153 iommu->flush.flush_context(iommu, 0, 0, 0, in iommu_flush_all()
3155 iommu->flush.flush_iotlb(iommu, 0, 0, 0, in iommu_flush_all()
3163 struct intel_iommu *iommu = NULL; in iommu_suspend() local
3166 for_each_active_iommu(iommu, drhd) { in iommu_suspend()
3167 iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32), in iommu_suspend()
3169 if (!iommu->iommu_state) in iommu_suspend()
3175 for_each_active_iommu(iommu, drhd) { in iommu_suspend()
3176 iommu_disable_translation(iommu); in iommu_suspend()
3178 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_suspend()
3180 iommu->iommu_state[SR_DMAR_FECTL_REG] = in iommu_suspend()
3181 readl(iommu->reg + DMAR_FECTL_REG); in iommu_suspend()
3182 iommu->iommu_state[SR_DMAR_FEDATA_REG] = in iommu_suspend()
3183 readl(iommu->reg + DMAR_FEDATA_REG); in iommu_suspend()
3184 iommu->iommu_state[SR_DMAR_FEADDR_REG] = in iommu_suspend()
3185 readl(iommu->reg + DMAR_FEADDR_REG); in iommu_suspend()
3186 iommu->iommu_state[SR_DMAR_FEUADDR_REG] = in iommu_suspend()
3187 readl(iommu->reg + DMAR_FEUADDR_REG); in iommu_suspend()
3189 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_suspend()
3194 for_each_active_iommu(iommu, drhd) in iommu_suspend()
3195 kfree(iommu->iommu_state); in iommu_suspend()
3203 struct intel_iommu *iommu = NULL; in iommu_resume() local
3208 panic("tboot: IOMMU setup failed, DMAR can not resume!\n"); in iommu_resume()
3210 WARN(1, "IOMMU setup failed, DMAR can not resume!\n"); in iommu_resume()
3214 for_each_active_iommu(iommu, drhd) { in iommu_resume()
3216 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_resume()
3218 writel(iommu->iommu_state[SR_DMAR_FECTL_REG], in iommu_resume()
3219 iommu->reg + DMAR_FECTL_REG); in iommu_resume()
3220 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG], in iommu_resume()
3221 iommu->reg + DMAR_FEDATA_REG); in iommu_resume()
3222 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG], in iommu_resume()
3223 iommu->reg + DMAR_FEADDR_REG); in iommu_resume()
3224 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], in iommu_resume()
3225 iommu->reg + DMAR_FEUADDR_REG); in iommu_resume()
3227 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_resume()
3230 for_each_active_iommu(iommu, drhd) in iommu_resume()
3231 kfree(iommu->iommu_state); in iommu_resume()
3456 struct intel_iommu *iommu = dmaru->iommu; in intel_iommu_add() local
3458 ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_DMAR, iommu); in intel_iommu_add()
3462 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) { in intel_iommu_add()
3464 iommu->name); in intel_iommu_add()
3468 sp = domain_update_iommu_superpage(NULL, iommu) - 1; in intel_iommu_add()
3469 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) { in intel_iommu_add()
3471 iommu->name); in intel_iommu_add()
3478 if (iommu->gcmd & DMA_GCMD_TE) in intel_iommu_add()
3479 iommu_disable_translation(iommu); in intel_iommu_add()
3481 ret = iommu_init_domains(iommu); in intel_iommu_add()
3483 ret = iommu_alloc_root_entry(iommu); in intel_iommu_add()
3487 intel_svm_check(iommu); in intel_iommu_add()
3494 iommu_disable_protect_mem_regions(iommu); in intel_iommu_add()
3498 intel_iommu_init_qi(iommu); in intel_iommu_add()
3499 iommu_flush_write_buffer(iommu); in intel_iommu_add()
3502 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) { in intel_iommu_add()
3503 ret = intel_svm_enable_prq(iommu); in intel_iommu_add()
3508 ret = dmar_set_interrupt(iommu); in intel_iommu_add()
3512 iommu_set_root_entry(iommu); in intel_iommu_add()
3513 iommu_enable_translation(iommu); in intel_iommu_add()
3515 iommu_disable_protect_mem_regions(iommu); in intel_iommu_add()
3519 disable_dmar_iommu(iommu); in intel_iommu_add()
3521 free_dmar_iommu(iommu); in intel_iommu_add()
3528 struct intel_iommu *iommu = dmaru->iommu; in dmar_iommu_hotplug() local
3532 if (iommu == NULL) in dmar_iommu_hotplug()
3538 disable_dmar_iommu(iommu); in dmar_iommu_hotplug()
3539 free_dmar_iommu(iommu); in dmar_iommu_hotplug()
3592 static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu) in dmar_ats_supported() argument
3607 * When IOMMU is in legacy mode, enabling ATS is done in dmar_ats_supported()
3612 return !(satcu->atc_required && !sm_supported(iommu)); in dmar_ats_supported()
3740 struct intel_iommu *iommu; in intel_iommu_memory_notifier() local
3746 for_each_active_iommu(iommu, drhd) in intel_iommu_memory_notifier()
3747 iommu_flush_iotlb_psi(iommu, si_domain, in intel_iommu_memory_notifier()
3766 struct intel_iommu *iommu = NULL; in intel_disable_iommus() local
3769 for_each_iommu(iommu, drhd) in intel_disable_iommus()
3770 iommu_disable_translation(iommu); in intel_disable_iommus()
3776 struct intel_iommu *iommu = NULL; in intel_iommu_shutdown() local
3784 for_each_iommu(iommu, drhd) in intel_iommu_shutdown()
3785 iommu_disable_protect_mem_regions(iommu); in intel_iommu_shutdown()
3797 return container_of(iommu_dev, struct intel_iommu, iommu); in dev_to_intel_iommu()
3803 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in version_show() local
3804 u32 ver = readl(iommu->reg + DMAR_VER_REG); in version_show()
3813 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in address_show() local
3814 return sprintf(buf, "%llx\n", iommu->reg_phys); in address_show()
3821 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in cap_show() local
3822 return sprintf(buf, "%llx\n", iommu->cap); in cap_show()
3829 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in ecap_show() local
3830 return sprintf(buf, "%llx\n", iommu->ecap); in ecap_show()
3837 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in domains_supported_show() local
3838 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap)); in domains_supported_show()
3845 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in domains_used_show() local
3846 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids, in domains_used_show()
3847 cap_ndoms(iommu->cap))); in domains_used_show()
3862 .name = "intel-iommu",
3890 pr_info("Intel-IOMMU force enabled due to platform opt in\n"); in platform_optin_force_iommu()
3893 * If Intel-IOMMU is disabled by default, we will apply identity in platform_optin_force_iommu()
3909 struct intel_iommu *iommu __maybe_unused; in probe_acpi_namespace_devices()
3913 for_each_active_iommu(iommu, drhd) { in probe_acpi_namespace_devices()
3953 pr_warn("Forcing Intel-IOMMU to enabled\n"); in tboot_force_iommu()
3965 struct intel_iommu *iommu; in intel_iommu_init() local
3968 * Intel IOMMU is required for a TXT/tboot launch or platform in intel_iommu_init()
4002 * We exit the function here to ensure IOMMU's remapping and in intel_iommu_init()
4003 * mempool aren't setup, which means that the IOMMU's PMRs in intel_iommu_init()
4010 for_each_iommu(iommu, drhd) in intel_iommu_init()
4011 iommu_disable_protect_mem_regions(iommu); in intel_iommu_init()
4046 for_each_active_iommu(iommu, drhd) { in intel_iommu_init()
4052 * the virtual and physical IOMMU page-tables. in intel_iommu_init()
4054 if (cap_caching_mode(iommu->cap)) { in intel_iommu_init()
4055 pr_info_once("IOMMU batching disallowed due to virtualization\n"); in intel_iommu_init()
4058 iommu_device_sysfs_add(&iommu->iommu, NULL, in intel_iommu_init()
4060 "%s", iommu->name); in intel_iommu_init()
4061 iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL); in intel_iommu_init()
4073 for_each_iommu(iommu, drhd) { in intel_iommu_init()
4074 if (!drhd->ignored && !translation_pre_enabled(iommu)) in intel_iommu_init()
4075 iommu_enable_translation(iommu); in intel_iommu_init()
4077 iommu_disable_protect_mem_regions(iommu); in intel_iommu_init()
4102 * NB - intel-iommu lacks any sort of reference counting for the users of
4109 if (!info->iommu || !info->dev || !dev_is_pci(info->dev)) in domain_context_clear()
4120 struct intel_iommu *iommu = info->iommu; in dmar_remove_one_dev_info() local
4124 if (dev_is_pci(info->dev) && sm_supported(iommu)) in dmar_remove_one_dev_info()
4125 intel_pasid_tear_down_entry(iommu, info->dev, in dmar_remove_one_dev_info()
4137 domain_detach_iommu(domain, iommu); in dmar_remove_one_dev_info()
4208 struct intel_iommu *iommu; in prepare_domain_attach_device() local
4211 iommu = device_to_iommu(dev, NULL, NULL); in prepare_domain_attach_device()
4212 if (!iommu) in prepare_domain_attach_device()
4215 if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap)) in prepare_domain_attach_device()
4218 /* check if this iommu agaw is sufficient for max mapped address */ in prepare_domain_attach_device()
4219 addr_width = agaw_to_width(iommu->agaw); in prepare_domain_attach_device()
4220 if (addr_width > cap_mgaw(iommu->cap)) in prepare_domain_attach_device()
4221 addr_width = cap_mgaw(iommu->cap); in prepare_domain_attach_device()
4224 dev_err(dev, "%s: iommu width (%d) is not " in prepare_domain_attach_device()
4234 while (iommu->agaw < dmar_domain->agaw) { in prepare_domain_attach_device()
4255 …dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Con… in intel_iommu_attach_device()
4302 pr_err("%s: iommu width (%d) is not " in intel_iommu_map()
4392 iommu_flush_iotlb_psi(info->iommu, dmar_domain, in intel_iommu_tlb_sync()
4423 if (!ecap_sc_support(info->iommu->ecap)) { in domain_support_force_snooping()
4447 intel_pasid_setup_page_snoop_control(info->iommu, info->dev, in domain_set_force_snooping()
4488 struct intel_iommu *iommu; in intel_iommu_probe_device() local
4491 iommu = device_to_iommu(dev, &bus, &devfn); in intel_iommu_probe_device()
4492 if (!iommu || !iommu->iommu.ops) in intel_iommu_probe_device()
4506 info->segment = iommu->segment; in intel_iommu_probe_device()
4510 info->iommu = iommu; in intel_iommu_probe_device()
4512 if (ecap_dev_iotlb_support(iommu->ecap) && in intel_iommu_probe_device()
4514 dmar_ats_supported(pdev, iommu)) { in intel_iommu_probe_device()
4518 if (sm_supported(iommu)) { in intel_iommu_probe_device()
4519 if (pasid_supported(iommu)) { in intel_iommu_probe_device()
4526 if (info->ats_supported && ecap_prs(iommu->ecap) && in intel_iommu_probe_device()
4534 return &iommu->iommu; in intel_iommu_probe_device()
4622 struct intel_iommu *iommu; in intel_iommu_enable_sva() local
4628 iommu = info->iommu; in intel_iommu_enable_sva()
4629 if (!iommu) in intel_iommu_enable_sva()
4632 if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE)) in intel_iommu_enable_sva()
4638 ret = iopf_queue_add_device(iommu->iopf_queue, dev); in intel_iommu_enable_sva()
4648 struct intel_iommu *iommu = info->iommu; in intel_iommu_disable_sva() local
4653 ret = iopf_queue_remove_device(iommu->iopf_queue, dev); in intel_iommu_disable_sva()
4702 return translation_pre_enabled(info->iommu) && !info->domain; in intel_iommu_is_attach_deferred()
4708 * thus not be able to bypass the IOMMU restrictions.
4714 "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n", in risky_device()
4732 __mapping_notify_one(info->iommu, dmar_domain, pfn, pages); in intel_iommu_iotlb_sync_map()
4773 pci_info(dev, "Disabling IOMMU for graphics on this chipset\n"); in quirk_iommu_igfx()
4854 pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); in quirk_calpella_no_shadow_gtt()
4883 pci_info(dev, "Skip IOMMU disabling for graphics\n"); in quirk_igfx_skip_te_disable()
4977 * before unmap/unbind. For #3, iommu driver gets mmu_notifier to
4994 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, in quirk_extra_dev_tlb_flush()
4997 qi_flush_dev_iotlb_pasid(info->iommu, sid, info->pfsid, in quirk_extra_dev_tlb_flush()