Lines Matching full:iommu

33 #include <linux/iommu.h>
34 #include <linux/dma-iommu.h>
35 #include <linux/intel-iommu.h>
47 #include <asm/iommu.h>
50 #include "../iommu-sva-lib.h"
154 /* global iommu list, set NULL for ignored DMAR units */
276 * 2. It maps to each iommu if successful.
 277 * 3. Each iommu maps to this domain if successful.
307 struct intel_iommu *iommu; /* the corresponding iommu */ member
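For orientation, the per-IOMMU bookkeeping implied by the comment above (and exercised by the domain_attach_iommu()/domain_detach_iommu() lines further down, which index arrays by iommu->seq_id) can be sketched roughly as follows; this is an illustrative declaration, not the verbatim kernel struct:

struct dmar_domain_sketch {					/* illustrative only */
	int		nid;					/* NUMA node of an attached IOMMU */
	unsigned int	iommu_refcnt[DMAR_UNITS_SUPPORTED];	/* attach refcount, per IOMMU seq_id */
	u16		iommu_did[DMAR_UNITS_SUPPORTED];	/* domain id allocated on each IOMMU */
	/* page-table root, flags, attached-device list, etc. omitted */
};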
392 static bool translation_pre_enabled(struct intel_iommu *iommu) in translation_pre_enabled() argument
394 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED); in translation_pre_enabled()
397 static void clear_translation_pre_enabled(struct intel_iommu *iommu) in clear_translation_pre_enabled() argument
399 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED; in clear_translation_pre_enabled()
402 static void init_translation_status(struct intel_iommu *iommu) in init_translation_status() argument
406 gsts = readl(iommu->reg + DMAR_GSTS_REG); in init_translation_status()
408 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED; in init_translation_status()
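The three matched lines from init_translation_status() (402, 406, 408) fit together roughly as in this sketch; the DMA_GSTS_TES check on the unmatched line is inferred from context:

static void init_translation_status(struct intel_iommu *iommu)
{
	u32 gsts;

	/* If the BIOS/kexec left translation enabled, remember that. */
	gsts = readl(iommu->reg + DMAR_GSTS_REG);
	if (gsts & DMA_GSTS_TES)
		iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
}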
418 pr_info("IOMMU enabled\n"); in intel_iommu_setup()
422 pr_info("IOMMU disabled\n"); in intel_iommu_setup()
427 pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n"); in intel_iommu_setup()
430 pr_warn("intel_iommu=strict deprecated; use iommu.strict=1 instead\n"); in intel_iommu_setup()
442 pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); in intel_iommu_setup()
457 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did) in get_iommu_domain() argument
462 domains = iommu->domains[idx]; in get_iommu_domain()
469 static void set_iommu_domain(struct intel_iommu *iommu, u16 did, in set_iommu_domain() argument
475 if (!iommu->domains[idx]) { in set_iommu_domain()
477 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC); in set_iommu_domain()
480 domains = iommu->domains[idx]; in set_iommu_domain()
541 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) in __iommu_calculate_agaw() argument
546 sagaw = cap_sagaw(iommu->cap); in __iommu_calculate_agaw()
557 * Calculate max SAGAW for each iommu.
559 int iommu_calculate_max_sagaw(struct intel_iommu *iommu) in iommu_calculate_max_sagaw() argument
561 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH); in iommu_calculate_max_sagaw()
565 * calculate agaw for each iommu.
569 int iommu_calculate_agaw(struct intel_iommu *iommu) in iommu_calculate_agaw() argument
571 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH); in iommu_calculate_agaw()
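For context, the selection loop elided from the __iommu_calculate_agaw() matches (541, 546) works roughly as below: pick the highest adjusted guest address width supported by SAGAW that does not exceed the requested width (a sketch; the exact body may differ):

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;	/* highest supported agaw <= max_gaw */
	}
	return agaw;
}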
 574 /* This function only returns a single iommu in a domain */
592 static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu) in iommu_paging_structure_coherency() argument
594 return sm_supported(iommu) ? in iommu_paging_structure_coherency()
595 ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap); in iommu_paging_structure_coherency()
601 struct intel_iommu *iommu; in domain_update_iommu_coherency() local
619 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_coherency()
620 if (!iommu_paging_structure_coherency(iommu)) { in domain_update_iommu_coherency()
631 struct intel_iommu *iommu; in domain_update_iommu_snooping() local
635 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_snooping()
636 if (iommu != skip) { in domain_update_iommu_snooping()
643 if (!sm_supported(iommu) && in domain_update_iommu_snooping()
644 !ecap_sc_support(iommu->ecap)) { in domain_update_iommu_snooping()
659 struct intel_iommu *iommu; in domain_update_iommu_superpage() local
667 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_superpage()
668 if (iommu != skip) { in domain_update_iommu_superpage()
670 if (!cap_fl1gp_support(iommu->cap)) in domain_update_iommu_superpage()
673 mask &= cap_super_page_val(iommu->cap); in domain_update_iommu_superpage()
762 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, in iommu_context_addr() argument
765 struct root_entry *root = &iommu->root_entry[bus]; in iommu_context_addr()
770 if (sm_supported(iommu)) { in iommu_context_addr()
784 context = alloc_pgtable_page(iommu->node); in iommu_context_addr()
788 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE); in iommu_context_addr()
791 __iommu_flush_cache(iommu, entry, sizeof(*entry)); in iommu_context_addr()
834 /* We know that this device on this chipset has its own IOMMU. in quirk_ioat_snb_local_iommu()
835 * If we find it under a different IOMMU, then the BIOS is lying in quirk_ioat_snb_local_iommu()
836 * to us. Hope that the IOMMU for this device is actually in quirk_ioat_snb_local_iommu()
 847 /* we know that this iommu should be at offset 0xa000 from vtbar */ in quirk_ioat_snb_local_iommu()
858 static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev) in iommu_is_dummy() argument
860 if (!iommu || iommu->drhd->ignored) in iommu_is_dummy()
879 struct intel_iommu *iommu; in device_to_iommu() local
893 * the PF instead to find the IOMMU. */ in device_to_iommu()
901 for_each_iommu(iommu, drhd) { in device_to_iommu()
909 * which we used for the IOMMU lookup. Strictly speaking in device_to_iommu()
935 iommu = NULL; in device_to_iommu()
937 if (iommu_is_dummy(iommu, dev)) in device_to_iommu()
938 iommu = NULL; in device_to_iommu()
942 return iommu; in device_to_iommu()
952 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn) in device_context_mapped() argument
958 spin_lock_irqsave(&iommu->lock, flags); in device_context_mapped()
959 context = iommu_context_addr(iommu, bus, devfn, 0); in device_context_mapped()
962 spin_unlock_irqrestore(&iommu->lock, flags); in device_context_mapped()
966 static void free_context_table(struct intel_iommu *iommu) in free_context_table() argument
972 spin_lock_irqsave(&iommu->lock, flags); in free_context_table()
973 if (!iommu->root_entry) { in free_context_table()
977 context = iommu_context_addr(iommu, i, 0, 0); in free_context_table()
981 if (!sm_supported(iommu)) in free_context_table()
984 context = iommu_context_addr(iommu, i, 0x80, 0); in free_context_table()
989 free_pgtable_page(iommu->root_entry); in free_context_table()
990 iommu->root_entry = NULL; in free_context_table()
992 spin_unlock_irqrestore(&iommu->lock, flags); in free_context_table()
1005 /* Address beyond IOMMU's addressing capabilities. */ in pfn_to_dma_pte()
1262 /* We can't just free the pages because the IOMMU may still be walking
1301 /* iommu handling */
1302 static int iommu_alloc_root_entry(struct intel_iommu *iommu) in iommu_alloc_root_entry() argument
1307 root = (struct root_entry *)alloc_pgtable_page(iommu->node); in iommu_alloc_root_entry()
1310 iommu->name); in iommu_alloc_root_entry()
1314 __iommu_flush_cache(iommu, root, ROOT_SIZE); in iommu_alloc_root_entry()
1316 spin_lock_irqsave(&iommu->lock, flags); in iommu_alloc_root_entry()
1317 iommu->root_entry = root; in iommu_alloc_root_entry()
1318 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_alloc_root_entry()
1323 static void iommu_set_root_entry(struct intel_iommu *iommu) in iommu_set_root_entry() argument
1329 addr = virt_to_phys(iommu->root_entry); in iommu_set_root_entry()
1330 if (sm_supported(iommu)) in iommu_set_root_entry()
1333 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_set_root_entry()
1334 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr); in iommu_set_root_entry()
1336 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); in iommu_set_root_entry()
1339 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_set_root_entry()
1342 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_set_root_entry()
1344 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); in iommu_set_root_entry()
1345 if (sm_supported(iommu)) in iommu_set_root_entry()
1346 qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0); in iommu_set_root_entry()
1347 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in iommu_set_root_entry()
1350 void iommu_flush_write_buffer(struct intel_iommu *iommu) in iommu_flush_write_buffer() argument
1355 if (!rwbf_quirk && !cap_rwbf(iommu->cap)) in iommu_flush_write_buffer()
1358 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1359 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG); in iommu_flush_write_buffer()
1362 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_flush_write_buffer()
1365 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1369 static void __iommu_flush_context(struct intel_iommu *iommu, in __iommu_flush_context() argument
1392 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_context()
1393 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); in __iommu_flush_context()
1396 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, in __iommu_flush_context()
1399 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_context()
1403 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, in __iommu_flush_iotlb() argument
1406 int tlb_offset = ecap_iotlb_offset(iommu->ecap); in __iommu_flush_iotlb()
1432 if (cap_read_drain(iommu->cap)) in __iommu_flush_iotlb()
1435 if (cap_write_drain(iommu->cap)) in __iommu_flush_iotlb()
1438 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1441 dmar_writeq(iommu->reg + tlb_offset, val_iva); in __iommu_flush_iotlb()
1442 dmar_writeq(iommu->reg + tlb_offset + 8, val); in __iommu_flush_iotlb()
1445 IOMMU_WAIT_OP(iommu, tlb_offset + 8, in __iommu_flush_iotlb()
1448 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1460 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu, in iommu_support_dev_iotlb() argument
1467 if (!iommu->qi) in iommu_support_dev_iotlb()
1471 if (info->iommu == iommu && info->bus == bus && in iommu_support_dev_iotlb()
 1519 /* For an IOMMU that supports device IOTLB throttling (DIT), we assign in iommu_enable_dev_iotlb()
1520 * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge in iommu_enable_dev_iotlb()
1524 if (!ecap_dit(info->iommu->ecap)) in iommu_enable_dev_iotlb()
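The PFSID assignment that the comment at 1519-1520 describes amounts to the following sketch (assuming pdev is the device's struct pci_dev in the surrounding iommu_enable_dev_iotlb() context): when DIT is supported, a VF's invalidation descriptors carry the physical function's source-id.

	if (!ecap_dit(info->iommu->ecap)) {
		info->pfsid = 0;
	} else {
		struct pci_dev *pf_pdev;

		/* pci_physfn() returns pdev itself if the device is not a VF */
		pf_pdev = pci_physfn(pdev);
		info->pfsid = pci_dev_id(pf_pdev);
	}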
1594 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, in __iommu_flush_dev_iotlb()
1619 static void domain_flush_piotlb(struct intel_iommu *iommu, in domain_flush_piotlb() argument
1623 u16 did = domain->iommu_did[iommu->seq_id]; in domain_flush_piotlb()
1626 qi_flush_piotlb(iommu, did, domain->default_pasid, in domain_flush_piotlb()
1630 qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, npages, ih); in domain_flush_piotlb()
1633 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, in iommu_flush_iotlb_psi() argument
1640 u16 did = domain->iommu_did[iommu->seq_id]; in iommu_flush_iotlb_psi()
1648 domain_flush_piotlb(iommu, domain, addr, pages, ih); in iommu_flush_iotlb_psi()
1655 if (!cap_pgsel_inv(iommu->cap) || in iommu_flush_iotlb_psi()
1656 mask > cap_max_amask_val(iommu->cap)) in iommu_flush_iotlb_psi()
1657 iommu->flush.flush_iotlb(iommu, did, 0, 0, in iommu_flush_iotlb_psi()
1660 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask, in iommu_flush_iotlb_psi()
1668 if (!cap_caching_mode(iommu->cap) || !map) in iommu_flush_iotlb_psi()
1673 static inline void __mapping_notify_one(struct intel_iommu *iommu, in __mapping_notify_one() argument
1681 if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain)) in __mapping_notify_one()
1682 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1); in __mapping_notify_one()
1684 iommu_flush_write_buffer(iommu); in __mapping_notify_one()
1693 struct intel_iommu *iommu = g_iommus[idx]; in intel_flush_iotlb_all() local
1694 u16 did = dmar_domain->iommu_did[iommu->seq_id]; in intel_flush_iotlb_all()
1697 domain_flush_piotlb(iommu, dmar_domain, 0, -1, 0); in intel_flush_iotlb_all()
1699 iommu->flush.flush_iotlb(iommu, did, 0, 0, in intel_flush_iotlb_all()
1702 if (!cap_caching_mode(iommu->cap)) in intel_flush_iotlb_all()
1703 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did), in intel_flush_iotlb_all()
1708 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) in iommu_disable_protect_mem_regions() argument
1713 if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap)) in iommu_disable_protect_mem_regions()
1716 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1717 pmen = readl(iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1719 writel(pmen, iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1722 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, in iommu_disable_protect_mem_regions()
1725 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1728 static void iommu_enable_translation(struct intel_iommu *iommu) in iommu_enable_translation() argument
1733 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_enable_translation()
1734 iommu->gcmd |= DMA_GCMD_TE; in iommu_enable_translation()
1735 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_enable_translation()
1738 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_enable_translation()
1741 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_enable_translation()
1744 static void iommu_disable_translation(struct intel_iommu *iommu) in iommu_disable_translation() argument
1749 if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated && in iommu_disable_translation()
1750 (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap))) in iommu_disable_translation()
1753 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_disable_translation()
1754 iommu->gcmd &= ~DMA_GCMD_TE; in iommu_disable_translation()
1755 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_disable_translation()
1758 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_disable_translation()
1761 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_disable_translation()
1764 static int iommu_init_domains(struct intel_iommu *iommu) in iommu_init_domains() argument
1769 ndomains = cap_ndoms(iommu->cap); in iommu_init_domains()
1771 iommu->name, ndomains); in iommu_init_domains()
1774 spin_lock_init(&iommu->lock); in iommu_init_domains()
1776 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL); in iommu_init_domains()
1777 if (!iommu->domain_ids) in iommu_init_domains()
1781 iommu->domains = kzalloc(size, GFP_KERNEL); in iommu_init_domains()
1783 if (iommu->domains) { in iommu_init_domains()
1785 iommu->domains[0] = kzalloc(size, GFP_KERNEL); in iommu_init_domains()
1788 if (!iommu->domains || !iommu->domains[0]) { in iommu_init_domains()
1790 iommu->name); in iommu_init_domains()
1791 kfree(iommu->domain_ids); in iommu_init_domains()
1792 kfree(iommu->domains); in iommu_init_domains()
1793 iommu->domain_ids = NULL; in iommu_init_domains()
1794 iommu->domains = NULL; in iommu_init_domains()
1804 set_bit(0, iommu->domain_ids); in iommu_init_domains()
1813 if (sm_supported(iommu)) in iommu_init_domains()
1814 set_bit(FLPT_DEFAULT_DID, iommu->domain_ids); in iommu_init_domains()
1819 static void disable_dmar_iommu(struct intel_iommu *iommu) in disable_dmar_iommu() argument
1824 if (!iommu->domains || !iommu->domain_ids) in disable_dmar_iommu()
1829 if (info->iommu != iommu) in disable_dmar_iommu()
1839 if (iommu->gcmd & DMA_GCMD_TE) in disable_dmar_iommu()
1840 iommu_disable_translation(iommu); in disable_dmar_iommu()
1843 static void free_dmar_iommu(struct intel_iommu *iommu) in free_dmar_iommu() argument
1845 if ((iommu->domains) && (iommu->domain_ids)) { in free_dmar_iommu()
1846 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8; in free_dmar_iommu()
1850 kfree(iommu->domains[i]); in free_dmar_iommu()
1851 kfree(iommu->domains); in free_dmar_iommu()
1852 kfree(iommu->domain_ids); in free_dmar_iommu()
1853 iommu->domains = NULL; in free_dmar_iommu()
1854 iommu->domain_ids = NULL; in free_dmar_iommu()
1857 g_iommus[iommu->seq_id] = NULL; in free_dmar_iommu()
1860 free_context_table(iommu); in free_dmar_iommu()
1863 if (pasid_supported(iommu)) { in free_dmar_iommu()
1864 if (ecap_prs(iommu->ecap)) in free_dmar_iommu()
1865 intel_svm_finish_prq(iommu); in free_dmar_iommu()
1867 if (vccap_pasid(iommu->vccap)) in free_dmar_iommu()
1868 ioasid_unregister_allocator(&iommu->pasid_allocator); in free_dmar_iommu()
1902 /* Must be called with iommu->lock */
1904 struct intel_iommu *iommu) in domain_attach_iommu() argument
1910 assert_spin_locked(&iommu->lock); in domain_attach_iommu()
1912 domain->iommu_refcnt[iommu->seq_id] += 1; in domain_attach_iommu()
1913 if (domain->iommu_refcnt[iommu->seq_id] == 1) { in domain_attach_iommu()
1914 ndomains = cap_ndoms(iommu->cap); in domain_attach_iommu()
1915 num = find_first_zero_bit(iommu->domain_ids, ndomains); in domain_attach_iommu()
1918 pr_err("%s: No free domain ids\n", iommu->name); in domain_attach_iommu()
1919 domain->iommu_refcnt[iommu->seq_id] -= 1; in domain_attach_iommu()
1923 set_bit(num, iommu->domain_ids); in domain_attach_iommu()
1924 set_iommu_domain(iommu, num, domain); in domain_attach_iommu()
1926 domain->iommu_did[iommu->seq_id] = num; in domain_attach_iommu()
1927 domain->nid = iommu->node; in domain_attach_iommu()
1936 struct intel_iommu *iommu) in domain_detach_iommu() argument
1941 assert_spin_locked(&iommu->lock); in domain_detach_iommu()
1943 domain->iommu_refcnt[iommu->seq_id] -= 1; in domain_detach_iommu()
1944 if (domain->iommu_refcnt[iommu->seq_id] == 0) { in domain_detach_iommu()
1945 num = domain->iommu_did[iommu->seq_id]; in domain_detach_iommu()
1946 clear_bit(num, iommu->domain_ids); in domain_detach_iommu()
1947 set_iommu_domain(iommu, num, NULL); in domain_detach_iommu()
1950 domain->iommu_did[iommu->seq_id] = 0; in domain_detach_iommu()
2004 * IOMMU hardware will use the PASID value set in this field for
2035 struct intel_iommu *iommu, in domain_context_mapping_one() argument
2039 u16 did = domain->iommu_did[iommu->seq_id]; in domain_context_mapping_one()
2057 spin_lock(&iommu->lock); in domain_context_mapping_one()
2060 context = iommu_context_addr(iommu, bus, devfn, 1); in domain_context_mapping_one()
2080 if (did_old < cap_ndoms(iommu->cap)) { in domain_context_mapping_one()
2081 iommu->flush.flush_context(iommu, did_old, in domain_context_mapping_one()
2085 iommu->flush.flush_iotlb(iommu, did_old, 0, 0, in domain_context_mapping_one()
2092 if (sm_supported(iommu)) { in domain_context_mapping_one()
2109 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn); in domain_context_mapping_one()
2122 * Skip top levels of page tables for iommu which has in domain_context_mapping_one()
2125 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { in domain_context_mapping_one()
2132 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn); in domain_context_mapping_one()
2146 context_set_address_width(context, iommu->msagaw); in domain_context_mapping_one()
2154 if (!ecap_coherent(iommu->ecap)) in domain_context_mapping_one()
2163 if (cap_caching_mode(iommu->cap)) { in domain_context_mapping_one()
2164 iommu->flush.flush_context(iommu, 0, in domain_context_mapping_one()
2168 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in domain_context_mapping_one()
2170 iommu_flush_write_buffer(iommu); in domain_context_mapping_one()
2177 spin_unlock(&iommu->lock); in domain_context_mapping_one()
2185 struct intel_iommu *iommu; member
2194 return domain_context_mapping_one(data->domain, data->iommu, in domain_context_mapping_cb()
2204 struct intel_iommu *iommu; in domain_context_mapping() local
2207 iommu = device_to_iommu(dev, &bus, &devfn); in domain_context_mapping()
2208 if (!iommu) in domain_context_mapping()
2214 return domain_context_mapping_one(domain, iommu, table, in domain_context_mapping()
2218 data.iommu = iommu; in domain_context_mapping()
2228 struct intel_iommu *iommu = opaque; in domain_context_mapped_cb() local
2230 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff); in domain_context_mapped_cb()
2235 struct intel_iommu *iommu; in domain_context_mapped() local
2238 iommu = device_to_iommu(dev, &bus, &devfn); in domain_context_mapped()
2239 if (!iommu) in domain_context_mapped()
2243 return device_context_mapped(iommu, bus, devfn); in domain_context_mapped()
2246 domain_context_mapped_cb, iommu); in domain_context_mapped()
2419 struct intel_iommu *iommu = info->iommu; in domain_context_clear_one() local
2424 if (!iommu) in domain_context_clear_one()
2427 spin_lock_irqsave(&iommu->lock, flags); in domain_context_clear_one()
2428 context = iommu_context_addr(iommu, bus, devfn, 0); in domain_context_clear_one()
2430 spin_unlock_irqrestore(&iommu->lock, flags); in domain_context_clear_one()
2434 if (sm_supported(iommu)) { in domain_context_clear_one()
2438 did_old = info->domain->iommu_did[iommu->seq_id]; in domain_context_clear_one()
2444 __iommu_flush_cache(iommu, context, sizeof(*context)); in domain_context_clear_one()
2445 spin_unlock_irqrestore(&iommu->lock, flags); in domain_context_clear_one()
2446 iommu->flush.flush_context(iommu, in domain_context_clear_one()
2452 if (sm_supported(iommu)) in domain_context_clear_one()
2453 qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0); in domain_context_clear_one()
2455 iommu->flush.flush_iotlb(iommu, in domain_context_clear_one()
2488 if (unlikely(!dev || !dev->iommu)) in find_domain()
2515 static int domain_setup_first_level(struct intel_iommu *iommu, in domain_setup_first_level() argument
2525 * Skip top levels of page tables for iommu which has in domain_setup_first_level()
2528 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { in domain_setup_first_level()
2546 return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid, in domain_setup_first_level()
2547 domain->iommu_did[iommu->seq_id], in domain_setup_first_level()
2557 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, in dmar_insert_one_dev_info() argument
2574 info->segment = iommu->segment; in dmar_insert_one_dev_info()
2588 info->iommu = iommu; in dmar_insert_one_dev_info()
2596 if (ecap_dev_iotlb_support(iommu->ecap) && in dmar_insert_one_dev_info()
2601 if (sm_supported(iommu)) { in dmar_insert_one_dev_info()
2602 if (pasid_supported(iommu)) { in dmar_insert_one_dev_info()
2608 if (info->ats_supported && ecap_prs(iommu->ecap) && in dmar_insert_one_dev_info()
2635 spin_lock(&iommu->lock); in dmar_insert_one_dev_info()
2636 ret = domain_attach_iommu(domain, iommu); in dmar_insert_one_dev_info()
2637 spin_unlock(&iommu->lock); in dmar_insert_one_dev_info()
2652 if (dev && dev_is_pci(dev) && sm_supported(iommu)) { in dmar_insert_one_dev_info()
2661 spin_lock_irqsave(&iommu->lock, flags); in dmar_insert_one_dev_info()
2663 ret = intel_pasid_setup_pass_through(iommu, domain, in dmar_insert_one_dev_info()
2666 ret = domain_setup_first_level(iommu, domain, dev, in dmar_insert_one_dev_info()
2669 ret = intel_pasid_setup_second_level(iommu, domain, in dmar_insert_one_dev_info()
2671 spin_unlock_irqrestore(&iommu->lock, flags); in dmar_insert_one_dev_info()
2764 struct intel_iommu *iommu; in domain_add_dev_info() local
2767 iommu = device_to_iommu(dev, &bus, &devfn); in domain_add_dev_info()
2768 if (!iommu) in domain_add_dev_info()
2771 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain); in domain_add_dev_info()
2837 * The second is use of the device through the IOMMU API. This interface
2841 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2843 * the IOMMU API, which eliminates them from device assignment.
2885 static void intel_iommu_init_qi(struct intel_iommu *iommu) in intel_iommu_init_qi() argument
 2888 * Start from a sane iommu hardware state. in intel_iommu_init_qi()
2893 if (!iommu->qi) { in intel_iommu_init_qi()
2897 dmar_fault(-1, iommu); in intel_iommu_init_qi()
2902 dmar_disable_qi(iommu); in intel_iommu_init_qi()
2905 if (dmar_enable_qi(iommu)) { in intel_iommu_init_qi()
2909 iommu->flush.flush_context = __iommu_flush_context; in intel_iommu_init_qi()
2910 iommu->flush.flush_iotlb = __iommu_flush_iotlb; in intel_iommu_init_qi()
2912 iommu->name); in intel_iommu_init_qi()
2914 iommu->flush.flush_context = qi_flush_context; in intel_iommu_init_qi()
2915 iommu->flush.flush_iotlb = qi_flush_iotlb; in intel_iommu_init_qi()
2916 pr_info("%s: Using Queued invalidation\n", iommu->name); in intel_iommu_init_qi()
2920 static int copy_context_table(struct intel_iommu *iommu, in copy_context_table() argument
2942 __iommu_flush_cache(iommu, new_ce, in copy_context_table()
2972 new_ce = alloc_pgtable_page(iommu->node); in copy_context_table()
2986 if (did >= 0 && did < cap_ndoms(iommu->cap)) in copy_context_table()
2987 set_bit(did, iommu->domain_ids); in copy_context_table()
3013 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE); in copy_context_table()
3022 static int copy_translation_tables(struct intel_iommu *iommu) in copy_translation_tables() argument
3033 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG); in copy_translation_tables()
3035 new_ext = !!ecap_ecs(iommu->ecap); in copy_translation_tables()
3062 ret = copy_context_table(iommu, &old_rt[bus], in copy_translation_tables()
3066 iommu->name, bus); in copy_translation_tables()
3071 spin_lock_irqsave(&iommu->lock, flags); in copy_translation_tables()
3080 iommu->root_entry[bus].lo = val; in copy_translation_tables()
3087 iommu->root_entry[bus].hi = val; in copy_translation_tables()
3090 spin_unlock_irqrestore(&iommu->lock, flags); in copy_translation_tables()
3094 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE); in copy_translation_tables()
3107 struct intel_iommu *iommu = data; in intel_vcmd_ioasid_alloc() local
3110 if (!iommu) in intel_vcmd_ioasid_alloc()
3120 if (vcmd_alloc_pasid(iommu, &ioasid)) in intel_vcmd_ioasid_alloc()
3128 struct intel_iommu *iommu = data; in intel_vcmd_ioasid_free() local
3130 if (!iommu) in intel_vcmd_ioasid_free()
3140 vcmd_free_pasid(iommu, ioasid); in intel_vcmd_ioasid_free()
3143 static void register_pasid_allocator(struct intel_iommu *iommu) in register_pasid_allocator() argument
3149 if (!cap_caching_mode(iommu->cap)) in register_pasid_allocator()
3152 if (!sm_supported(iommu)) { in register_pasid_allocator()
3164 if (!vccap_pasid(iommu->vccap)) in register_pasid_allocator()
3168 iommu->pasid_allocator.alloc = intel_vcmd_ioasid_alloc; in register_pasid_allocator()
3169 iommu->pasid_allocator.free = intel_vcmd_ioasid_free; in register_pasid_allocator()
3170 iommu->pasid_allocator.pdata = (void *)iommu; in register_pasid_allocator()
3171 if (ioasid_register_allocator(&iommu->pasid_allocator)) { in register_pasid_allocator()
3174 * Disable scalable mode on this IOMMU if there in register_pasid_allocator()
3186 struct intel_iommu *iommu; in init_dmars() local
3208 /* Preallocate enough resources for IOMMU hot-addition */ in init_dmars()
3223 for_each_iommu(iommu, drhd) { in init_dmars()
3225 iommu_disable_translation(iommu); in init_dmars()
 3230 * Find the max pasid size of all IOMMUs in the system. in init_dmars()
3234 if (pasid_supported(iommu)) { in init_dmars()
3235 u32 temp = 2 << ecap_pss(iommu->ecap); in init_dmars()
3241 g_iommus[iommu->seq_id] = iommu; in init_dmars()
3243 intel_iommu_init_qi(iommu); in init_dmars()
3245 ret = iommu_init_domains(iommu); in init_dmars()
3249 init_translation_status(iommu); in init_dmars()
3251 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) { in init_dmars()
3252 iommu_disable_translation(iommu); in init_dmars()
3253 clear_translation_pre_enabled(iommu); in init_dmars()
3255 iommu->name); in init_dmars()
 3261 * among all IOMMUs. Need to split it later. in init_dmars()
3263 ret = iommu_alloc_root_entry(iommu); in init_dmars()
3267 if (translation_pre_enabled(iommu)) { in init_dmars()
3270 ret = copy_translation_tables(iommu); in init_dmars()
3273 * We found the IOMMU with translation in init_dmars()
3282 iommu->name); in init_dmars()
3283 iommu_disable_translation(iommu); in init_dmars()
3284 clear_translation_pre_enabled(iommu); in init_dmars()
3287 iommu->name); in init_dmars()
3291 if (!ecap_pass_through(iommu->ecap)) in init_dmars()
3293 intel_svm_check(iommu); in init_dmars()
3301 for_each_active_iommu(iommu, drhd) { in init_dmars()
3302 iommu_flush_write_buffer(iommu); in init_dmars()
3304 register_pasid_allocator(iommu); in init_dmars()
3306 iommu_set_root_entry(iommu); in init_dmars()
3329 for_each_iommu(iommu, drhd) { in init_dmars()
3336 iommu_disable_protect_mem_regions(iommu); in init_dmars()
3340 iommu_flush_write_buffer(iommu); in init_dmars()
3343 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) { in init_dmars()
3349 ret = intel_svm_enable_prq(iommu); in init_dmars()
3355 ret = dmar_set_interrupt(iommu); in init_dmars()
3363 for_each_active_iommu(iommu, drhd) { in init_dmars()
3364 disable_dmar_iommu(iommu); in init_dmars()
3365 free_dmar_iommu(iommu); in init_dmars()
3466 /* This IOMMU has *only* gfx devices. Either bypass it or in init_no_remapping_devices()
3478 struct intel_iommu *iommu = NULL; in init_iommu_hw() local
3480 for_each_active_iommu(iommu, drhd) in init_iommu_hw()
3481 if (iommu->qi) in init_iommu_hw()
3482 dmar_reenable_qi(iommu); in init_iommu_hw()
3484 for_each_iommu(iommu, drhd) { in init_iommu_hw()
3491 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
3495 iommu_flush_write_buffer(iommu); in init_iommu_hw()
3496 iommu_set_root_entry(iommu); in init_iommu_hw()
3497 iommu_enable_translation(iommu); in init_iommu_hw()
3498 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
3507 struct intel_iommu *iommu; in iommu_flush_all() local
3509 for_each_active_iommu(iommu, drhd) { in iommu_flush_all()
3510 iommu->flush.flush_context(iommu, 0, 0, 0, in iommu_flush_all()
3512 iommu->flush.flush_iotlb(iommu, 0, 0, 0, in iommu_flush_all()
3520 struct intel_iommu *iommu = NULL; in iommu_suspend() local
3523 for_each_active_iommu(iommu, drhd) { in iommu_suspend()
3524 iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32), in iommu_suspend()
3526 if (!iommu->iommu_state) in iommu_suspend()
3532 for_each_active_iommu(iommu, drhd) { in iommu_suspend()
3533 iommu_disable_translation(iommu); in iommu_suspend()
3535 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_suspend()
3537 iommu->iommu_state[SR_DMAR_FECTL_REG] = in iommu_suspend()
3538 readl(iommu->reg + DMAR_FECTL_REG); in iommu_suspend()
3539 iommu->iommu_state[SR_DMAR_FEDATA_REG] = in iommu_suspend()
3540 readl(iommu->reg + DMAR_FEDATA_REG); in iommu_suspend()
3541 iommu->iommu_state[SR_DMAR_FEADDR_REG] = in iommu_suspend()
3542 readl(iommu->reg + DMAR_FEADDR_REG); in iommu_suspend()
3543 iommu->iommu_state[SR_DMAR_FEUADDR_REG] = in iommu_suspend()
3544 readl(iommu->reg + DMAR_FEUADDR_REG); in iommu_suspend()
3546 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_suspend()
3551 for_each_active_iommu(iommu, drhd) in iommu_suspend()
3552 kfree(iommu->iommu_state); in iommu_suspend()
3560 struct intel_iommu *iommu = NULL; in iommu_resume() local
3565 panic("tboot: IOMMU setup failed, DMAR can not resume!\n"); in iommu_resume()
3567 WARN(1, "IOMMU setup failed, DMAR can not resume!\n"); in iommu_resume()
3571 for_each_active_iommu(iommu, drhd) { in iommu_resume()
3573 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_resume()
3575 writel(iommu->iommu_state[SR_DMAR_FECTL_REG], in iommu_resume()
3576 iommu->reg + DMAR_FECTL_REG); in iommu_resume()
3577 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG], in iommu_resume()
3578 iommu->reg + DMAR_FEDATA_REG); in iommu_resume()
3579 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG], in iommu_resume()
3580 iommu->reg + DMAR_FEADDR_REG); in iommu_resume()
3581 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], in iommu_resume()
3582 iommu->reg + DMAR_FEUADDR_REG); in iommu_resume()
3584 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_resume()
3587 for_each_active_iommu(iommu, drhd) in iommu_resume()
3588 kfree(iommu->iommu_state); in iommu_resume()
3813 struct intel_iommu *iommu = dmaru->iommu; in intel_iommu_add() local
3815 if (g_iommus[iommu->seq_id]) in intel_iommu_add()
3818 ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_DMAR, iommu); in intel_iommu_add()
3822 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) { in intel_iommu_add()
3824 iommu->name); in intel_iommu_add()
3827 if (!ecap_sc_support(iommu->ecap) && in intel_iommu_add()
3828 domain_update_iommu_snooping(iommu)) { in intel_iommu_add()
3830 iommu->name); in intel_iommu_add()
3833 sp = domain_update_iommu_superpage(NULL, iommu) - 1; in intel_iommu_add()
3834 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) { in intel_iommu_add()
3836 iommu->name); in intel_iommu_add()
3843 if (iommu->gcmd & DMA_GCMD_TE) in intel_iommu_add()
3844 iommu_disable_translation(iommu); in intel_iommu_add()
3846 g_iommus[iommu->seq_id] = iommu; in intel_iommu_add()
3847 ret = iommu_init_domains(iommu); in intel_iommu_add()
3849 ret = iommu_alloc_root_entry(iommu); in intel_iommu_add()
3853 intel_svm_check(iommu); in intel_iommu_add()
3860 iommu_disable_protect_mem_regions(iommu); in intel_iommu_add()
3864 intel_iommu_init_qi(iommu); in intel_iommu_add()
3865 iommu_flush_write_buffer(iommu); in intel_iommu_add()
3868 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) { in intel_iommu_add()
3869 ret = intel_svm_enable_prq(iommu); in intel_iommu_add()
3874 ret = dmar_set_interrupt(iommu); in intel_iommu_add()
3878 iommu_set_root_entry(iommu); in intel_iommu_add()
3879 iommu_enable_translation(iommu); in intel_iommu_add()
3881 iommu_disable_protect_mem_regions(iommu); in intel_iommu_add()
3885 disable_dmar_iommu(iommu); in intel_iommu_add()
3887 free_dmar_iommu(iommu); in intel_iommu_add()
3894 struct intel_iommu *iommu = dmaru->iommu; in dmar_iommu_hotplug() local
3898 if (iommu == NULL) in dmar_iommu_hotplug()
3904 disable_dmar_iommu(iommu); in dmar_iommu_hotplug()
3905 free_dmar_iommu(iommu); in dmar_iommu_hotplug()
4070 struct intel_iommu *iommu; in intel_iommu_memory_notifier() local
4078 for_each_active_iommu(iommu, drhd) in intel_iommu_memory_notifier()
4079 iommu_flush_iotlb_psi(iommu, si_domain, in intel_iommu_memory_notifier()
4098 struct intel_iommu *iommu = NULL; in intel_disable_iommus() local
4101 for_each_iommu(iommu, drhd) in intel_disable_iommus()
4102 iommu_disable_translation(iommu); in intel_disable_iommus()
4108 struct intel_iommu *iommu = NULL; in intel_iommu_shutdown() local
4116 for_each_iommu(iommu, drhd) in intel_iommu_shutdown()
4117 iommu_disable_protect_mem_regions(iommu); in intel_iommu_shutdown()
4129 return container_of(iommu_dev, struct intel_iommu, iommu); in dev_to_intel_iommu()
4135 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in version_show() local
4136 u32 ver = readl(iommu->reg + DMAR_VER_REG); in version_show()
4145 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in address_show() local
4146 return sprintf(buf, "%llx\n", iommu->reg_phys); in address_show()
4153 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in cap_show() local
4154 return sprintf(buf, "%llx\n", iommu->cap); in cap_show()
4161 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in ecap_show() local
4162 return sprintf(buf, "%llx\n", iommu->ecap); in ecap_show()
4169 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in domains_supported_show() local
4170 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap)); in domains_supported_show()
4177 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in domains_used_show() local
4178 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids, in domains_used_show()
4179 cap_ndoms(iommu->cap))); in domains_used_show()
4194 .name = "intel-iommu",
4220 pr_info("Intel-IOMMU force enabled due to platform opt in\n"); in platform_optin_force_iommu()
4223 * If Intel-IOMMU is disabled by default, we will apply identity in platform_optin_force_iommu()
4239 struct intel_iommu *iommu __maybe_unused; in probe_acpi_namespace_devices()
4243 for_each_active_iommu(iommu, drhd) { in probe_acpi_namespace_devices()
4282 struct intel_iommu *iommu; in intel_iommu_init() local
4285 * Intel IOMMU is required for a TXT/tboot launch or platform in intel_iommu_init()
4293 panic("tboot: Failed to initialize iommu memory\n"); in intel_iommu_init()
4325 * We exit the function here to ensure IOMMU's remapping and in intel_iommu_init()
 4326 * mempool aren't set up, which means that the IOMMU's PMRs in intel_iommu_init()
4333 for_each_iommu(iommu, drhd) in intel_iommu_init()
4334 iommu_disable_protect_mem_regions(iommu); in intel_iommu_init()
4372 for_each_active_iommu(iommu, drhd) { in intel_iommu_init()
4378 * the virtual and physical IOMMU page-tables. in intel_iommu_init()
4380 if (cap_caching_mode(iommu->cap)) { in intel_iommu_init()
4381 pr_info_once("IOMMU batching disallowed due to virtualization\n"); in intel_iommu_init()
4384 iommu_device_sysfs_add(&iommu->iommu, NULL, in intel_iommu_init()
4386 "%s", iommu->name); in intel_iommu_init()
4387 iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL); in intel_iommu_init()
4400 for_each_iommu(iommu, drhd) { in intel_iommu_init()
4401 if (!drhd->ignored && !translation_pre_enabled(iommu)) in intel_iommu_init()
4402 iommu_enable_translation(iommu); in intel_iommu_init()
4404 iommu_disable_protect_mem_regions(iommu); in intel_iommu_init()
4430 * NB - intel-iommu lacks any sort of reference counting for the users of
4437 if (!info->iommu || !info->dev || !dev_is_pci(info->dev)) in domain_context_clear()
4447 struct intel_iommu *iommu; in __dmar_remove_one_dev_info() local
4455 iommu = info->iommu; in __dmar_remove_one_dev_info()
4459 if (dev_is_pci(info->dev) && sm_supported(iommu)) in __dmar_remove_one_dev_info()
4460 intel_pasid_tear_down_entry(iommu, info->dev, in __dmar_remove_one_dev_info()
4470 spin_lock_irqsave(&iommu->lock, flags); in __dmar_remove_one_dev_info()
4471 domain_detach_iommu(domain, iommu); in __dmar_remove_one_dev_info()
4472 spin_unlock_irqrestore(&iommu->lock, flags); in __dmar_remove_one_dev_info()
4630 struct intel_iommu *iommu; in aux_domain_add_dev() local
4632 iommu = device_to_iommu(dev, NULL, NULL); in aux_domain_add_dev()
4633 if (!iommu) in aux_domain_add_dev()
4665 * iommu->lock must be held to attach domain to iommu and setup the in aux_domain_add_dev()
4668 spin_lock(&iommu->lock); in aux_domain_add_dev()
4669 ret = domain_attach_iommu(domain, iommu); in aux_domain_add_dev()
4675 ret = domain_setup_first_level(iommu, domain, dev, in aux_domain_add_dev()
4678 ret = intel_pasid_setup_second_level(iommu, domain, dev, in aux_domain_add_dev()
4683 spin_unlock(&iommu->lock); in aux_domain_add_dev()
4690 domain_detach_iommu(domain, iommu); in aux_domain_add_dev()
4692 spin_unlock(&iommu->lock); in aux_domain_add_dev()
4706 struct intel_iommu *iommu; in aux_domain_remove_dev() local
4714 iommu = info->iommu; in aux_domain_remove_dev()
4717 spin_lock(&iommu->lock); in aux_domain_remove_dev()
4718 intel_pasid_tear_down_entry(iommu, dev, in aux_domain_remove_dev()
4720 domain_detach_iommu(domain, iommu); in aux_domain_remove_dev()
4721 spin_unlock(&iommu->lock); in aux_domain_remove_dev()
4734 struct intel_iommu *iommu; in prepare_domain_attach_device() local
4737 iommu = device_to_iommu(dev, NULL, NULL); in prepare_domain_attach_device()
4738 if (!iommu) in prepare_domain_attach_device()
4742 !ecap_nest(iommu->ecap)) { in prepare_domain_attach_device()
 4743 dev_err(dev, "%s: iommu does not support nested translation\n", in prepare_domain_attach_device()
4744 iommu->name); in prepare_domain_attach_device()
4748 /* check if this iommu agaw is sufficient for max mapped address */ in prepare_domain_attach_device()
4749 addr_width = agaw_to_width(iommu->agaw); in prepare_domain_attach_device()
4750 if (addr_width > cap_mgaw(iommu->cap)) in prepare_domain_attach_device()
4751 addr_width = cap_mgaw(iommu->cap); in prepare_domain_attach_device()
4754 dev_err(dev, "%s: iommu width (%d) is not " in prepare_domain_attach_device()
4764 while (iommu->agaw < dmar_domain->agaw) { in prepare_domain_attach_device()
4785 …dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Con… in intel_iommu_attach_device()
4837 * 2D array for converting and sanitizing IOMMU generic TLB granularity to
4848 * X: indexed by iommu cache type
4876 * IOMMU cache invalidate API passes granu_size in bytes, and number of in to_vtd_size()
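The conversion this to_vtd_size() comment (4876) begins to describe can be sketched as: multiply the granule size by the granule count, convert the total to 4KiB pages, and return the power-of-two order used as the VT-d address mask (assuming the usual VTD_PAGE_SHIFT and order_base_2() helpers):

static inline u64 to_vtd_size(u64 granu_size, u64 nr_granules)
{
	u64 nr_pages = (granu_size * nr_granules) >> VTD_PAGE_SHIFT;

	/* VT-d encodes the size as 2^order of 4K pages: 0 for 4K, 9 for 2M, ... */
	return order_base_2(nr_pages);
}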
4888 struct intel_iommu *iommu; in intel_iommu_sva_invalidate() local
4902 iommu = device_to_iommu(dev, &bus, &devfn); in intel_iommu_sva_invalidate()
4903 if (!iommu) in intel_iommu_sva_invalidate()
4910 spin_lock(&iommu->lock); in intel_iommu_sva_invalidate()
4916 did = dmar_domain->iommu_did[iommu->seq_id]; in intel_iommu_sva_invalidate()
4963 qi_flush_piotlb(iommu, did, pasid, in intel_iommu_sva_invalidate()
4993 qi_flush_dev_iotlb_pasid(iommu, sid, in intel_iommu_sva_invalidate()
5001 dev_err_ratelimited(dev, "Unsupported IOMMU invalidation type %d\n", in intel_iommu_sva_invalidate()
5007 spin_unlock(&iommu->lock); in intel_iommu_sva_invalidate()
5036 pr_err("%s: iommu width (%d) is not " in intel_iommu_map()
5161 struct intel_iommu *iommu; in intel_iommu_probe_device() local
5163 iommu = device_to_iommu(dev, NULL, NULL); in intel_iommu_probe_device()
5164 if (!iommu) in intel_iommu_probe_device()
5167 if (translation_pre_enabled(iommu)) in intel_iommu_probe_device()
5170 return &iommu->iommu; in intel_iommu_probe_device()
5175 struct intel_iommu *iommu; in intel_iommu_release_device() local
5177 iommu = device_to_iommu(dev, NULL, NULL); in intel_iommu_release_device()
5178 if (!iommu) in intel_iommu_release_device()
5249 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev) in intel_iommu_enable_pasid() argument
5263 spin_lock(&iommu->lock); in intel_iommu_enable_pasid()
5270 context = iommu_context_addr(iommu, info->bus, info->devfn, 0); in intel_iommu_enable_pasid()
5280 iommu->flush.flush_context(iommu, in intel_iommu_enable_pasid()
5281 domain->iommu_did[iommu->seq_id], in intel_iommu_enable_pasid()
5294 spin_unlock(&iommu->lock); in intel_iommu_enable_pasid()
5310 struct intel_iommu *iommu; in intel_iommu_enable_auxd() local
5314 iommu = device_to_iommu(dev, NULL, NULL); in intel_iommu_enable_auxd()
5315 if (!iommu || dmar_disabled) in intel_iommu_enable_auxd()
5318 if (!sm_supported(iommu) || !pasid_supported(iommu)) in intel_iommu_enable_auxd()
5321 ret = intel_iommu_enable_pasid(iommu, dev); in intel_iommu_enable_auxd()
5350 struct intel_iommu *iommu; in intel_iommu_enable_sva() local
5356 iommu = info->iommu; in intel_iommu_enable_sva()
5357 if (!iommu) in intel_iommu_enable_sva()
5360 if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE)) in intel_iommu_enable_sva()
5363 if (intel_iommu_enable_pasid(iommu, dev)) in intel_iommu_enable_sva()
5369 ret = iopf_queue_add_device(iommu->iopf_queue, dev); in intel_iommu_enable_sva()
5379 struct intel_iommu *iommu = info->iommu; in intel_iommu_disable_sva() local
5384 ret = iopf_queue_remove_device(iommu->iopf_queue, dev); in intel_iommu_disable_sva()
5440 return info && (info->iommu->flags & VTD_FLAG_SVM_CAPABLE) && in intel_iommu_dev_has_feat()
5530 * thus not be able to bypass the IOMMU restrictions.
5536 "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n", in risky_device()
5550 struct intel_iommu *iommu; in intel_iommu_iotlb_sync_map() local
5554 iommu = g_iommus[iommu_id]; in intel_iommu_iotlb_sync_map()
5555 __mapping_notify_one(iommu, dmar_domain, pfn, pages); in intel_iommu_iotlb_sync_map()
5604 pci_info(dev, "Disabling IOMMU for graphics on this chipset\n"); in quirk_iommu_igfx()
5685 pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); in quirk_calpella_no_shadow_gtt()
5714 pci_info(dev, "Skip IOMMU disabling for graphics\n"); in quirk_igfx_skip_te_disable()