Lines Matching full:iommu
33 #include <linux/iommu.h>
34 #include <linux/intel-iommu.h>
47 #include <asm/iommu.h>
90 * to the IOMMU core, which will then use this information to split
94 * Traditionally the IOMMU core just handed us the mappings directly,
101 * If at some point we'd like to utilize the IOMMU core's new behavior,
171 /* global iommu list, set NULL for ignored DMAR units */
293 * 2. It maps to each iommu if successful.
294 * 3. Each iommu maps to this domain if successful.
414 static bool translation_pre_enabled(struct intel_iommu *iommu) in translation_pre_enabled() argument
416 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED); in translation_pre_enabled()
419 static void clear_translation_pre_enabled(struct intel_iommu *iommu) in clear_translation_pre_enabled() argument
421 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED; in clear_translation_pre_enabled()
424 static void init_translation_status(struct intel_iommu *iommu) in init_translation_status() argument
428 gsts = readl(iommu->reg + DMAR_GSTS_REG); in init_translation_status()
430 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED; in init_translation_status()
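The translation_pre_enabled()/init_translation_status() excerpts above cache a hardware status bit (read once from DMAR_GSTS_REG) in a software flag so later code can test it cheaply. A minimal userspace sketch of that pattern follows; the register is simulated, and the bit position and flag name are assumptions rather than the driver's definitions.

#include <stdint.h>
#include <stdio.h>

#define GSTS_TES                (1u << 31)  /* assumed "translation enabled" status bit */
#define FLAG_TRANS_PRE_ENABLED  (1u << 0)   /* stand-in for VTD_FLAG_TRANS_PRE_ENABLED */

struct fake_iommu {
        uint32_t gsts;   /* simulates readl(iommu->reg + DMAR_GSTS_REG) */
        uint32_t flags;
};

/* Cache the hardware state once so later checks are a plain flag test. */
static void init_translation_status(struct fake_iommu *iommu)
{
        if (iommu->gsts & GSTS_TES)
                iommu->flags |= FLAG_TRANS_PRE_ENABLED;
}

static int translation_pre_enabled(const struct fake_iommu *iommu)
{
        return iommu->flags & FLAG_TRANS_PRE_ENABLED;
}

int main(void)
{
        struct fake_iommu iommu = { .gsts = GSTS_TES, .flags = 0 };

        init_translation_status(&iommu);
        printf("pre-enabled: %d\n", translation_pre_enabled(&iommu) ? 1 : 0);
        return 0;
}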
440 pr_info("IOMMU enabled\n"); in intel_iommu_setup()
444 pr_info("IOMMU disabled\n"); in intel_iommu_setup()
458 pr_info("Intel-IOMMU: scalable mode supported\n"); in intel_iommu_setup()
461 pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); in intel_iommu_setup()
464 pr_info("Intel-IOMMU: No bounce buffer. This could expose security risks of DMA attacks\n"); in intel_iommu_setup()
479 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did) in get_iommu_domain() argument
484 domains = iommu->domains[idx]; in get_iommu_domain()
491 static void set_iommu_domain(struct intel_iommu *iommu, u16 did, in set_iommu_domain() argument
497 if (!iommu->domains[idx]) { in set_iommu_domain()
499 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC); in set_iommu_domain()
502 domains = iommu->domains[idx]; in set_iommu_domain()
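get_iommu_domain()/set_iommu_domain() treat iommu->domains as a lazily populated two-level table: the domain id is split into a top-level index (did >> 8) and a 256-entry second level that is only allocated on first use. A standalone sketch of that layout, with made-up type and size names, might look like this:

#include <stdio.h>
#include <stdlib.h>

#define NR_TOP 256              /* 256 * 256 = 65536 possible domain ids */

struct domain;                  /* opaque for this sketch */

struct fake_iommu {
        struct domain **domains[NR_TOP];
};

static int set_domain(struct fake_iommu *iommu, unsigned int did, struct domain *dom)
{
        unsigned int idx = did >> 8;    /* top-level slot, as in set_iommu_domain() */

        if (!iommu->domains[idx]) {
                /* second level allocated lazily, first use only */
                iommu->domains[idx] = calloc(256, sizeof(struct domain *));
                if (!iommu->domains[idx])
                        return -1;
        }
        iommu->domains[idx][did & 0xff] = dom;
        return 0;
}

static struct domain *get_domain(struct fake_iommu *iommu, unsigned int did)
{
        struct domain **tbl = iommu->domains[did >> 8];

        return tbl ? tbl[did & 0xff] : NULL;
}

int main(void)
{
        static struct fake_iommu iommu; /* zero-initialized top level */

        set_domain(&iommu, 0x1234, (struct domain *)0x1);
        printf("did 0x1234 -> %p\n", (void *)get_domain(&iommu, 0x1234));
        printf("did 0x4321 -> %p\n", (void *)get_domain(&iommu, 0x4321));
        return 0;
}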
563 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) in __iommu_calculate_agaw() argument
568 sagaw = cap_sagaw(iommu->cap); in __iommu_calculate_agaw()
579 * Calculate max SAGAW for each iommu.
581 int iommu_calculate_max_sagaw(struct intel_iommu *iommu) in iommu_calculate_max_sagaw() argument
583 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH); in iommu_calculate_max_sagaw()
587 * calculate agaw for each iommu.
591 int iommu_calculate_agaw(struct intel_iommu *iommu) in iommu_calculate_agaw() argument
593 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH); in iommu_calculate_agaw()
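The AGAW helpers above pick the adjusted guest address width by scanning the SAGAW capability bits downward from the requested width. A rough, self-contained sketch of that selection, assuming the usual VT-d 30 + 9*agaw width relation and a made-up SAGAW value:

#include <stdio.h>

static int width_to_agaw(int width)
{
        return (width - 30) / 9;        /* 30-bit -> 0, 39-bit -> 1, 48-bit -> 2, ... */
}

static int calculate_agaw(unsigned long sagaw, int max_gaw)
{
        int agaw;

        /* widest supported width that does not exceed the request */
        for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) {
                if (sagaw & (1UL << agaw))
                        break;
        }
        return agaw;                    /* -1 if nothing suitable is supported */
}

int main(void)
{
        unsigned long sagaw = 0x4;      /* pretend only 48-bit (agaw 2) is supported */

        printf("agaw for 57-bit request: %d\n", calculate_agaw(sagaw, 57));
        printf("agaw for 39-bit request: %d\n", calculate_agaw(sagaw, 39));
        return 0;
}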
596 /* This function only returns a single iommu in a domain */
614 static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu) in iommu_paging_structure_coherency() argument
616 return sm_supported(iommu) ? in iommu_paging_structure_coherency()
617 ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap); in iommu_paging_structure_coherency()
623 struct intel_iommu *iommu; in domain_update_iommu_coherency() local
641 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_coherency()
642 if (!iommu_paging_structure_coherency(iommu)) { in domain_update_iommu_coherency()
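domain_update_iommu_coherency() walks every active IOMMU and downgrades the domain to non-coherent as soon as one unit lacks coherent page-walks, i.e. the weakest unit wins. A trivial standalone sketch of that aggregation, with plain booleans instead of real ECAP bits:

#include <stdbool.h>
#include <stdio.h>

struct fake_iommu {
        bool paging_coherent;   /* stands in for iommu_paging_structure_coherency() */
};

/* A domain is coherent only if every active unit attached to it is. */
static bool domain_update_coherency(const struct fake_iommu *units, int n)
{
        bool coherent = true;
        int i;

        for (i = 0; i < n; i++)
                if (!units[i].paging_coherent)
                        coherent = false;

        return coherent;
}

int main(void)
{
        struct fake_iommu units[] = { { true }, { false }, { true } };

        printf("domain coherent: %d\n", domain_update_coherency(units, 3));
        return 0;
}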
653 struct intel_iommu *iommu; in domain_update_iommu_snooping() local
657 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_snooping()
658 if (iommu != skip) { in domain_update_iommu_snooping()
659 if (!ecap_sc_support(iommu->ecap)) { in domain_update_iommu_snooping()
674 struct intel_iommu *iommu; in domain_update_iommu_superpage() local
683 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_superpage()
684 if (iommu != skip) { in domain_update_iommu_superpage()
686 if (!cap_fl1gp_support(iommu->cap)) in domain_update_iommu_superpage()
689 mask &= cap_super_page_val(iommu->cap); in domain_update_iommu_superpage()
744 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, in iommu_context_addr() argument
747 struct root_entry *root = &iommu->root_entry[bus]; in iommu_context_addr()
752 if (sm_supported(iommu)) { in iommu_context_addr()
766 context = alloc_pgtable_page(iommu->node); in iommu_context_addr()
770 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE); in iommu_context_addr()
773 __iommu_flush_cache(iommu, entry, sizeof(*entry)); in iommu_context_addr()
816 /* We know that this device on this chipset has its own IOMMU. in quirk_ioat_snb_local_iommu()
817 * If we find it under a different IOMMU, then the BIOS is lying in quirk_ioat_snb_local_iommu()
818 * to us. Hope that the IOMMU for this device is actually in quirk_ioat_snb_local_iommu()
829 /* we know that this iommu should be at offset 0xa000 from vtbar */ in quirk_ioat_snb_local_iommu()
840 static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev) in iommu_is_dummy() argument
842 if (!iommu || iommu->drhd->ignored) in iommu_is_dummy()
861 struct intel_iommu *iommu; in device_to_iommu() local
875 * the PF instead to find the IOMMU. */ in device_to_iommu()
883 for_each_iommu(iommu, drhd) { in device_to_iommu()
891 * which we used for the IOMMU lookup. Strictly speaking in device_to_iommu()
917 iommu = NULL; in device_to_iommu()
919 if (iommu_is_dummy(iommu, dev)) in device_to_iommu()
920 iommu = NULL; in device_to_iommu()
924 return iommu; in device_to_iommu()
934 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn) in device_context_mapped() argument
940 spin_lock_irqsave(&iommu->lock, flags); in device_context_mapped()
941 context = iommu_context_addr(iommu, bus, devfn, 0); in device_context_mapped()
944 spin_unlock_irqrestore(&iommu->lock, flags); in device_context_mapped()
948 static void free_context_table(struct intel_iommu *iommu) in free_context_table() argument
954 spin_lock_irqsave(&iommu->lock, flags); in free_context_table()
955 if (!iommu->root_entry) { in free_context_table()
959 context = iommu_context_addr(iommu, i, 0, 0); in free_context_table()
963 if (!sm_supported(iommu)) in free_context_table()
966 context = iommu_context_addr(iommu, i, 0x80, 0); in free_context_table()
971 free_pgtable_page(iommu->root_entry); in free_context_table()
972 iommu->root_entry = NULL; in free_context_table()
974 spin_unlock_irqrestore(&iommu->lock, flags); in free_context_table()
987 /* Address beyond IOMMU's addressing capabilities. */ in pfn_to_dma_pte()
1241 /* We can't just free the pages because the IOMMU may still be walking
1287 /* iommu handling */
1288 static int iommu_alloc_root_entry(struct intel_iommu *iommu) in iommu_alloc_root_entry() argument
1293 root = (struct root_entry *)alloc_pgtable_page(iommu->node); in iommu_alloc_root_entry()
1296 iommu->name); in iommu_alloc_root_entry()
1300 __iommu_flush_cache(iommu, root, ROOT_SIZE); in iommu_alloc_root_entry()
1302 spin_lock_irqsave(&iommu->lock, flags); in iommu_alloc_root_entry()
1303 iommu->root_entry = root; in iommu_alloc_root_entry()
1304 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_alloc_root_entry()
1309 static void iommu_set_root_entry(struct intel_iommu *iommu) in iommu_set_root_entry() argument
1315 addr = virt_to_phys(iommu->root_entry); in iommu_set_root_entry()
1316 if (sm_supported(iommu)) in iommu_set_root_entry()
1319 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_set_root_entry()
1320 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr); in iommu_set_root_entry()
1322 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); in iommu_set_root_entry()
1325 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_set_root_entry()
1328 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_set_root_entry()
1331 void iommu_flush_write_buffer(struct intel_iommu *iommu) in iommu_flush_write_buffer() argument
1336 if (!rwbf_quirk && !cap_rwbf(iommu->cap)) in iommu_flush_write_buffer()
1339 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1340 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG); in iommu_flush_write_buffer()
1343 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_flush_write_buffer()
1346 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1350 static void __iommu_flush_context(struct intel_iommu *iommu, in __iommu_flush_context() argument
1373 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_context()
1374 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); in __iommu_flush_context()
1377 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, in __iommu_flush_context()
1380 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_context()
1384 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, in __iommu_flush_iotlb() argument
1387 int tlb_offset = ecap_iotlb_offset(iommu->ecap); in __iommu_flush_iotlb()
1413 if (cap_read_drain(iommu->cap)) in __iommu_flush_iotlb()
1416 if (cap_write_drain(iommu->cap)) in __iommu_flush_iotlb()
1419 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1422 dmar_writeq(iommu->reg + tlb_offset, val_iva); in __iommu_flush_iotlb()
1423 dmar_writeq(iommu->reg + tlb_offset + 8, val); in __iommu_flush_iotlb()
1426 IOMMU_WAIT_OP(iommu, tlb_offset + 8, in __iommu_flush_iotlb()
1429 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1441 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu, in iommu_support_dev_iotlb() argument
1448 if (!iommu->qi) in iommu_support_dev_iotlb()
1452 if (info->iommu == iommu && info->bus == bus && in iommu_support_dev_iotlb()
1495 /* For an IOMMU that supports device IOTLB throttling (DIT), we assign in iommu_enable_dev_iotlb()
1496 * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge in iommu_enable_dev_iotlb()
1500 if (!ecap_dit(info->iommu->ecap)) in iommu_enable_dev_iotlb()
1577 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, in iommu_flush_dev_iotlb()
1583 static void domain_flush_piotlb(struct intel_iommu *iommu, in domain_flush_piotlb() argument
1587 u16 did = domain->iommu_did[iommu->seq_id]; in domain_flush_piotlb()
1590 qi_flush_piotlb(iommu, did, domain->default_pasid, in domain_flush_piotlb()
1594 qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, npages, ih); in domain_flush_piotlb()
1597 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, in iommu_flush_iotlb_psi() argument
1604 u16 did = domain->iommu_did[iommu->seq_id]; in iommu_flush_iotlb_psi()
1612 domain_flush_piotlb(iommu, domain, addr, pages, ih); in iommu_flush_iotlb_psi()
1619 if (!cap_pgsel_inv(iommu->cap) || in iommu_flush_iotlb_psi()
1620 mask > cap_max_amask_val(iommu->cap)) in iommu_flush_iotlb_psi()
1621 iommu->flush.flush_iotlb(iommu, did, 0, 0, in iommu_flush_iotlb_psi()
1624 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask, in iommu_flush_iotlb_psi()
1632 if (!cap_caching_mode(iommu->cap) || !map) in iommu_flush_iotlb_psi()
1637 static inline void __mapping_notify_one(struct intel_iommu *iommu, in __mapping_notify_one() argument
1645 if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain)) in __mapping_notify_one()
1646 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1); in __mapping_notify_one()
1648 iommu_flush_write_buffer(iommu); in __mapping_notify_one()
1659 struct intel_iommu *iommu = g_iommus[idx]; in iommu_flush_iova() local
1660 u16 did = domain->iommu_did[iommu->seq_id]; in iommu_flush_iova()
1663 domain_flush_piotlb(iommu, domain, 0, -1, 0); in iommu_flush_iova()
1665 iommu->flush.flush_iotlb(iommu, did, 0, 0, in iommu_flush_iova()
1668 if (!cap_caching_mode(iommu->cap)) in iommu_flush_iova()
1669 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did), in iommu_flush_iova()
1674 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) in iommu_disable_protect_mem_regions() argument
1679 if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap)) in iommu_disable_protect_mem_regions()
1682 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1683 pmen = readl(iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1685 writel(pmen, iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1688 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, in iommu_disable_protect_mem_regions()
1691 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1694 static void iommu_enable_translation(struct intel_iommu *iommu) in iommu_enable_translation() argument
1699 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_enable_translation()
1700 iommu->gcmd |= DMA_GCMD_TE; in iommu_enable_translation()
1701 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_enable_translation()
1704 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_enable_translation()
1707 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_enable_translation()
1710 static void iommu_disable_translation(struct intel_iommu *iommu) in iommu_disable_translation() argument
1715 if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated && in iommu_disable_translation()
1716 (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap))) in iommu_disable_translation()
1719 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_disable_translation()
1720 iommu->gcmd &= ~DMA_GCMD_TE; in iommu_disable_translation()
1721 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_disable_translation()
1724 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_disable_translation()
1727 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_disable_translation()
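iommu_enable_translation() and iommu_disable_translation() both follow the same handshake: update the cached gcmd word, write it to DMAR_GCMD_REG, then poll DMAR_GSTS_REG via IOMMU_WAIT_OP() until the hardware acknowledges the change. The sketch below imitates that write-then-poll pattern with simulated registers; the bit name is illustrative only.

#include <stdint.h>
#include <stdio.h>

#define CMD_TE  (1u << 31)   /* illustrative "translation enable" bit */

struct fake_regs {
        uint32_t gcmd;       /* command register */
        uint32_t gsts;       /* status register */
};

/* Simulated hardware: status follows the command word on each tick. */
static void hw_tick(struct fake_regs *r)
{
        r->gsts = r->gcmd;
}

static void enable_translation(struct fake_regs *r, uint32_t *cached_gcmd)
{
        *cached_gcmd |= CMD_TE;
        r->gcmd = *cached_gcmd;          /* like writel(iommu->gcmd, ...) */

        do {                             /* rough IOMMU_WAIT_OP() equivalent */
                hw_tick(r);              /* real hardware updates on its own */
        } while (!(r->gsts & CMD_TE));
}

int main(void)
{
        struct fake_regs regs = { 0, 0 };
        uint32_t gcmd = 0;

        enable_translation(&regs, &gcmd);
        printf("gsts=0x%08x\n", regs.gsts);
        return 0;
}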
1730 static int iommu_init_domains(struct intel_iommu *iommu) in iommu_init_domains() argument
1735 ndomains = cap_ndoms(iommu->cap); in iommu_init_domains()
1737 iommu->name, ndomains); in iommu_init_domains()
1740 spin_lock_init(&iommu->lock); in iommu_init_domains()
1742 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL); in iommu_init_domains()
1743 if (!iommu->domain_ids) { in iommu_init_domains()
1745 iommu->name); in iommu_init_domains()
1750 iommu->domains = kzalloc(size, GFP_KERNEL); in iommu_init_domains()
1752 if (iommu->domains) { in iommu_init_domains()
1754 iommu->domains[0] = kzalloc(size, GFP_KERNEL); in iommu_init_domains()
1757 if (!iommu->domains || !iommu->domains[0]) { in iommu_init_domains()
1759 iommu->name); in iommu_init_domains()
1760 kfree(iommu->domain_ids); in iommu_init_domains()
1761 kfree(iommu->domains); in iommu_init_domains()
1762 iommu->domain_ids = NULL; in iommu_init_domains()
1763 iommu->domains = NULL; in iommu_init_domains()
1773 set_bit(0, iommu->domain_ids); in iommu_init_domains()
1782 if (sm_supported(iommu)) in iommu_init_domains()
1783 set_bit(FLPT_DEFAULT_DID, iommu->domain_ids); in iommu_init_domains()
1788 static void disable_dmar_iommu(struct intel_iommu *iommu) in disable_dmar_iommu() argument
1793 if (!iommu->domains || !iommu->domain_ids) in disable_dmar_iommu()
1798 if (info->iommu != iommu) in disable_dmar_iommu()
1808 if (iommu->gcmd & DMA_GCMD_TE) in disable_dmar_iommu()
1809 iommu_disable_translation(iommu); in disable_dmar_iommu()
1812 static void free_dmar_iommu(struct intel_iommu *iommu) in free_dmar_iommu() argument
1814 if ((iommu->domains) && (iommu->domain_ids)) { in free_dmar_iommu()
1815 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8; in free_dmar_iommu()
1819 kfree(iommu->domains[i]); in free_dmar_iommu()
1820 kfree(iommu->domains); in free_dmar_iommu()
1821 kfree(iommu->domain_ids); in free_dmar_iommu()
1822 iommu->domains = NULL; in free_dmar_iommu()
1823 iommu->domain_ids = NULL; in free_dmar_iommu()
1826 g_iommus[iommu->seq_id] = NULL; in free_dmar_iommu()
1829 free_context_table(iommu); in free_dmar_iommu()
1832 if (pasid_supported(iommu)) { in free_dmar_iommu()
1833 if (ecap_prs(iommu->ecap)) in free_dmar_iommu()
1834 intel_svm_finish_prq(iommu); in free_dmar_iommu()
1836 if (vccap_pasid(iommu->vccap)) in free_dmar_iommu()
1837 ioasid_unregister_allocator(&iommu->pasid_allocator); in free_dmar_iommu()
1849 struct intel_iommu *iommu; in first_level_by_default() local
1858 for_each_active_iommu(iommu, drhd) { in first_level_by_default()
1859 if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) { in first_level_by_default()
1888 /* Must be called with iommu->lock */
1890 struct intel_iommu *iommu) in domain_attach_iommu() argument
1896 assert_spin_locked(&iommu->lock); in domain_attach_iommu()
1898 domain->iommu_refcnt[iommu->seq_id] += 1; in domain_attach_iommu()
1900 if (domain->iommu_refcnt[iommu->seq_id] == 1) { in domain_attach_iommu()
1901 ndomains = cap_ndoms(iommu->cap); in domain_attach_iommu()
1902 num = find_first_zero_bit(iommu->domain_ids, ndomains); in domain_attach_iommu()
1905 pr_err("%s: No free domain ids\n", iommu->name); in domain_attach_iommu()
1906 domain->iommu_refcnt[iommu->seq_id] -= 1; in domain_attach_iommu()
1911 set_bit(num, iommu->domain_ids); in domain_attach_iommu()
1912 set_iommu_domain(iommu, num, domain); in domain_attach_iommu()
1914 domain->iommu_did[iommu->seq_id] = num; in domain_attach_iommu()
1915 domain->nid = iommu->node; in domain_attach_iommu()
1924 struct intel_iommu *iommu) in domain_detach_iommu() argument
1929 assert_spin_locked(&iommu->lock); in domain_detach_iommu()
1931 domain->iommu_refcnt[iommu->seq_id] -= 1; in domain_detach_iommu()
1933 if (domain->iommu_refcnt[iommu->seq_id] == 0) { in domain_detach_iommu()
1934 num = domain->iommu_did[iommu->seq_id]; in domain_detach_iommu()
1935 clear_bit(num, iommu->domain_ids); in domain_detach_iommu()
1936 set_iommu_domain(iommu, num, NULL); in domain_detach_iommu()
1939 domain->iommu_did[iommu->seq_id] = 0; in domain_detach_iommu()
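domain_attach_iommu()/domain_detach_iommu() keep a per-IOMMU refcount on the domain: the first attach allocates a domain id from the iommu->domain_ids bitmap, later attaches only bump the count, and the final detach returns the id. A small standalone sketch of that allocator (no locking, made-up sizes):

#include <stdio.h>

#define NDOMAINS 64

struct fake_iommu {
        unsigned long long domain_ids;   /* 64-bit bitmap of used ids */
};

struct fake_domain {
        int refcnt;
        int did;
};

static int domain_attach(struct fake_domain *d, struct fake_iommu *iommu)
{
        if (d->refcnt++ == 0) {
                int num;

                /* find_first_zero_bit() equivalent */
                for (num = 0; num < NDOMAINS; num++)
                        if (!(iommu->domain_ids & (1ULL << num)))
                                break;
                if (num == NDOMAINS) {
                        d->refcnt--;
                        return -1;       /* no free domain ids */
                }
                iommu->domain_ids |= 1ULL << num;
                d->did = num;
        }
        return 0;
}

static void domain_detach(struct fake_domain *d, struct fake_iommu *iommu)
{
        if (--d->refcnt == 0) {
                iommu->domain_ids &= ~(1ULL << d->did);
                d->did = 0;
        }
}

int main(void)
{
        struct fake_iommu iommu = { .domain_ids = 1ULL };  /* id 0 reserved */
        struct fake_domain dom = { 0, 0 };

        domain_attach(&dom, &iommu);
        printf("allocated did %d\n", dom.did);
        domain_detach(&dom, &iommu);
        return 0;
}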
2040 * IOMMU hardware will use the PASID value set in this field for
2071 struct intel_iommu *iommu, in domain_context_mapping_one() argument
2075 u16 did = domain->iommu_did[iommu->seq_id]; in domain_context_mapping_one()
2093 spin_lock(&iommu->lock); in domain_context_mapping_one()
2096 context = iommu_context_addr(iommu, bus, devfn, 1); in domain_context_mapping_one()
2116 if (did_old < cap_ndoms(iommu->cap)) { in domain_context_mapping_one()
2117 iommu->flush.flush_context(iommu, did_old, in domain_context_mapping_one()
2121 iommu->flush.flush_iotlb(iommu, did_old, 0, 0, in domain_context_mapping_one()
2128 if (sm_supported(iommu)) { in domain_context_mapping_one()
2145 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn); in domain_context_mapping_one()
2158 * Skip top levels of page tables for iommu which has in domain_context_mapping_one()
2161 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { in domain_context_mapping_one()
2168 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn); in domain_context_mapping_one()
2182 context_set_address_width(context, iommu->msagaw); in domain_context_mapping_one()
2190 if (!ecap_coherent(iommu->ecap)) in domain_context_mapping_one()
2199 if (cap_caching_mode(iommu->cap)) { in domain_context_mapping_one()
2200 iommu->flush.flush_context(iommu, 0, in domain_context_mapping_one()
2204 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in domain_context_mapping_one()
2206 iommu_flush_write_buffer(iommu); in domain_context_mapping_one()
2213 spin_unlock(&iommu->lock); in domain_context_mapping_one()
2221 struct intel_iommu *iommu; member
2230 return domain_context_mapping_one(data->domain, data->iommu, in domain_context_mapping_cb()
2240 struct intel_iommu *iommu; in domain_context_mapping() local
2243 iommu = device_to_iommu(dev, &bus, &devfn); in domain_context_mapping()
2244 if (!iommu) in domain_context_mapping()
2250 return domain_context_mapping_one(domain, iommu, table, in domain_context_mapping()
2254 data.iommu = iommu; in domain_context_mapping()
2264 struct intel_iommu *iommu = opaque; in domain_context_mapped_cb() local
2266 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff); in domain_context_mapped_cb()
2271 struct intel_iommu *iommu; in domain_context_mapped() local
2274 iommu = device_to_iommu(dev, &bus, &devfn); in domain_context_mapped()
2275 if (!iommu) in domain_context_mapped()
2279 return device_context_mapped(iommu, bus, devfn); in domain_context_mapped()
2282 domain_context_mapped_cb, iommu); in domain_context_mapped()
2444 struct intel_iommu *iommu; in domain_mapping() local
2452 iommu = g_iommus[iommu_id]; in domain_mapping()
2453 __mapping_notify_one(iommu, domain, iov_pfn, nr_pages); in domain_mapping()
2473 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn) in domain_context_clear_one() argument
2479 if (!iommu) in domain_context_clear_one()
2482 spin_lock_irqsave(&iommu->lock, flags); in domain_context_clear_one()
2483 context = iommu_context_addr(iommu, bus, devfn, 0); in domain_context_clear_one()
2485 spin_unlock_irqrestore(&iommu->lock, flags); in domain_context_clear_one()
2490 __iommu_flush_cache(iommu, context, sizeof(*context)); in domain_context_clear_one()
2491 spin_unlock_irqrestore(&iommu->lock, flags); in domain_context_clear_one()
2492 iommu->flush.flush_context(iommu, in domain_context_clear_one()
2497 iommu->flush.flush_iotlb(iommu, in domain_context_clear_one()
2528 if (unlikely(!dev || !dev->iommu)) in find_domain()
2565 static int domain_setup_first_level(struct intel_iommu *iommu, in domain_setup_first_level() argument
2575 * Skip top levels of page tables for iommu which has in domain_setup_first_level()
2578 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { in domain_setup_first_level()
2590 return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid, in domain_setup_first_level()
2591 domain->iommu_did[iommu->seq_id], in domain_setup_first_level()
2601 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, in dmar_insert_one_dev_info() argument
2618 info->segment = iommu->segment; in dmar_insert_one_dev_info()
2632 info->iommu = iommu; in dmar_insert_one_dev_info()
2640 if (ecap_dev_iotlb_support(iommu->ecap) && in dmar_insert_one_dev_info()
2645 if (sm_supported(iommu)) { in dmar_insert_one_dev_info()
2646 if (pasid_supported(iommu)) { in dmar_insert_one_dev_info()
2652 if (info->ats_supported && ecap_prs(iommu->ecap) && in dmar_insert_one_dev_info()
2679 spin_lock(&iommu->lock); in dmar_insert_one_dev_info()
2680 ret = domain_attach_iommu(domain, iommu); in dmar_insert_one_dev_info()
2681 spin_unlock(&iommu->lock); in dmar_insert_one_dev_info()
2696 if (dev && dev_is_pci(dev) && sm_supported(iommu)) { in dmar_insert_one_dev_info()
2705 spin_lock_irqsave(&iommu->lock, flags); in dmar_insert_one_dev_info()
2707 ret = intel_pasid_setup_pass_through(iommu, domain, in dmar_insert_one_dev_info()
2710 ret = domain_setup_first_level(iommu, domain, dev, in dmar_insert_one_dev_info()
2713 ret = intel_pasid_setup_second_level(iommu, domain, in dmar_insert_one_dev_info()
2715 spin_unlock_irqrestore(&iommu->lock, flags); in dmar_insert_one_dev_info()
2808 struct intel_iommu *iommu; in domain_add_dev_info() local
2811 iommu = device_to_iommu(dev, &bus, &devfn); in domain_add_dev_info()
2812 if (!iommu) in domain_add_dev_info()
2815 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain); in domain_add_dev_info()
2881 * The second is use of the device through the IOMMU API. This interface
2885 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2887 * the IOMMU API, which eliminates them from device assignment.
2936 static void intel_iommu_init_qi(struct intel_iommu *iommu) in intel_iommu_init_qi() argument
2939 * Start from a sane iommu hardware state. in intel_iommu_init_qi()
2944 if (!iommu->qi) { in intel_iommu_init_qi()
2948 dmar_fault(-1, iommu); in intel_iommu_init_qi()
2953 dmar_disable_qi(iommu); in intel_iommu_init_qi()
2956 if (dmar_enable_qi(iommu)) { in intel_iommu_init_qi()
2960 iommu->flush.flush_context = __iommu_flush_context; in intel_iommu_init_qi()
2961 iommu->flush.flush_iotlb = __iommu_flush_iotlb; in intel_iommu_init_qi()
2963 iommu->name); in intel_iommu_init_qi()
2965 iommu->flush.flush_context = qi_flush_context; in intel_iommu_init_qi()
2966 iommu->flush.flush_iotlb = qi_flush_iotlb; in intel_iommu_init_qi()
2967 pr_info("%s: Using Queued invalidation\n", iommu->name); in intel_iommu_init_qi()
2971 static int copy_context_table(struct intel_iommu *iommu, in copy_context_table() argument
2993 __iommu_flush_cache(iommu, new_ce, in copy_context_table()
3023 new_ce = alloc_pgtable_page(iommu->node); in copy_context_table()
3037 if (did >= 0 && did < cap_ndoms(iommu->cap)) in copy_context_table()
3038 set_bit(did, iommu->domain_ids); in copy_context_table()
3064 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE); in copy_context_table()
3073 static int copy_translation_tables(struct intel_iommu *iommu) in copy_translation_tables() argument
3084 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG); in copy_translation_tables()
3086 new_ext = !!ecap_ecs(iommu->ecap); in copy_translation_tables()
3113 ret = copy_context_table(iommu, &old_rt[bus], in copy_translation_tables()
3117 iommu->name, bus); in copy_translation_tables()
3122 spin_lock_irqsave(&iommu->lock, flags); in copy_translation_tables()
3131 iommu->root_entry[bus].lo = val; in copy_translation_tables()
3138 iommu->root_entry[bus].hi = val; in copy_translation_tables()
3141 spin_unlock_irqrestore(&iommu->lock, flags); in copy_translation_tables()
3145 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE); in copy_translation_tables()
3158 struct intel_iommu *iommu = data; in intel_vcmd_ioasid_alloc() local
3161 if (!iommu) in intel_vcmd_ioasid_alloc()
3171 if (vcmd_alloc_pasid(iommu, &ioasid)) in intel_vcmd_ioasid_alloc()
3179 struct intel_iommu *iommu = data; in intel_vcmd_ioasid_free() local
3181 if (!iommu) in intel_vcmd_ioasid_free()
3191 vcmd_free_pasid(iommu, ioasid); in intel_vcmd_ioasid_free()
3194 static void register_pasid_allocator(struct intel_iommu *iommu) in register_pasid_allocator() argument
3200 if (!cap_caching_mode(iommu->cap)) in register_pasid_allocator()
3203 if (!sm_supported(iommu)) { in register_pasid_allocator()
3215 if (!vccap_pasid(iommu->vccap)) in register_pasid_allocator()
3219 iommu->pasid_allocator.alloc = intel_vcmd_ioasid_alloc; in register_pasid_allocator()
3220 iommu->pasid_allocator.free = intel_vcmd_ioasid_free; in register_pasid_allocator()
3221 iommu->pasid_allocator.pdata = (void *)iommu; in register_pasid_allocator()
3222 if (ioasid_register_allocator(&iommu->pasid_allocator)) { in register_pasid_allocator()
3225 * Disable scalable mode on this IOMMU if there in register_pasid_allocator()
3237 struct intel_iommu *iommu; in init_dmars() local
3259 /* Preallocate enough resources for IOMMU hot-addition */ in init_dmars()
3266 pr_err("Allocating global iommu array failed\n"); in init_dmars()
3271 for_each_iommu(iommu, drhd) { in init_dmars()
3273 iommu_disable_translation(iommu); in init_dmars()
3278 * Find the max pasid size of all IOMMUs in the system. in init_dmars()
3282 if (pasid_supported(iommu)) { in init_dmars()
3283 u32 temp = 2 << ecap_pss(iommu->ecap); in init_dmars()
3289 g_iommus[iommu->seq_id] = iommu; in init_dmars()
3291 intel_iommu_init_qi(iommu); in init_dmars()
3293 ret = iommu_init_domains(iommu); in init_dmars()
3297 init_translation_status(iommu); in init_dmars()
3299 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) { in init_dmars()
3300 iommu_disable_translation(iommu); in init_dmars()
3301 clear_translation_pre_enabled(iommu); in init_dmars()
3303 iommu->name); in init_dmars()
3309 * among all IOMMUs. Need to split it later. in init_dmars()
3311 ret = iommu_alloc_root_entry(iommu); in init_dmars()
3315 if (translation_pre_enabled(iommu)) { in init_dmars()
3318 ret = copy_translation_tables(iommu); in init_dmars()
3321 * We found the IOMMU with translation in init_dmars()
3330 iommu->name); in init_dmars()
3331 iommu_disable_translation(iommu); in init_dmars()
3332 clear_translation_pre_enabled(iommu); in init_dmars()
3335 iommu->name); in init_dmars()
3339 if (!ecap_pass_through(iommu->ecap)) in init_dmars()
3341 intel_svm_check(iommu); in init_dmars()
3349 for_each_active_iommu(iommu, drhd) { in init_dmars()
3350 iommu_flush_write_buffer(iommu); in init_dmars()
3352 register_pasid_allocator(iommu); in init_dmars()
3354 iommu_set_root_entry(iommu); in init_dmars()
3355 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); in init_dmars()
3356 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in init_dmars()
3379 for_each_iommu(iommu, drhd) { in init_dmars()
3386 iommu_disable_protect_mem_regions(iommu); in init_dmars()
3390 iommu_flush_write_buffer(iommu); in init_dmars()
3393 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) { in init_dmars()
3399 ret = intel_svm_enable_prq(iommu); in init_dmars()
3405 ret = dmar_set_interrupt(iommu); in init_dmars()
3413 for_each_active_iommu(iommu, drhd) { in init_dmars()
3414 disable_dmar_iommu(iommu); in init_dmars()
3415 free_dmar_iommu(iommu); in init_dmars()
3432 * Restrict dma_mask to the width that the iommu can handle. in intel_alloc_iova()
3479 struct intel_iommu *iommu; in __intel_map_single() local
3491 iommu = domain_get_iommu(domain); in __intel_map_single()
3503 !cap_zlr(iommu->cap)) in __intel_map_single()
3555 struct intel_iommu *iommu; in intel_unmap() local
3562 iommu = domain_get_iommu(domain); in intel_unmap()
3576 iommu_flush_iotlb_psi(iommu, domain, start_pfn, in intel_unmap()
3686 struct intel_iommu *iommu; in intel_map_sg() local
3697 iommu = domain_get_iommu(domain); in intel_map_sg()
3714 !cap_zlr(iommu->cap)) in intel_map_sg()
3781 struct intel_iommu *iommu; in bounce_map_single() local
3796 iommu = domain_get_iommu(domain); in bounce_map_single()
3797 if (WARN_ON(!iommu)) in bounce_map_single()
3811 !cap_zlr(iommu->cap)) in bounce_map_single()
4104 /* This IOMMU has *only* gfx devices. Either bypass it or in init_no_remapping_devices()
4116 struct intel_iommu *iommu = NULL; in init_iommu_hw() local
4118 for_each_active_iommu(iommu, drhd) in init_iommu_hw()
4119 if (iommu->qi) in init_iommu_hw()
4120 dmar_reenable_qi(iommu); in init_iommu_hw()
4122 for_each_iommu(iommu, drhd) { in init_iommu_hw()
4129 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
4133 iommu_flush_write_buffer(iommu); in init_iommu_hw()
4135 iommu_set_root_entry(iommu); in init_iommu_hw()
4137 iommu->flush.flush_context(iommu, 0, 0, 0, in init_iommu_hw()
4139 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in init_iommu_hw()
4140 iommu_enable_translation(iommu); in init_iommu_hw()
4141 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
4150 struct intel_iommu *iommu; in iommu_flush_all() local
4152 for_each_active_iommu(iommu, drhd) { in iommu_flush_all()
4153 iommu->flush.flush_context(iommu, 0, 0, 0, in iommu_flush_all()
4155 iommu->flush.flush_iotlb(iommu, 0, 0, 0, in iommu_flush_all()
4163 struct intel_iommu *iommu = NULL; in iommu_suspend() local
4166 for_each_active_iommu(iommu, drhd) { in iommu_suspend()
4167 iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32), in iommu_suspend()
4169 if (!iommu->iommu_state) in iommu_suspend()
4175 for_each_active_iommu(iommu, drhd) { in iommu_suspend()
4176 iommu_disable_translation(iommu); in iommu_suspend()
4178 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_suspend()
4180 iommu->iommu_state[SR_DMAR_FECTL_REG] = in iommu_suspend()
4181 readl(iommu->reg + DMAR_FECTL_REG); in iommu_suspend()
4182 iommu->iommu_state[SR_DMAR_FEDATA_REG] = in iommu_suspend()
4183 readl(iommu->reg + DMAR_FEDATA_REG); in iommu_suspend()
4184 iommu->iommu_state[SR_DMAR_FEADDR_REG] = in iommu_suspend()
4185 readl(iommu->reg + DMAR_FEADDR_REG); in iommu_suspend()
4186 iommu->iommu_state[SR_DMAR_FEUADDR_REG] = in iommu_suspend()
4187 readl(iommu->reg + DMAR_FEUADDR_REG); in iommu_suspend()
4189 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_suspend()
4194 for_each_active_iommu(iommu, drhd) in iommu_suspend()
4195 kfree(iommu->iommu_state); in iommu_suspend()
4203 struct intel_iommu *iommu = NULL; in iommu_resume() local
4208 panic("tboot: IOMMU setup failed, DMAR can not resume!\n"); in iommu_resume()
4210 WARN(1, "IOMMU setup failed, DMAR can not resume!\n"); in iommu_resume()
4214 for_each_active_iommu(iommu, drhd) { in iommu_resume()
4216 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_resume()
4218 writel(iommu->iommu_state[SR_DMAR_FECTL_REG], in iommu_resume()
4219 iommu->reg + DMAR_FECTL_REG); in iommu_resume()
4220 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG], in iommu_resume()
4221 iommu->reg + DMAR_FEDATA_REG); in iommu_resume()
4222 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG], in iommu_resume()
4223 iommu->reg + DMAR_FEADDR_REG); in iommu_resume()
4224 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], in iommu_resume()
4225 iommu->reg + DMAR_FEUADDR_REG); in iommu_resume()
4227 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_resume()
4230 for_each_active_iommu(iommu, drhd) in iommu_resume()
4231 kfree(iommu->iommu_state); in iommu_resume()
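iommu_suspend() snapshots the fault-event registers into a per-unit array before translation is torn down, and iommu_resume() writes the same values back. The sketch below shows only that save/restore shape with simulated registers; the array indices stand in for the SR_DMAR_*_REG slots.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NR_SAVED_REGS 4

struct fake_iommu {
        uint32_t regs[NR_SAVED_REGS];    /* live "hardware" registers */
        uint32_t saved[NR_SAVED_REGS];   /* iommu->iommu_state equivalent */
};

static void iommu_suspend_one(struct fake_iommu *iommu)
{
        memcpy(iommu->saved, iommu->regs, sizeof(iommu->saved));
}

static void iommu_resume_one(struct fake_iommu *iommu)
{
        memcpy(iommu->regs, iommu->saved, sizeof(iommu->saved));
}

int main(void)
{
        struct fake_iommu iommu = { .regs = { 0x10, 0x20, 0x30, 0x40 } };

        iommu_suspend_one(&iommu);
        memset(iommu.regs, 0, sizeof(iommu.regs));   /* simulate power loss */
        iommu_resume_one(&iommu);
        printf("restored reg0 = 0x%x\n", iommu.regs[0]);
        return 0;
}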
4405 struct intel_iommu *iommu = dmaru->iommu; in intel_iommu_add() local
4407 if (g_iommus[iommu->seq_id]) in intel_iommu_add()
4410 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) { in intel_iommu_add()
4412 iommu->name); in intel_iommu_add()
4415 if (!ecap_sc_support(iommu->ecap) && in intel_iommu_add()
4416 domain_update_iommu_snooping(iommu)) { in intel_iommu_add()
4418 iommu->name); in intel_iommu_add()
4421 sp = domain_update_iommu_superpage(NULL, iommu) - 1; in intel_iommu_add()
4422 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) { in intel_iommu_add()
4424 iommu->name); in intel_iommu_add()
4431 if (iommu->gcmd & DMA_GCMD_TE) in intel_iommu_add()
4432 iommu_disable_translation(iommu); in intel_iommu_add()
4434 g_iommus[iommu->seq_id] = iommu; in intel_iommu_add()
4435 ret = iommu_init_domains(iommu); in intel_iommu_add()
4437 ret = iommu_alloc_root_entry(iommu); in intel_iommu_add()
4441 intel_svm_check(iommu); in intel_iommu_add()
4448 iommu_disable_protect_mem_regions(iommu); in intel_iommu_add()
4452 intel_iommu_init_qi(iommu); in intel_iommu_add()
4453 iommu_flush_write_buffer(iommu); in intel_iommu_add()
4456 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) { in intel_iommu_add()
4457 ret = intel_svm_enable_prq(iommu); in intel_iommu_add()
4462 ret = dmar_set_interrupt(iommu); in intel_iommu_add()
4466 iommu_set_root_entry(iommu); in intel_iommu_add()
4467 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); in intel_iommu_add()
4468 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in intel_iommu_add()
4469 iommu_enable_translation(iommu); in intel_iommu_add()
4471 iommu_disable_protect_mem_regions(iommu); in intel_iommu_add()
4475 disable_dmar_iommu(iommu); in intel_iommu_add()
4477 free_dmar_iommu(iommu); in intel_iommu_add()
4484 struct intel_iommu *iommu = dmaru->iommu; in dmar_iommu_hotplug() local
4488 if (iommu == NULL) in dmar_iommu_hotplug()
4494 disable_dmar_iommu(iommu); in dmar_iommu_hotplug()
4495 free_dmar_iommu(iommu); in dmar_iommu_hotplug()
4635 struct intel_iommu *iommu; in intel_iommu_memory_notifier() local
4642 for_each_active_iommu(iommu, drhd) in intel_iommu_memory_notifier()
4643 iommu_flush_iotlb_psi(iommu, si_domain, in intel_iommu_memory_notifier()
4665 struct intel_iommu *iommu = g_iommus[i]; in free_all_cpu_cached_iovas() local
4669 if (!iommu) in free_all_cpu_cached_iovas()
4672 for (did = 0; did < cap_ndoms(iommu->cap); did++) { in free_all_cpu_cached_iovas()
4673 domain = get_iommu_domain(iommu, (u16)did); in free_all_cpu_cached_iovas()
4691 struct intel_iommu *iommu = NULL; in intel_disable_iommus() local
4694 for_each_iommu(iommu, drhd) in intel_disable_iommus()
4695 iommu_disable_translation(iommu); in intel_disable_iommus()
4701 struct intel_iommu *iommu = NULL; in intel_iommu_shutdown() local
4709 for_each_iommu(iommu, drhd) in intel_iommu_shutdown()
4710 iommu_disable_protect_mem_regions(iommu); in intel_iommu_shutdown()
4722 return container_of(iommu_dev, struct intel_iommu, iommu); in dev_to_intel_iommu()
4729 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in intel_iommu_show_version() local
4730 u32 ver = readl(iommu->reg + DMAR_VER_REG); in intel_iommu_show_version()
4740 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in intel_iommu_show_address() local
4741 return sprintf(buf, "%llx\n", iommu->reg_phys); in intel_iommu_show_address()
4749 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in intel_iommu_show_cap() local
4750 return sprintf(buf, "%llx\n", iommu->cap); in intel_iommu_show_cap()
4758 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in intel_iommu_show_ecap() local
4759 return sprintf(buf, "%llx\n", iommu->ecap); in intel_iommu_show_ecap()
4767 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in intel_iommu_show_ndoms() local
4768 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap)); in intel_iommu_show_ndoms()
4776 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in intel_iommu_show_ndoms_used() local
4777 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids, in intel_iommu_show_ndoms_used()
4778 cap_ndoms(iommu->cap))); in intel_iommu_show_ndoms_used()
4793 .name = "intel-iommu",
4819 pr_info("Intel-IOMMU force enabled due to platform opt in\n"); in platform_optin_force_iommu()
4822 * If Intel-IOMMU is disabled by default, we will apply identity in platform_optin_force_iommu()
4838 struct intel_iommu *iommu __maybe_unused; in probe_acpi_namespace_devices()
4842 for_each_active_iommu(iommu, drhd) { in probe_acpi_namespace_devices()
4881 struct intel_iommu *iommu; in intel_iommu_init() local
4884 * Intel IOMMU is required for a TXT/tboot launch or platform in intel_iommu_init()
4892 panic("tboot: Failed to initialize iommu memory\n"); in intel_iommu_init()
4924 * We exit the function here to ensure IOMMU's remapping and in intel_iommu_init()
4925 * mempool aren't setup, which means that the IOMMU's PMRs in intel_iommu_init()
4932 for_each_iommu(iommu, drhd) in intel_iommu_init()
4933 iommu_disable_protect_mem_regions(iommu); in intel_iommu_init()
4953 panic("tboot: Failed to reserve iommu ranges\n"); in intel_iommu_init()
4974 for_each_active_iommu(iommu, drhd) { in intel_iommu_init()
4975 iommu_device_sysfs_add(&iommu->iommu, NULL, in intel_iommu_init()
4977 "%s", iommu->name); in intel_iommu_init()
4978 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops); in intel_iommu_init()
4979 iommu_device_register(&iommu->iommu); in intel_iommu_init()
4986 cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL, in intel_iommu_init()
4994 for_each_iommu(iommu, drhd) { in intel_iommu_init()
4995 if (!drhd->ignored && !translation_pre_enabled(iommu)) in intel_iommu_init()
4996 iommu_enable_translation(iommu); in intel_iommu_init()
4998 iommu_disable_protect_mem_regions(iommu); in intel_iommu_init()
5019 struct intel_iommu *iommu = opaque; in domain_context_clear_one_cb() local
5021 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff); in domain_context_clear_one_cb()
5026 * NB - intel-iommu lacks any sort of reference counting for the users of
5031 static void domain_context_clear(struct intel_iommu *iommu, struct device *dev) in domain_context_clear() argument
5033 if (!iommu || !dev || !dev_is_pci(dev)) in domain_context_clear()
5036 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu); in domain_context_clear()
5042 struct intel_iommu *iommu; in __dmar_remove_one_dev_info() local
5050 iommu = info->iommu; in __dmar_remove_one_dev_info()
5054 if (dev_is_pci(info->dev) && sm_supported(iommu)) in __dmar_remove_one_dev_info()
5055 intel_pasid_tear_down_entry(iommu, info->dev, in __dmar_remove_one_dev_info()
5060 domain_context_clear(iommu, info->dev); in __dmar_remove_one_dev_info()
5066 spin_lock_irqsave(&iommu->lock, flags); in __dmar_remove_one_dev_info()
5067 domain_detach_iommu(domain, iommu); in __dmar_remove_one_dev_info()
5068 spin_unlock_irqrestore(&iommu->lock, flags); in __dmar_remove_one_dev_info()
5209 struct intel_iommu *iommu; in aux_domain_add_dev() local
5211 iommu = device_to_iommu(dev, NULL, NULL); in aux_domain_add_dev()
5212 if (!iommu) in aux_domain_add_dev()
5231 * iommu->lock must be held to attach domain to iommu and setup the in aux_domain_add_dev()
5234 spin_lock(&iommu->lock); in aux_domain_add_dev()
5235 ret = domain_attach_iommu(domain, iommu); in aux_domain_add_dev()
5241 ret = domain_setup_first_level(iommu, domain, dev, in aux_domain_add_dev()
5244 ret = intel_pasid_setup_second_level(iommu, domain, dev, in aux_domain_add_dev()
5248 spin_unlock(&iommu->lock); in aux_domain_add_dev()
5257 domain_detach_iommu(domain, iommu); in aux_domain_add_dev()
5259 spin_unlock(&iommu->lock); in aux_domain_add_dev()
5271 struct intel_iommu *iommu; in aux_domain_remove_dev() local
5279 iommu = info->iommu; in aux_domain_remove_dev()
5283 spin_lock(&iommu->lock); in aux_domain_remove_dev()
5284 intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid, false); in aux_domain_remove_dev()
5285 domain_detach_iommu(domain, iommu); in aux_domain_remove_dev()
5286 spin_unlock(&iommu->lock); in aux_domain_remove_dev()
5295 struct intel_iommu *iommu; in prepare_domain_attach_device() local
5298 iommu = device_to_iommu(dev, NULL, NULL); in prepare_domain_attach_device()
5299 if (!iommu) in prepare_domain_attach_device()
5302 /* check if this iommu agaw is sufficient for max mapped address */ in prepare_domain_attach_device()
5303 addr_width = agaw_to_width(iommu->agaw); in prepare_domain_attach_device()
5304 if (addr_width > cap_mgaw(iommu->cap)) in prepare_domain_attach_device()
5305 addr_width = cap_mgaw(iommu->cap); in prepare_domain_attach_device()
5308 dev_err(dev, "%s: iommu width (%d) is not " in prepare_domain_attach_device()
5318 while (iommu->agaw < dmar_domain->agaw) { in prepare_domain_attach_device()
5340 …dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Con… in intel_iommu_attach_device()
5391 * 2D array for converting and sanitizing IOMMU generic TLB granularity to
5402 * X: indexed by iommu cache type
5430 * IOMMU cache invalidate API passes granu_size in bytes, and number of in to_vtd_size()
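The comment above to_vtd_size() notes that the cache-invalidate API hands the driver a granule size in bytes plus a granule count, while VT-d invalidation descriptors want a power-of-two page-order mask. A standalone sketch of that conversion, assuming 4KiB pages and a hand-rolled order_base_2():

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static unsigned int order_base_2(uint64_t n)
{
        unsigned int order = 0;

        while ((1ULL << order) < n)
                order++;
        return order;
}

static uint64_t to_vtd_size(uint64_t granu_size, uint64_t nr_granules)
{
        uint64_t nr_pages = (granu_size * nr_granules) >> PAGE_SHIFT;

        return order_base_2(nr_pages);
}

int main(void)
{
        /* 16 granules of 4KiB -> 64KiB -> address-mask order 4 */
        printf("order = %llu\n",
               (unsigned long long)to_vtd_size(4096, 16));
        return 0;
}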
5443 struct intel_iommu *iommu; in intel_iommu_sva_invalidate() local
5457 iommu = device_to_iommu(dev, &bus, &devfn); in intel_iommu_sva_invalidate()
5458 if (!iommu) in intel_iommu_sva_invalidate()
5465 spin_lock(&iommu->lock); in intel_iommu_sva_invalidate()
5471 did = dmar_domain->iommu_did[iommu->seq_id]; in intel_iommu_sva_invalidate()
5518 qi_flush_piotlb(iommu, did, pasid, in intel_iommu_sva_invalidate()
5548 qi_flush_dev_iotlb_pasid(iommu, sid, in intel_iommu_sva_invalidate()
5556 dev_err_ratelimited(dev, "Unsupported IOMMU invalidation type %d\n", in intel_iommu_sva_invalidate()
5562 spin_unlock(&iommu->lock); in intel_iommu_sva_invalidate()
5592 pr_err("%s: iommu width (%d) is not " in intel_iommu_map()
5663 struct intel_iommu *iommu; in scalable_mode_support() local
5667 for_each_active_iommu(iommu, drhd) { in scalable_mode_support()
5668 if (!sm_supported(iommu)) { in scalable_mode_support()
5681 struct intel_iommu *iommu; in iommu_pasid_support() local
5685 for_each_active_iommu(iommu, drhd) { in iommu_pasid_support()
5686 if (!pasid_supported(iommu)) { in iommu_pasid_support()
5699 struct intel_iommu *iommu; in nested_mode_support() local
5703 for_each_active_iommu(iommu, drhd) { in nested_mode_support()
5704 if (!sm_supported(iommu) || !ecap_nest(iommu->ecap)) { in nested_mode_support()
5726 struct intel_iommu *iommu; in intel_iommu_probe_device() local
5728 iommu = device_to_iommu(dev, NULL, NULL); in intel_iommu_probe_device()
5729 if (!iommu) in intel_iommu_probe_device()
5732 if (translation_pre_enabled(iommu)) in intel_iommu_probe_device()
5735 return &iommu->iommu; in intel_iommu_probe_device()
5740 struct intel_iommu *iommu; in intel_iommu_release_device() local
5742 iommu = device_to_iommu(dev, NULL, NULL); in intel_iommu_release_device()
5743 if (!iommu) in intel_iommu_release_device()
5821 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev) in intel_iommu_enable_pasid() argument
5835 spin_lock(&iommu->lock); in intel_iommu_enable_pasid()
5842 context = iommu_context_addr(iommu, info->bus, info->devfn, 0); in intel_iommu_enable_pasid()
5852 iommu->flush.flush_context(iommu, in intel_iommu_enable_pasid()
5853 domain->iommu_did[iommu->seq_id], in intel_iommu_enable_pasid()
5866 spin_unlock(&iommu->lock); in intel_iommu_enable_pasid()
5895 struct intel_iommu *iommu; in intel_iommu_enable_auxd() local
5899 iommu = device_to_iommu(dev, NULL, NULL); in intel_iommu_enable_auxd()
5900 if (!iommu || dmar_disabled) in intel_iommu_enable_auxd()
5903 if (!sm_supported(iommu) || !pasid_supported(iommu)) in intel_iommu_enable_auxd()
5906 ret = intel_iommu_enable_pasid(iommu, dev); in intel_iommu_enable_auxd()
5980 return info && (info->iommu->flags & VTD_FLAG_SVM_CAPABLE) && in intel_iommu_dev_has_feat()
6000 if (info->iommu->flags & VTD_FLAG_SVM_CAPABLE) in intel_iommu_dev_enable_feat()
6076 * thus not be able to bypass the IOMMU restrictions.
6082 "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n", in risky_device()
6133 pci_info(dev, "Disabling IOMMU for graphics on this chipset\n"); in quirk_iommu_igfx()
6214 pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); in quirk_calpella_no_shadow_gtt()
6243 pci_info(dev, "Skip IOMMU disabling for graphics\n"); in quirk_igfx_skip_te_disable()