Lines Matching +full:dma +full:- +full:coherent

1 // SPDX-License-Identifier: GPL-2.0-only
3 * A fairly generic DMA-API to IOMMU-API glue layer.
5 * Copyright (C) 2014-2015 ARM Ltd.
7 * based in part on arch/arm/mm/dma-mapping.c:
8 * Copyright (C) 2000-2004 Russell King
13 #include <linux/dma-map-ops.h>
14 #include <linux/dma-iommu.h>
27 #include <linux/dma-direct.h>
74 freelist = freelist->freelist; in iommu_dma_entry_dtor()
81 if (cookie->type == IOMMU_DMA_IOVA_COOKIE) in cookie_msi_granule()
82 return cookie->iovad.granule; in cookie_msi_granule()
92 INIT_LIST_HEAD(&cookie->msi_page_list); in cookie_alloc()
93 cookie->type = type; in cookie_alloc()
99 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
100 * @domain: IOMMU domain to prepare for DMA-API usage
103 * callback when domain->type == IOMMU_DOMAIN_DMA.
107 if (domain->iova_cookie) in iommu_get_dma_cookie()
108 return -EEXIST; in iommu_get_dma_cookie()
110 domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE); in iommu_get_dma_cookie()
111 if (!domain->iova_cookie) in iommu_get_dma_cookie()
112 return -ENOMEM; in iommu_get_dma_cookie()
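
A usage sketch for the cookie API (identifiers prefixed my_ are hypothetical; the pattern mirrors how IOMMU drivers of this era attach the cookie from their domain_alloc callback):

/* Hypothetical driver-private domain wrapper. */
struct my_domain {
	struct iommu_domain	domain;
};

static struct iommu_domain *my_domain_alloc(unsigned int type)
{
	struct my_domain *dom;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	/* DMA-API-managed domains need the IOVA cookie before first use. */
	if (type == IOMMU_DOMAIN_DMA && iommu_get_dma_cookie(&dom->domain)) {
		kfree(dom);
		return NULL;
	}
	return &dom->domain;
}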
119 * iommu_get_msi_cookie - Acquire just MSI remapping resources
123 * Users who manage their own IOVA allocation and do not want DMA API support,
134 if (domain->type != IOMMU_DOMAIN_UNMANAGED) in iommu_get_msi_cookie()
135 return -EINVAL; in iommu_get_msi_cookie()
137 if (domain->iova_cookie) in iommu_get_msi_cookie()
138 return -EEXIST; in iommu_get_msi_cookie()
142 return -ENOMEM; in iommu_get_msi_cookie()
144 cookie->msi_iova = base; in iommu_get_msi_cookie()
145 domain->iova_cookie = cookie; in iommu_get_msi_cookie()
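
A sketch of the intended caller of iommu_get_msi_cookie(): a VFIO-style user that allocates an unmanaged domain, manages IOVAs itself, and only wants automatic MSI remapping. my_alloc_user_domain() and the choice of msi_base are illustrative assumptions.

/* msi_base is an IOVA the caller promises not to hand out itself. */
static struct iommu_domain *my_alloc_user_domain(struct bus_type *bus,
						 dma_addr_t msi_base)
{
	struct iommu_domain *domain = iommu_domain_alloc(bus);

	if (!domain)
		return NULL;

	if (iommu_get_msi_cookie(domain, msi_base)) {
		iommu_domain_free(domain);
		return NULL;
	}
	return domain;
}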
151 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
159 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_put_dma_cookie()
165 if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) in iommu_put_dma_cookie()
166 put_iova_domain(&cookie->iovad); in iommu_put_dma_cookie()
168 list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) { in iommu_put_dma_cookie()
169 list_del(&msi->list); in iommu_put_dma_cookie()
173 domain->iova_cookie = NULL; in iommu_put_dma_cookie()
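
The matching teardown, continuing the hypothetical my_domain driver sketched earlier: the cookie (IOVA or MSI flavour) is released before the domain memory itself.

static void my_domain_free(struct iommu_domain *domain)
{
	struct my_domain *dom = container_of(domain, struct my_domain, domain);

	/* Safe to call even if no cookie was ever attached. */
	iommu_put_dma_cookie(domain);
	kfree(dom);
}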
178 * iommu_dma_get_resv_regions - Reserved region driver helper
183 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
190 if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode)) in iommu_dma_get_resv_regions()
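
A sketch of how an IOMMU driver's .get_resv_regions callback typically chains to this helper and then appends its own window; my_get_resv_regions() and the region address/size below are made up for illustration.

static void my_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_resv_region *region;

	/* Generic reservations, e.g. HW MSI ranges discovered from firmware. */
	iommu_dma_get_resv_regions(dev, head);

	/* Plus a driver-specific MSI doorbell window; values are made up. */
	region = iommu_alloc_resv_region(0xfee00000, 0x100000,
					 IOMMU_READ | IOMMU_WRITE,
					 IOMMU_RESV_MSI);
	if (region)
		list_add_tail(&region->list, head);
}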
199 struct iova_domain *iovad = &cookie->iovad; in cookie_init_hw_msi_region()
203 start -= iova_offset(iovad, start); in cookie_init_hw_msi_region()
204 num_pages = iova_align(iovad, end - start) >> iova_shift(iovad); in cookie_init_hw_msi_region()
209 return -ENOMEM; in cookie_init_hw_msi_region()
211 msi_page->phys = start; in cookie_init_hw_msi_region()
212 msi_page->iova = start; in cookie_init_hw_msi_region()
213 INIT_LIST_HEAD(&msi_page->list); in cookie_init_hw_msi_region()
214 list_add(&msi_page->list, &cookie->msi_page_list); in cookie_init_hw_msi_region()
215 start += iovad->granule; in cookie_init_hw_msi_region()
224 struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); in iova_reserve_pci_windows()
229 resource_list_for_each_entry(window, &bridge->windows) { in iova_reserve_pci_windows()
230 if (resource_type(window->res) != IORESOURCE_MEM) in iova_reserve_pci_windows()
233 lo = iova_pfn(iovad, window->res->start - window->offset); in iova_reserve_pci_windows()
234 hi = iova_pfn(iovad, window->res->end - window->offset); in iova_reserve_pci_windows()
238 /* Get reserved DMA windows from host bridge */ in iova_reserve_pci_windows()
239 resource_list_for_each_entry(window, &bridge->dma_ranges) { in iova_reserve_pci_windows()
240 end = window->res->start - window->offset; in iova_reserve_pci_windows()
248 dev_err(&dev->dev, in iova_reserve_pci_windows()
249 "Failed to reserve IOVA [%pa-%pa]\n", in iova_reserve_pci_windows()
251 return -EINVAL; in iova_reserve_pci_windows()
254 start = window->res->end - window->offset + 1; in iova_reserve_pci_windows()
256 if (window->node.next == &bridge->dma_ranges && in iova_reserve_pci_windows()
269 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iova_reserve_iommu_regions()
270 struct iova_domain *iovad = &cookie->iovad; in iova_reserve_iommu_regions()
286 if (region->type == IOMMU_RESV_SW_MSI) in iova_reserve_iommu_regions()
289 lo = iova_pfn(iovad, region->start); in iova_reserve_iommu_regions()
290 hi = iova_pfn(iovad, region->start + region->length - 1); in iova_reserve_iommu_regions()
293 if (region->type == IOMMU_RESV_MSI) in iova_reserve_iommu_regions()
294 ret = cookie_init_hw_msi_region(cookie, region->start, in iova_reserve_iommu_regions()
295 region->start + region->length); in iova_reserve_iommu_regions()
310 domain = cookie->fq_domain; in iommu_dma_flush_iotlb_all()
312 domain->ops->flush_iotlb_all(domain); in iommu_dma_flush_iotlb_all()
317 return dev_is_pci(dev) && to_pci_dev(dev)->untrusted; in dev_is_untrusted()
323 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_init_fq()
326 if (cookie->fq_domain) in iommu_dma_init_fq()
329 ret = init_iova_flush_queue(&cookie->iovad, iommu_dma_flush_iotlb_all, in iommu_dma_init_fq()
336 * Prevent incomplete iovad->fq being observable. Pairs with path from in iommu_dma_init_fq()
340 WRITE_ONCE(cookie->fq_domain, domain); in iommu_dma_init_fq()
345 * iommu_dma_init_domain - Initialise a DMA mapping domain
359 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_init_domain()
363 if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE) in iommu_dma_init_domain()
364 return -EINVAL; in iommu_dma_init_domain()
366 iovad = &cookie->iovad; in iommu_dma_init_domain()
369 order = __ffs(domain->pgsize_bitmap); in iommu_dma_init_domain()
373 if (domain->geometry.force_aperture) { in iommu_dma_init_domain()
374 if (base > domain->geometry.aperture_end || in iommu_dma_init_domain()
375 limit < domain->geometry.aperture_start) { in iommu_dma_init_domain()
376 pr_warn("specified DMA range outside IOMMU capability\n"); in iommu_dma_init_domain()
377 return -EFAULT; in iommu_dma_init_domain()
381 domain->geometry.aperture_start >> order); in iommu_dma_init_domain()
384 /* start_pfn is always nonzero for an already-initialised domain */ in iommu_dma_init_domain()
385 if (iovad->start_pfn) { in iommu_dma_init_domain()
386 if (1UL << order != iovad->granule || in iommu_dma_init_domain()
387 base_pfn != iovad->start_pfn) { in iommu_dma_init_domain()
388 pr_warn("Incompatible range for DMA domain\n"); in iommu_dma_init_domain()
389 return -EFAULT; in iommu_dma_init_domain()
398 if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain)) in iommu_dma_init_domain()
399 domain->type = IOMMU_DOMAIN_DMA; in iommu_dma_init_domain()
405 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
407 * @dir: Direction of DMA transfer
408 * @coherent: Is the DMA master cache-coherent?
409 * @attrs: DMA attributes for the mapping
413 static int dma_info_to_prot(enum dma_data_direction dir, bool coherent, in dma_info_to_prot() argument
416 int prot = coherent ? IOMMU_CACHE : 0; in dma_info_to_prot()
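
Only the prologue and the coherency line match the search here; for readability, this is a reconstruction of how the rest of the direction/attribute translation conventionally looks in this layer (a sketch, not a verbatim copy of the elided body):

/* Reconstruction for readability - not claimed to be the exact upstream body. */
static int dma_info_to_prot_sketch(enum dma_data_direction dir, bool coherent,
				   unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;	/* device only reads memory */
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;	/* device only writes memory */
	default:
		return 0;
	}
}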
436 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_alloc_iova()
437 struct iova_domain *iovad = &cookie->iovad; in iommu_dma_alloc_iova()
440 if (cookie->type == IOMMU_DMA_MSI_COOKIE) { in iommu_dma_alloc_iova()
441 cookie->msi_iova += size; in iommu_dma_alloc_iova()
442 return cookie->msi_iova - size; in iommu_dma_alloc_iova()
448 * Freeing non-power-of-two-sized allocations back into the IOVA caches in iommu_dma_alloc_iova()
453 if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1))) in iommu_dma_alloc_iova()
456 dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit); in iommu_dma_alloc_iova()
458 if (domain->geometry.force_aperture) in iommu_dma_alloc_iova()
459 dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end); in iommu_dma_alloc_iova()
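
A quick worked example of the rounding above, assuming a 4 KiB IOVA granule and the usual IOVA_RANGE_CACHE_MAX_SIZE of 6 (so the cache threshold is 32 granules):

	size_t iova_len = 5;	/* 20 KiB request = 5 granules */

	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))	/* 5 < 32 */
		iova_len = roundup_pow_of_two(iova_len);	/* now 8 granules */
	/* a 40-granule (160 KiB) request is >= 32 and would stay at 40 */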
476 struct iova_domain *iovad = &cookie->iovad; in iommu_dma_free_iova()
479 if (cookie->type == IOMMU_DMA_MSI_COOKIE) in iommu_dma_free_iova()
480 cookie->msi_iova -= size; in iommu_dma_free_iova()
481 else if (gather && gather->queued) in iommu_dma_free_iova()
484 (unsigned long)gather->freelist); in iommu_dma_free_iova()
494 struct iommu_dma_cookie *cookie = domain->iova_cookie; in __iommu_dma_unmap()
495 struct iova_domain *iovad = &cookie->iovad; in __iommu_dma_unmap()
500 dma_addr -= iova_off; in __iommu_dma_unmap()
503 iotlb_gather.queued = READ_ONCE(cookie->fq_domain); in __iommu_dma_unmap()
534 struct iommu_dma_cookie *cookie = domain->iova_cookie; in __iommu_dma_map()
535 struct iova_domain *iovad = &cookie->iovad; in __iommu_dma_map()
549 if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) { in __iommu_dma_map()
557 size_t org_size, dma_addr_t dma_mask, bool coherent, in __iommu_dma_map_swiotlb() argument
560 int prot = dma_info_to_prot(dir, coherent, attrs); in __iommu_dma_map_swiotlb()
562 struct iommu_dma_cookie *cookie = domain->iova_cookie; in __iommu_dma_map_swiotlb()
563 struct iova_domain *iovad = &cookie->iovad; in __iommu_dma_map_swiotlb()
590 padding_size -= org_size; in __iommu_dma_map_swiotlb()
604 while (count--) in __iommu_dma_free_pages()
615 order_mask &= (2U << MAX_ORDER) - 1; in __iommu_dma_alloc_pages()
634 * Higher-order allocations are a convenience rather in __iommu_dma_alloc_pages()
636 * falling back to minimum-order allocations. in __iommu_dma_alloc_pages()
638 for (order_mask &= (2U << __fls(count)) - 1; in __iommu_dma_alloc_pages()
657 count -= order_size; in __iommu_dma_alloc_pages()
658 while (order_size--) in __iommu_dma_alloc_pages()
673 struct iommu_dma_cookie *cookie = domain->iova_cookie; in __iommu_dma_alloc_noncontiguous()
674 struct iova_domain *iovad = &cookie->iovad; in __iommu_dma_alloc_noncontiguous()
675 bool coherent = dev_is_dma_coherent(dev); in __iommu_dma_alloc_noncontiguous() local
676 int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); in __iommu_dma_alloc_noncontiguous()
677 unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap; in __iommu_dma_alloc_noncontiguous()
685 min_size = alloc_sizes & -alloc_sizes; in __iommu_dma_alloc_noncontiguous()
702 iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev); in __iommu_dma_alloc_noncontiguous()
713 for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) in __iommu_dma_alloc_noncontiguous()
714 arch_dma_prep_coherent(sg_page(sg), sg->length); in __iommu_dma_alloc_noncontiguous()
717 if (iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot) in __iommu_dma_alloc_noncontiguous()
721 sgt->sgl->dma_address = iova; in __iommu_dma_alloc_noncontiguous()
722 sgt->sgl->dma_length = size; in __iommu_dma_alloc_noncontiguous()
746 *dma_handle = sgt.sgl->dma_address; in iommu_dma_alloc_remap()
771 sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp, in iommu_dma_alloc_noncontiguous()
773 if (!sh->pages) { in iommu_dma_alloc_noncontiguous()
777 return &sh->sgt; in iommu_dma_alloc_noncontiguous()
785 __iommu_dma_unmap(dev, sgt->sgl->dma_address, size); in iommu_dma_free_noncontiguous()
786 __iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT); in iommu_dma_free_noncontiguous()
787 sg_free_table(&sh->sgt); in iommu_dma_free_noncontiguous()
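
These helpers back the dma_alloc_noncontiguous() family; a minimal driver-side sketch (my_alloc_dma_buffer() is hypothetical, error handling kept short):

static void *my_alloc_dma_buffer(struct device *dev, size_t size,
				 struct sg_table **sgt_out)
{
	struct sg_table *sgt;
	void *vaddr;

	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
				      GFP_KERNEL, 0);
	if (!sgt)
		return NULL;

	/* CPU access needs an explicit vmap of the backing pages. */
	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
	if (!vaddr) {
		dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
		return NULL;
	}
	*sgt_out = sgt;
	return vaddr;
}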
836 arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir); in iommu_dma_sync_sg_for_cpu()
840 sg->length, dir); in iommu_dma_sync_sg_for_cpu()
857 sg->length, dir); in iommu_dma_sync_sg_for_device()
860 arch_sync_dma_for_device(sg_phys(sg), sg->length, dir); in iommu_dma_sync_sg_for_device()
869 bool coherent = dev_is_dma_coherent(dev); in iommu_dma_map_page() local
873 coherent, dir, attrs); in iommu_dma_map_page()
874 if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && in iommu_dma_map_page()
889 * Prepare a successfully-mapped scatterlist to give back to the caller.
907 unsigned int s_iova_len = s->length; in __finalise_sg()
909 s->offset += s_iova_off; in __finalise_sg()
910 s->length = s_length; in __finalise_sg()
915 * Now fill in the real DMA data. If... in __finalise_sg()
916 * - there is a valid output segment to append to in __finalise_sg()
917 * - and this segment starts on an IOVA page boundary in __finalise_sg()
918 * - but doesn't fall at a segment boundary in __finalise_sg()
919 * - and wouldn't make the resulting output segment too long in __finalise_sg()
922 (max_len - cur_len >= s_length)) { in __finalise_sg()
946 * but making sure the DMA fields are invalidated.
955 s->offset += sg_dma_address(s); in __invalidate_sg()
957 s->length = sg_dma_len(s); in __invalidate_sg()
982 s->length, dma_get_mask(dev), in iommu_dma_map_sg_swiotlb()
986 sg_dma_len(s) = s->length; in iommu_dma_map_sg_swiotlb()
993 return -EIO; in iommu_dma_map_sg_swiotlb()
997 * The DMA API client is passing in a scatterlist which could describe
1000 * impedance-matching, to be able to hand off a suitably-aligned list,
1007 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_map_sg()
1008 struct iova_domain *iovad = &cookie->iovad; in iommu_dma_map_sg()
1031 * trickery we can modify the list in-place, but reversibly, by in iommu_dma_map_sg()
1032 * stashing the unaligned parts in the as-yet-unused DMA fields. in iommu_dma_map_sg()
1035 size_t s_iova_off = iova_offset(iovad, s->offset); in iommu_dma_map_sg()
1036 size_t s_length = s->length; in iommu_dma_map_sg()
1037 size_t pad_len = (mask - iova_len + 1) & mask; in iommu_dma_map_sg()
1041 s->offset -= s_iova_off; in iommu_dma_map_sg()
1043 s->length = s_length; in iommu_dma_map_sg()
1048 * - If mask size >= IOVA size, then the IOVA range cannot in iommu_dma_map_sg()
1050 * - If mask size < IOVA size, then the IOVA range must start in iommu_dma_map_sg()
1054 * - The mask must be a power of 2, so pad_len == 0 if in iommu_dma_map_sg()
1058 if (pad_len && pad_len < s_length - 1) { in iommu_dma_map_sg()
1059 prev->length += pad_len; in iommu_dma_map_sg()
1069 ret = -ENOMEM; in iommu_dma_map_sg()
1075 * implementation - it knows better than we do. in iommu_dma_map_sg()
1088 if (ret != -ENOMEM) in iommu_dma_map_sg()
1089 return -EINVAL; in iommu_dma_map_sg()
1113 for_each_sg(sg_next(sg), tmp, nents - 1, i) { in iommu_dma_unmap_sg()
1119 __iommu_dma_unmap(dev, start, end - start); in iommu_dma_unmap_sg()
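
For context, the scatterlist path above is what an ordinary driver reaches through the streaming DMA API; a minimal sketch with hypothetical names:

static int my_start_transfer(struct device *dev, struct sg_table *sgt)
{
	int nents;

	/* Ends up in iommu_dma_map_sg() when dev uses the IOMMU DMA ops. */
	nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, DMA_TO_DEVICE);
	if (!nents)
		return -ENOMEM;

	/* ... program hardware using sg_dma_address()/sg_dma_len() over
	 * the nents returned above ... */

	dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, DMA_TO_DEVICE);
	return 0;
}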
1142 /* Non-coherent atomic allocation? Easy */ in __iommu_dma_free()
1149 * If the address is remapped, then it's either non-coherent in __iommu_dma_free()
1157 /* Lowmem means a coherent atomic or CMA allocation */ in __iommu_dma_free()
1177 bool coherent = dev_is_dma_coherent(dev); in iommu_dma_alloc_pages() local
1189 if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) { in iommu_dma_alloc_pages()
1197 if (!coherent) in iommu_dma_alloc_pages()
1214 bool coherent = dev_is_dma_coherent(dev); in iommu_dma_alloc() local
1215 int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); in iommu_dma_alloc()
1228 !gfpflags_allow_blocking(gfp) && !coherent) in iommu_dma_alloc()
1237 dev->coherent_dma_mask); in iommu_dma_alloc()
1251 unsigned long pfn, off = vma->vm_pgoff; in iommu_dma_mmap()
1254 vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); in iommu_dma_mmap()
1259 if (off >= nr_pages || vma_pages(vma) > nr_pages - off) in iommu_dma_mmap()
1260 return -ENXIO; in iommu_dma_mmap()
1272 return remap_pfn_range(vma, vma->vm_start, pfn + off, in iommu_dma_mmap()
1273 vma->vm_end - vma->vm_start, in iommu_dma_mmap()
1274 vma->vm_page_prot); in iommu_dma_mmap()
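
iommu_dma_mmap() is normally reached via dma_mmap_coherent()/dma_mmap_attrs(); a character-device mmap handler might look like this sketch (struct my_dev and its fields are assumptions):

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *md = file->private_data;

	/* Hands off to the device's DMA ops ->mmap, i.e. iommu_dma_mmap(). */
	return dma_mmap_coherent(md->dev, vma, md->cpu_addr,
				 md->dma_handle, md->size);
}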
1300 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); in iommu_dma_get_sgtable()
1308 return (1UL << __ffs(domain->pgsize_bitmap)) - 1; in iommu_dma_get_merge_boundary()
1336 * The IOMMU core code allocates the default DMA domain, which the underlying
1337 * IOMMU driver needs to support via the dma-iommu layer.
1347 * The IOMMU core code allocates the default DMA domain, which the in iommu_setup_dma_ops()
1348 * underlying IOMMU driver needs to support via the dma-iommu layer. in iommu_setup_dma_ops()
1353 dev->dma_ops = &iommu_dma_ops; in iommu_setup_dma_ops()
1358 pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n", in iommu_setup_dma_ops()
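
The caller here is architecture code; roughly how arm64 wires this up around this kernel version (simplified sketch, warnings and Xen handling omitted):

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;

	/* If the device is translated by an IOMMU, install the IOMMU DMA ops. */
	if (iommu)
		iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
}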
1366 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_get_msi_page()
1372 msi_addr &= ~(phys_addr_t)(size - 1); in iommu_dma_get_msi_page()
1373 list_for_each_entry(msi_page, &cookie->msi_page_list, list) in iommu_dma_get_msi_page()
1374 if (msi_page->phys == msi_addr) in iommu_dma_get_msi_page()
1388 INIT_LIST_HEAD(&msi_page->list); in iommu_dma_get_msi_page()
1389 msi_page->phys = msi_addr; in iommu_dma_get_msi_page()
1390 msi_page->iova = iova; in iommu_dma_get_msi_page()
1391 list_add(&msi_page->list, &cookie->msi_page_list); in iommu_dma_get_msi_page()
1408 if (!domain || !domain->iova_cookie) { in iommu_dma_prepare_msi()
1409 desc->iommu_cookie = NULL; in iommu_dma_prepare_msi()
1425 return -ENOMEM; in iommu_dma_prepare_msi()
1438 if (!domain || !domain->iova_cookie || WARN_ON(!msi_page)) in iommu_dma_compose_msi_msg()
1441 msg->address_hi = upper_32_bits(msi_page->iova); in iommu_dma_compose_msi_msg()
1442 msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1; in iommu_dma_compose_msi_msg()
1443 msg->address_lo += lower_32_bits(msi_page->iova); in iommu_dma_compose_msi_msg()
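
Finally, the two MSI hooks are driven from an MSI irqchip: iommu_dma_prepare_msi() at interrupt allocation time (it may sleep) and iommu_dma_compose_msi_msg() when the message is written (atomic context). A rough sketch; MY_DOORBELL_PA and the my_ callbacks are hypothetical:

#define MY_DOORBELL_PA	0x08020040	/* hypothetical doorbell physical address */

/* irq_domain ->alloc path: may sleep, so the doorbell mapping is set up here. */
static int my_msi_prepare(struct msi_desc *desc)
{
	return iommu_dma_prepare_msi(desc, MY_DOORBELL_PA);
}

/* irq_chip ->irq_compose_msi_msg path: atomic, only patches the address. */
static void my_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct msi_desc *desc = irq_data_get_msi_desc(data);

	msg->address_hi = upper_32_bits(MY_DOORBELL_PA);
	msg->address_lo = lower_32_bits(MY_DOORBELL_PA);
	msg->data = data->hwirq;

	/* Rewrites the address to the remapped IOVA when an IOMMU is in use. */
	iommu_dma_compose_msi_msg(desc, msg);
}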