Lines Matching +full:dma +full:- +full:coherent
1 // SPDX-License-Identifier: GPL-2.0-only
3 * A fairly generic DMA-API to IOMMU-API glue layer.
5 * Copyright (C) 2014-2015 ARM Ltd.
7 * based in part on arch/arm/mm/dma-mapping.c:
8 * Copyright (C) 2000-2004 Russell King
13 #include <linux/dma-map-ops.h>
14 #include <linux/dma-iommu.h>
54 if (cookie->type == IOMMU_DMA_IOVA_COOKIE) in cookie_msi_granule()
55 return cookie->iovad.granule; in cookie_msi_granule()
65 INIT_LIST_HEAD(&cookie->msi_page_list); in cookie_alloc()
66 cookie->type = type; in cookie_alloc()
72 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
73 * @domain: IOMMU domain to prepare for DMA-API usage
76 * callback when domain->type == IOMMU_DOMAIN_DMA.
80 if (domain->iova_cookie) in iommu_get_dma_cookie()
81 return -EEXIST; in iommu_get_dma_cookie()
83 domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE); in iommu_get_dma_cookie()
84 if (!domain->iova_cookie) in iommu_get_dma_cookie()
85 return -ENOMEM; in iommu_get_dma_cookie()
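For context, an IOMMU driver would normally attach the cookie from its domain_alloc callback, roughly as in the sketch below. This is a hedged illustration, not code from this file; struct my_domain and my_domain_alloc() are made-up names.

    #include <linux/dma-iommu.h>
    #include <linux/iommu.h>
    #include <linux/slab.h>

    struct my_domain {
    	struct iommu_domain	domain;
    	/* driver-private state would live here */
    };

    static struct iommu_domain *my_domain_alloc(unsigned int type)
    {
    	struct my_domain *md;

    	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
    		return NULL;

    	md = kzalloc(sizeof(*md), GFP_KERNEL);
    	if (!md)
    		return NULL;

    	/* DMA domains need the IOVA cookie before first use */
    	if (type == IOMMU_DOMAIN_DMA &&
    	    iommu_get_dma_cookie(&md->domain)) {
    		kfree(md);
    		return NULL;
    	}

    	return &md->domain;
    }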
92 * iommu_get_msi_cookie - Acquire just MSI remapping resources
96 * Users who manage their own IOVA allocation and do not want DMA API support,
107 if (domain->type != IOMMU_DOMAIN_UNMANAGED) in iommu_get_msi_cookie()
108 return -EINVAL; in iommu_get_msi_cookie()
110 if (domain->iova_cookie) in iommu_get_msi_cookie()
111 return -EEXIST; in iommu_get_msi_cookie()
115 return -ENOMEM; in iommu_get_msi_cookie()
117 cookie->msi_iova = base; in iommu_get_msi_cookie()
118 domain->iova_cookie = cookie; in iommu_get_msi_cookie()
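For illustration only: a caller that manages IOVAs itself (a VFIO-style user, for instance) but still wants MSI doorbells remapped might use the helper as sketched below. The function name and resv_base are placeholders chosen here, not anything defined by this file.

    static int my_attach_with_msi(struct bus_type *bus, struct iommu_group *group,
    				  dma_addr_t resv_base)
    {
    	struct iommu_domain *domain = iommu_domain_alloc(bus);
    	int ret;

    	if (!domain)
    		return -ENOMEM;

    	ret = iommu_attach_group(domain, group);
    	if (ret)
    		goto out_free;

    	/* Reserve an IOVA window starting at resv_base for MSI doorbell pages */
    	ret = iommu_get_msi_cookie(domain, resv_base);
    	if (ret) {
    		iommu_detach_group(domain, group);
    		goto out_free;
    	}
    	return 0;

    out_free:
    	iommu_domain_free(domain);
    	return ret;
    }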
124 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
132 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_put_dma_cookie()
138 if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) in iommu_put_dma_cookie()
139 put_iova_domain(&cookie->iovad); in iommu_put_dma_cookie()
141 list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) { in iommu_put_dma_cookie()
142 list_del(&msi->list); in iommu_put_dma_cookie()
146 domain->iova_cookie = NULL; in iommu_put_dma_cookie()
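The matching release typically lives in the driver's domain_free callback; a hedged sketch using the same made-up my_domain type as above:

    static void my_domain_free(struct iommu_domain *domain)
    {
    	struct my_domain *md = container_of(domain, struct my_domain, domain);

    	/* Harmless if no cookie was ever attached (non-DMA domains) */
    	iommu_put_dma_cookie(domain);
    	/* ... release page tables and hardware context here ... */
    	kfree(md);
    }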
151 * iommu_dma_get_resv_regions - Reserved region driver helper
156 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
163 if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode)) in iommu_dma_get_resv_regions()
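Typical use is from an IOMMU driver's get_resv_regions callback, which adds its own hardware-specific regions and then chains to this helper for the generic ones. A hedged sketch follows; MY_MSI_BASE and MY_MSI_SIZE are invented constants.

    static void my_iommu_get_resv_regions(struct device *dev,
    				      struct list_head *head)
    {
    	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
    	struct iommu_resv_region *region;

    	/* Software MSI window the driver wants IOVA allocation steered away from */
    	region = iommu_alloc_resv_region(MY_MSI_BASE, MY_MSI_SIZE, prot,
    					 IOMMU_RESV_SW_MSI);
    	if (region)
    		list_add_tail(&region->list, head);

    	/* PCI bridge windows, firmware (IORT/DT) reservations, etc. */
    	iommu_dma_get_resv_regions(dev, head);
    }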
172 struct iova_domain *iovad = &cookie->iovad; in cookie_init_hw_msi_region()
176 start -= iova_offset(iovad, start); in cookie_init_hw_msi_region()
177 num_pages = iova_align(iovad, end - start) >> iova_shift(iovad); in cookie_init_hw_msi_region()
182 return -ENOMEM; in cookie_init_hw_msi_region()
184 msi_page->phys = start; in cookie_init_hw_msi_region()
185 msi_page->iova = start; in cookie_init_hw_msi_region()
186 INIT_LIST_HEAD(&msi_page->list); in cookie_init_hw_msi_region()
187 list_add(&msi_page->list, &cookie->msi_page_list); in cookie_init_hw_msi_region()
188 start += iovad->granule; in cookie_init_hw_msi_region()
197 struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); in iova_reserve_pci_windows()
202 resource_list_for_each_entry(window, &bridge->windows) { in iova_reserve_pci_windows()
203 if (resource_type(window->res) != IORESOURCE_MEM) in iova_reserve_pci_windows()
206 lo = iova_pfn(iovad, window->res->start - window->offset); in iova_reserve_pci_windows()
207 hi = iova_pfn(iovad, window->res->end - window->offset); in iova_reserve_pci_windows()
211 /* Get reserved DMA windows from host bridge */ in iova_reserve_pci_windows()
212 resource_list_for_each_entry(window, &bridge->dma_ranges) { in iova_reserve_pci_windows()
213 end = window->res->start - window->offset; in iova_reserve_pci_windows()
221 dev_err(&dev->dev, "Failed to reserve IOVA\n"); in iova_reserve_pci_windows()
222 return -EINVAL; in iova_reserve_pci_windows()
225 start = window->res->end - window->offset + 1; in iova_reserve_pci_windows()
227 if (window->node.next == &bridge->dma_ranges && in iova_reserve_pci_windows()
240 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iova_reserve_iommu_regions()
241 struct iova_domain *iovad = &cookie->iovad; in iova_reserve_iommu_regions()
257 if (region->type == IOMMU_RESV_SW_MSI) in iova_reserve_iommu_regions()
260 lo = iova_pfn(iovad, region->start); in iova_reserve_iommu_regions()
261 hi = iova_pfn(iovad, region->start + region->length - 1); in iova_reserve_iommu_regions()
264 if (region->type == IOMMU_RESV_MSI) in iova_reserve_iommu_regions()
265 ret = cookie_init_hw_msi_region(cookie, region->start, in iova_reserve_iommu_regions()
266 region->start + region->length); in iova_reserve_iommu_regions()
281 domain = cookie->fq_domain; in iommu_dma_flush_iotlb_all()
284 * implies that ops->flush_iotlb_all must be non-NULL. in iommu_dma_flush_iotlb_all()
286 domain->ops->flush_iotlb_all(domain); in iommu_dma_flush_iotlb_all()
290 * iommu_dma_init_domain - Initialise a DMA mapping domain
304 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_init_domain()
309 if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE) in iommu_dma_init_domain()
310 return -EINVAL; in iommu_dma_init_domain()
312 iovad = &cookie->iovad; in iommu_dma_init_domain()
315 order = __ffs(domain->pgsize_bitmap); in iommu_dma_init_domain()
319 if (domain->geometry.force_aperture) { in iommu_dma_init_domain()
320 if (base > domain->geometry.aperture_end || in iommu_dma_init_domain()
321 base + size <= domain->geometry.aperture_start) { in iommu_dma_init_domain()
322 pr_warn("specified DMA range outside IOMMU capability\n"); in iommu_dma_init_domain()
323 return -EFAULT; in iommu_dma_init_domain()
327 domain->geometry.aperture_start >> order); in iommu_dma_init_domain()
330 /* start_pfn is always nonzero for an already-initialised domain */ in iommu_dma_init_domain()
331 if (iovad->start_pfn) { in iommu_dma_init_domain()
332 if (1UL << order != iovad->granule || in iommu_dma_init_domain()
333 base_pfn != iovad->start_pfn) { in iommu_dma_init_domain()
334 pr_warn("Incompatible range for DMA domain\n"); in iommu_dma_init_domain()
335 return -EFAULT; in iommu_dma_init_domain()
343 if (!cookie->fq_domain && !iommu_domain_get_attr(domain, in iommu_dma_init_domain()
349 cookie->fq_domain = domain; in iommu_dma_init_domain()
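A quick worked example of the granule/base arithmetic above, with hypothetical values (assuming linux/sizes.h constants):

    	/*
    	 * pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G gives __ffs() order 12,
    	 * so the IOVA granule is 4K; dma_base = 0x80000000 then gives
    	 * base_pfn = 0x80000.  A later device attaching to the same
    	 * domain with a different base or a different minimum page size
    	 * hits the "Incompatible range for DMA domain" warning (-EFAULT).
    	 */
    	unsigned long order = __ffs(SZ_4K | SZ_2M | SZ_1G);	/* 12 */
    	unsigned long base_pfn = 0x80000000UL >> order;		/* 0x80000 */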
361 const struct iommu_ops *ops = domain->ops; in iommu_dma_deferred_attach()
366 if (unlikely(ops->is_attach_deferred && in iommu_dma_deferred_attach()
367 ops->is_attach_deferred(domain, dev))) in iommu_dma_deferred_attach()
374 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
376 * @dir: Direction of DMA transfer
377 * @coherent: Is the DMA master cache-coherent?
378 * @attrs: DMA attributes for the mapping
382 static int dma_info_to_prot(enum dma_data_direction dir, bool coherent, in dma_info_to_prot() argument
385 int prot = coherent ? IOMMU_CACHE : 0; in dma_info_to_prot()
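The rest of the function is elided by the search; it translates the DMA direction into IOMMU permission bits. A hedged reconstruction of that mapping, based on the DMA-API semantics rather than the hidden lines:

    	if (attrs & DMA_ATTR_PRIVILEGED)
    		prot |= IOMMU_PRIV;

    	switch (dir) {
    	case DMA_BIDIRECTIONAL:
    		return prot | IOMMU_READ | IOMMU_WRITE;
    	case DMA_TO_DEVICE:
    		return prot | IOMMU_READ;	/* device only reads memory */
    	case DMA_FROM_DEVICE:
    		return prot | IOMMU_WRITE;	/* device only writes memory */
    	default:
    		return 0;
    	}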
405 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_alloc_iova()
406 struct iova_domain *iovad = &cookie->iovad; in iommu_dma_alloc_iova()
409 if (cookie->type == IOMMU_DMA_MSI_COOKIE) { in iommu_dma_alloc_iova()
410 cookie->msi_iova += size; in iommu_dma_alloc_iova()
411 return cookie->msi_iova - size; in iommu_dma_alloc_iova()
417 * Freeing non-power-of-two-sized allocations back into the IOVA caches in iommu_dma_alloc_iova()
422 if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1))) in iommu_dma_alloc_iova()
425 dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit); in iommu_dma_alloc_iova()
427 if (domain->geometry.force_aperture) in iommu_dma_alloc_iova()
428 dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end); in iommu_dma_alloc_iova()
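An illustration of the rounding hinted at by the comment above (the roundup itself is elided by the search output, so treat this as a hedged sketch). With a 4K granule, a 3-page request is padded to 4 pages so the IOVA freed later lands in a power-of-two rcache bucket:

    	size_t size = 3 * SZ_4K;				/* hypothetical request */
    	unsigned long iova_len = size >> iova_shift(iovad);	/* 3 pages */

    	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
    		iova_len = roundup_pow_of_two(iova_len);	/* -> 4 pages */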
445 struct iova_domain *iovad = &cookie->iovad; in iommu_dma_free_iova()
448 if (cookie->type == IOMMU_DMA_MSI_COOKIE) in iommu_dma_free_iova()
449 cookie->msi_iova -= size; in iommu_dma_free_iova()
450 else if (cookie->fq_domain) /* non-strict mode */ in iommu_dma_free_iova()
462 struct iommu_dma_cookie *cookie = domain->iova_cookie; in __iommu_dma_unmap()
463 struct iova_domain *iovad = &cookie->iovad; in __iommu_dma_unmap()
468 dma_addr -= iova_off; in __iommu_dma_unmap()
475 if (!cookie->fq_domain) in __iommu_dma_unmap()
484 struct iommu_dma_cookie *cookie = domain->iova_cookie; in __iommu_dma_map()
485 struct iova_domain *iovad = &cookie->iovad; in __iommu_dma_map()
498 if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) { in __iommu_dma_map()
507 while (count--) in __iommu_dma_free_pages()
518 order_mask &= (2U << MAX_ORDER) - 1; in __iommu_dma_alloc_pages()
537 * Higher-order allocations are a convenience rather in __iommu_dma_alloc_pages()
539 * falling back to minimum-order allocations. in __iommu_dma_alloc_pages()
541 for (order_mask &= (2U << __fls(count)) - 1; in __iommu_dma_alloc_pages()
560 count -= order_size; in __iommu_dma_alloc_pages()
561 while (order_size--) in __iommu_dma_alloc_pages()
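A hypothetical worked example of the fallback described above, assuming 4K CPU pages: an IOMMU whose pgsize_bitmap is SZ_4K | SZ_2M yields order_mask = BIT(0) | BIT(9). For a 520-page buffer the loop first grabs one order-9 block (512 pages) and satisfies the last 8 pages at order 0; if the order-9 attempt failed, the whole request would simply degrade to order-0 pages instead of failing.

    	unsigned int order_mask = (SZ_4K | SZ_2M) >> PAGE_SHIFT;	/* 0x201 */
    	unsigned int count = 520;
    	unsigned int order = __fls(order_mask & ((2U << __fls(count)) - 1));
    	/* order == 9: one 2M block covers 512 pages, 8 pages remain at order 0 */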
568 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
572 * @dma_handle: Out argument for allocated DMA handle
575 * @attrs: DMA attributes for this allocation
587 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_alloc_remap()
588 struct iova_domain *iovad = &cookie->iovad; in iommu_dma_alloc_remap()
589 bool coherent = dev_is_dma_coherent(dev); in iommu_dma_alloc_remap() local
590 int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); in iommu_dma_alloc_remap()
591 unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap; in iommu_dma_alloc_remap()
602 min_size = alloc_sizes & -alloc_sizes; in iommu_dma_alloc_remap()
619 iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev); in iommu_dma_alloc_remap()
631 arch_dma_prep_coherent(sg_page(sg), sg->length); in iommu_dma_alloc_remap()
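From a driver's point of view this path is simply dma_alloc_attrs() on a device behind an IOMMU, when the buffer ends up remapped (whether it does depends on coherency and CONFIG_DMA_REMAP, per the fragments further down). A minimal hypothetical caller:

    static int my_driver_alloc(struct device *dev)
    {
    	dma_addr_t dma;
    	void *cpu;

    	/* Physically scattered pages, one contiguous IOVA, vmap'd CPU alias */
    	cpu = dma_alloc_attrs(dev, SZ_64K, &dma, GFP_KERNEL, 0);
    	if (!cpu)
    		return -ENOMEM;

    	/* ... program "dma" into the device, use "cpu" from the kernel ... */

    	dma_free_attrs(dev, SZ_64K, cpu, dma, 0);
    	return 0;
    }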
659 * __iommu_dma_mmap - Map a buffer into provided user VMA
708 arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir); in iommu_dma_sync_sg_for_cpu()
722 arch_sync_dma_for_device(sg_phys(sg), sg->length, dir); in iommu_dma_sync_sg_for_device()
730 bool coherent = dev_is_dma_coherent(dev); in iommu_dma_map_page() local
731 int prot = dma_info_to_prot(dir, coherent, attrs); in iommu_dma_map_page()
735 if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && in iommu_dma_map_page()
750 * Prepare a successfully-mapped scatterlist to give back to the caller.
768 unsigned int s_iova_len = s->length; in __finalise_sg()
770 s->offset += s_iova_off; in __finalise_sg()
771 s->length = s_length; in __finalise_sg()
776 * Now fill in the real DMA data. If... in __finalise_sg()
777 * - there is a valid output segment to append to in __finalise_sg()
778 * - and this segment starts on an IOVA page boundary in __finalise_sg()
779 * - but doesn't fall at a segment boundary in __finalise_sg()
780 * - and wouldn't make the resulting output segment too long in __finalise_sg()
783 (max_len - cur_len >= s_length)) { in __finalise_sg()
807 * but making sure the DMA fields are invalidated.
816 s->offset += sg_dma_address(s); in __invalidate_sg()
818 s->length = sg_dma_len(s); in __invalidate_sg()
825 * The DMA API client is passing in a scatterlist which could describe
828 * impedance-matching, to be able to hand off a suitably-aligned list,
835 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_map_sg()
836 struct iova_domain *iovad = &cookie->iovad; in iommu_dma_map_sg()
853 * trickery we can modify the list in-place, but reversibly, by in iommu_dma_map_sg()
854 * stashing the unaligned parts in the as-yet-unused DMA fields. in iommu_dma_map_sg()
857 size_t s_iova_off = iova_offset(iovad, s->offset); in iommu_dma_map_sg()
858 size_t s_length = s->length; in iommu_dma_map_sg()
859 size_t pad_len = (mask - iova_len + 1) & mask; in iommu_dma_map_sg()
863 s->offset -= s_iova_off; in iommu_dma_map_sg()
865 s->length = s_length; in iommu_dma_map_sg()
870 * - If mask size >= IOVA size, then the IOVA range cannot in iommu_dma_map_sg()
872 * - If mask size < IOVA size, then the IOVA range must start in iommu_dma_map_sg()
876 * - The mask must be a power of 2, so pad_len == 0 if in iommu_dma_map_sg()
880 if (pad_len && pad_len < s_length - 1) { in iommu_dma_map_sg()
881 prev->length += pad_len; in iommu_dma_map_sg()
895 * implementation - it knows better than we do. in iommu_dma_map_sg()
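A worked example of the boundary padding above, with hypothetical numbers (64K segment boundary mask from dma_get_seg_boundary()):

    	unsigned long mask = 0xffff;
    	size_t iova_len = 0xe000;	/* length accumulated so far */
    	size_t s_length = 0x3000;	/* next segment's length */
    	size_t pad_len = (mask - iova_len + 1) & mask;	/* 0x2000 */

    	/*
    	 * 0 < pad_len < s_length - 1, so the previous segment grows by
    	 * 0x2000 of dead space and this one starts on a fresh 64K
    	 * boundary in IOVA space; no final DMA segment can straddle the
    	 * boundary mask.
    	 */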
924 for_each_sg(sg_next(sg), tmp, nents - 1, i) { in iommu_dma_unmap_sg()
930 __iommu_dma_unmap(dev, start, end - start); in iommu_dma_unmap_sg()
953 /* Non-coherent atomic allocation? Easy */ in __iommu_dma_free()
960 * If it the address is remapped, then it's either non-coherent in __iommu_dma_free()
968 /* Lowmem means a coherent atomic or CMA allocation */ in __iommu_dma_free()
988 bool coherent = dev_is_dma_coherent(dev); in iommu_dma_alloc_pages() local
1000 if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) { in iommu_dma_alloc_pages()
1008 if (!coherent) in iommu_dma_alloc_pages()
1025 bool coherent = dev_is_dma_coherent(dev); in iommu_dma_alloc() local
1026 int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); in iommu_dma_alloc()
1039 !gfpflags_allow_blocking(gfp) && !coherent) in iommu_dma_alloc()
1048 dev->coherent_dma_mask); in iommu_dma_alloc()
1090 unsigned long pfn, off = vma->vm_pgoff; in iommu_dma_mmap()
1093 vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); in iommu_dma_mmap()
1098 if (off >= nr_pages || vma_pages(vma) > nr_pages - off) in iommu_dma_mmap()
1099 return -ENXIO; in iommu_dma_mmap()
1111 return remap_pfn_range(vma, vma->vm_start, pfn + off, in iommu_dma_mmap()
1112 vma->vm_end - vma->vm_start, in iommu_dma_mmap()
1113 vma->vm_page_prot); in iommu_dma_mmap()
1139 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); in iommu_dma_get_sgtable()
1147 return (1UL << __ffs(domain->pgsize_bitmap)) - 1; in iommu_dma_get_merge_boundary()
1173 * The IOMMU core code allocates the default DMA domain, which the underlying
1174 * IOMMU driver needs to support via the dma-iommu layer.
1184 * The IOMMU core code allocates the default DMA domain, which the in iommu_setup_dma_ops()
1185 * underlying IOMMU driver needs to support via the dma-iommu layer. in iommu_setup_dma_ops()
1187 if (domain->type == IOMMU_DOMAIN_DMA) { in iommu_setup_dma_ops()
1190 dev->dma_ops = &iommu_dma_ops; in iommu_setup_dma_ops()
1195 pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n", in iommu_setup_dma_ops()
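On arm64 this is wired up from arch_setup_dma_ops(); the sketch below shows roughly how that hookup looks, hedged because the arch code is not part of this file and is simplified here:

    void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
    			const struct iommu_ops *iommu, bool coherent)
    {
    	dev->dma_coherent = coherent;

    	/* Only devices actually translated by an IOMMU get iommu_dma_ops */
    	if (iommu)
    		iommu_setup_dma_ops(dev, dma_base, size);
    }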
1202 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_get_msi_page()
1208 msi_addr &= ~(phys_addr_t)(size - 1); in iommu_dma_get_msi_page()
1209 list_for_each_entry(msi_page, &cookie->msi_page_list, list) in iommu_dma_get_msi_page()
1210 if (msi_page->phys == msi_addr) in iommu_dma_get_msi_page()
1224 INIT_LIST_HEAD(&msi_page->list); in iommu_dma_get_msi_page()
1225 msi_page->phys = msi_addr; in iommu_dma_get_msi_page()
1226 msi_page->iova = iova; in iommu_dma_get_msi_page()
1227 list_add(&msi_page->list, &cookie->msi_page_list); in iommu_dma_get_msi_page()
1244 if (!domain || !domain->iova_cookie) { in iommu_dma_prepare_msi()
1245 desc->iommu_cookie = NULL; in iommu_dma_prepare_msi()
1261 return -ENOMEM; in iommu_dma_prepare_msi()
1274 if (!domain || !domain->iova_cookie || WARN_ON(!msi_page)) in iommu_dma_compose_msi_msg()
1277 msg->address_hi = upper_32_bits(msi_page->iova); in iommu_dma_compose_msi_msg()
1278 msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1; in iommu_dma_compose_msi_msg()
1279 msg->address_lo += lower_32_bits(msi_page->iova); in iommu_dma_compose_msi_msg()
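The consumers of these two helpers are MSI controller drivers (the GICv3 ITS, for example): the prepare step maps the doorbell once per device, and compose then patches the doorbell IOVA into the message. A hedged sketch with placeholder names (doorbell_phys, my_event_id):

    static int my_msi_prepare(struct irq_domain *domain, struct device *dev,
    			  int nvec, msi_alloc_info_t *info)
    {
    	/* Map (or look up) an IOVA for the doorbell before IRQs are set up */
    	return iommu_dma_prepare_msi(info->desc, doorbell_phys);
    }

    static void my_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
    {
    	msg->address_hi = upper_32_bits(doorbell_phys);
    	msg->address_lo = lower_32_bits(doorbell_phys);
    	msg->data = my_event_id(d);

    	/* Substitutes the doorbell IOVA if the device sits behind an IOMMU */
    	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
    }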