Lines matching "dma-coherent" in kernel/dma/mapping.c

// SPDX-License-Identifier: GPL-2.0
/* arch-independent dma-mapping routines */
#include <linux/dma-map-ops.h>

/* Managed DMA API */
/* in dmam_release(): */
	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
		       this->attrs);

/* in dmam_match(): */
	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @dma_handle: DMA handle of the memory to free
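/*
 * Example (not part of this file): a minimal sketch of the managed coherent
 * API.  Memory from dmam_alloc_coherent() is released automatically when the
 * driver detaches; dmam_free_coherent() is only needed for an early, explicit
 * free.  The foo_* names and sizes are invented for illustration.
 */
#include <linux/dma-mapping.h>

#define FOO_RING_BYTES	4096

struct foo_priv {
	void		*ring;
	dma_addr_t	ring_dma;
};

static int foo_setup_ring(struct device *dev, struct foo_priv *priv)
{
	priv->ring = dmam_alloc_coherent(dev, FOO_RING_BYTES, &priv->ring_dma,
					 GFP_KERNEL);
	if (!priv->ring)
		return -ENOMEM;
	return 0;
}

static void foo_drop_ring(struct device *dev, struct foo_priv *priv)
{
	/* optional: free before detach instead of waiting for devres teardown */
	dmam_free_coherent(dev, FOO_RING_BYTES, priv->ring, priv->ring_dma);
	priv->ring = NULL;
}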
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dma_handle: Out argument for allocated DMA handle

/* in dmam_alloc_attrs(): */
	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;
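/*
 * Example (not part of this file): dmam_alloc_attrs() when an attribute such
 * as DMA_ATTR_WRITE_COMBINE is wanted; the foo_fb structure is invented.
 */
#include <linux/dma-mapping.h>

struct foo_fb {
	void		*vaddr;
	dma_addr_t	dma_addr;
};

static int foo_alloc_fb(struct device *dev, struct foo_fb *fb, size_t size)
{
	fb->vaddr = dmam_alloc_attrs(dev, size, &fb->dma_addr, GFP_KERNEL,
				     DMA_ATTR_WRITE_COMBINE);
	if (!fb->vaddr)
		return -ENOMEM;
	/* freed automatically by devres when the device is unbound */
	return 0;
}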
/* in dma_go_direct(): */
	if (dev->dma_ops_bypass)
		return min_not_zero(mask, dev->bus_dma_limit) >=
			dma_direct_get_required_mask(dev);

 * Check if the device uses a direct mapping for streaming DMA operations.
 * This allows IOMMU drivers to set a bypass mode if the DMA mask is large enough.

/* in dma_alloc_direct(): */
	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
/* in dma_map_direct(): */
	return dma_go_direct(dev, *dev->dma_mask, ops);
/* in dma_map_page_attrs(): */
	if (WARN_ON_ONCE(!dev->dma_mask))
	addr = ops->map_page(dev, page, offset, size, dir, attrs);

/* in dma_unmap_page_attrs(): */
	else if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
/* in __dma_map_sg_attrs(): */
	if (WARN_ON_ONCE(!dev->dma_mask))
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	} else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
				ents != -EIO && ents != -EREMOTEIO)) {
		return -EIO;
 * dma_map_sg_attrs - Map the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist passed in the sg argument with
 * nents segments for the @dir DMA operation by the @dev device.
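/*
 * Example (not part of this file): mapping a scatterlist for a device read of
 * system memory.  dma_map_sg() returns the number of mapped segments (possibly
 * fewer than nents after IOMMU merging) or 0 on failure; names are invented.
 */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int foo_send_buffer(struct device *dev, struct scatterlist *sgl,
			   int nents)
{
	int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

	if (mapped == 0)
		return -ENOMEM;

	/* ... program the device using sg_dma_address()/sg_dma_len() ... */

	/* always unmap with the original nents, not the mapped count */
	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}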
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the map operation
 *
 * Maps the scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success, the
 * ownership for the buffer is transferred to the DMA domain. One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move ownership
 * back to the CPU domain before the CPU touches the buffer again.
 *
 * Returns 0 on success or a negative error code:
 *   -EINVAL     An invalid argument, unaligned access or other error in
 *               usage; will not succeed if retried.
 *   -ENOMEM     Insufficient resources (like memory or IOVA space) to
 *               complete the mapping; may succeed if retried later.
 *   -EIO        Legacy error code with an unknown meaning, e.g. this is
 *               returned if a lower-level call returned DMA_MAPPING_ERROR.
 *   -EREMOTEIO  The DMA device cannot access P2PDMA memory specified
 *               in the sg_table.
/* in dma_map_sgtable(): */
	nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	sgt->nents = nents;
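/*
 * Example (not part of this file): typical dma_map_sgtable() use on an
 * sg_table built from pages; the foo_* names are invented.
 */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int foo_map_pages(struct device *dev, struct sg_table *sgt,
			 struct page **pages, unsigned int npages)
{
	int ret;

	ret = sg_alloc_table_from_pages(sgt, pages, npages, 0,
					(unsigned long)npages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		return ret;

	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret) {		/* -EINVAL, -ENOMEM, -EIO or -EREMOTEIO */
		sg_free_table(sgt);
		return ret;
	}

	/* ... hand the DMA addresses to the hardware ... */

	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(sgt);
	return 0;
}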
/* in dma_unmap_sg_attrs(): */
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
/* in dma_map_resource(): */
	if (WARN_ON_ONCE(!dev->dma_mask))
	else if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

/* in dma_unmap_resource(): */
	if (!dma_map_direct(dev, ops) && ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
/* in dma_sync_single_for_cpu(): */
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);

/* in dma_sync_single_for_device(): */
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);

/* in dma_sync_sg_for_cpu(): */
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);

/* in dma_sync_sg_for_device(): */
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
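/*
 * Example (not part of this file): a streaming mapping whose buffer the CPU
 * inspects between transfers, so explicit ownership hand-offs are needed;
 * names are invented.
 */
#include <linux/dma-mapping.h>

static int foo_rx_one(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... start the device transfer into @dma and wait for it ... */

	/* hand the buffer to the CPU before reading it */
	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
	/* ... CPU parses the received data ... */

	/* hand it back to the device for the next transfer */
	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);

	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
	return 0;
}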
/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scatterlist. This presents a couple of problems:
 *  1. Not all memory allocated via the coherent DMA APIs is backed by
 *     a struct page.
 *  2. Passing coherent DMA memory into the streaming APIs is not allowed.
 */
/* in dma_get_sgtable_attrs(): */
	if (!ops->get_sgtable)
		return -ENXIO;
	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.

/* in dma_can_mmap(): */
	return ops->mmap != NULL;
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space. The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.

/* in dma_mmap_attrs(): */
	if (!ops->mmap)
		return -ENXIO;
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
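/*
 * Example (not part of this file): exposing a coherent buffer to user space
 * from a file_operations ->mmap handler; the foo_* names are invented.
 */
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

struct foo_mmap_ctx {
	struct device	*dev;
	void		*vaddr;		/* from dma_alloc_coherent() */
	dma_addr_t	dma;
	size_t		size;
};

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct foo_mmap_ctx *ctx = file->private_data;

	if (!dma_can_mmap(ctx->dev))
		return -ENXIO;

	return dma_mmap_coherent(ctx->dev, vma, ctx->vaddr, ctx->dma,
				 ctx->size);
}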
/* in dma_get_required_mask(): */
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);
	/*
	 * We require every DMA ops implementation to at least support a 32-bit
	 * DMA mask (and use bounce buffering if that isn't supported in
	 * hardware). As the direct mapping code has its own routine to
	 * actually report an optimal mask we default to 32-bit here as that
	 * is the right thing for most IOMMUs.
	 */
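/*
 * Example (not part of this file): some drivers consult
 * dma_get_required_mask() to decide whether wider addressing (and larger
 * descriptors) is worth enabling; a sketch with an invented helper name.
 */
#include <linux/dma-mapping.h>

static int foo_pick_dma_mask(struct device *dev)
{
	if (dma_get_required_mask(dev) > DMA_BIT_MASK(32))
		return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}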
/* in dma_alloc_attrs(): */
	WARN_ON_ONCE(!dev->coherent_dma_mask);
	/*
	 * DMA allocations can never be turned back into a page pointer, so
	 * requesting compound pages doesn't make sense.
	 */
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
/* in dma_free_attrs(): */
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * here from IRQ context is unsafe, and such callers are
	 * probably misusing the coherent API anyway.
	 */
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
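/*
 * Example (not part of this file): unmanaged coherent allocation in probe()
 * and release in remove(), never from IRQ context (see the comment above);
 * the foo_* names are invented.
 */
#include <linux/dma-mapping.h>

#define FOO_DESC_BYTES	4096

struct foo_ring {
	void		*desc;
	dma_addr_t	desc_dma;
};

static int foo_ring_alloc(struct device *dev, struct foo_ring *ring)
{
	ring->desc = dma_alloc_coherent(dev, FOO_DESC_BYTES, &ring->desc_dma,
					GFP_KERNEL);
	return ring->desc ? 0 : -ENOMEM;
}

static void foo_ring_free(struct device *dev, struct foo_ring *ring)
{
	/* process context only: the free path may vunmap() on some platforms */
	dma_free_coherent(dev, FOO_DESC_BYTES, ring->desc, ring->desc_dma);
}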
/* in __dma_alloc_pages(): */
	if (WARN_ON_ONCE(!dev->coherent_dma_mask))
		return NULL;
	if (!ops->alloc_pages)
		return NULL;
	return ops->alloc_pages(dev, size, dma_handle, dir, gfp);

/* in __dma_free_pages(): */
	else if (ops->free_pages)
		ops->free_pages(dev, size, page, dma_handle, dir);
/* in dma_mmap_pages(): */
	if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start,
			       page_to_pfn(page) + vma->vm_pgoff,
			       vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);
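/*
 * Example (not part of this file): dma_alloc_pages() returns page-backed,
 * device-addressable memory that the caller syncs explicitly like a streaming
 * mapping; the foo_* names are invented.
 */
#include <linux/dma-mapping.h>

static struct page *foo_get_rx_buf(struct device *dev, size_t size,
				   dma_addr_t *dma)
{
	return dma_alloc_pages(dev, size, dma, DMA_FROM_DEVICE, GFP_KERNEL);
}

static void foo_put_rx_buf(struct device *dev, size_t size, struct page *page,
			   dma_addr_t dma)
{
	dma_free_pages(dev, size, page, dma, DMA_FROM_DEVICE);
}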
/* in alloc_single_sgt(): */
	page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp);
	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	sg_dma_len(sgt->sgl) = sgt->sgl->length;
/* in dma_alloc_noncontiguous(): */
	if (ops && ops->alloc_noncontiguous)
		sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs);
	sgt->nents = 1;
	debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
/* in free_single_sgt(): */
	__dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address,
			 dir);

/* in dma_free_noncontiguous(): */
	debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
	if (ops && ops->free_noncontiguous)
		ops->free_noncontiguous(dev, size, sgt, dir);
/* in dma_vmap_noncontiguous(): */
	if (ops && ops->alloc_noncontiguous)
		return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
	return page_address(sg_page(sgt->sgl));

/* in dma_vunmap_noncontiguous(): */
	if (ops && ops->alloc_noncontiguous)
		vunmap(vaddr);

/* in dma_mmap_noncontiguous(): */
	if (ops && ops->alloc_noncontiguous) {
		if (vma->vm_pgoff >= count ||
		    vma_pages(vma) > count - vma->vm_pgoff)
			return -ENXIO;
		return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
	}
	return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
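/*
 * Example (not part of this file): the noncontiguous allocator hands back an
 * already-mapped sg_table, and dma_vmap_noncontiguous() provides an optional
 * kernel mapping; invented names, error handling kept minimal.
 */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int foo_use_stream_buf(struct device *dev, size_t size)
{
	struct sg_table *sgt;
	void *vaddr;

	sgt = dma_alloc_noncontiguous(dev, size, DMA_FROM_DEVICE, GFP_KERNEL, 0);
	if (!sgt)
		return -ENOMEM;

	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
	if (!vaddr) {
		dma_free_noncontiguous(dev, size, sgt, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	/* ... program the device with the DMA addresses in sgt ... */

	dma_vunmap_noncontiguous(dev, vaddr);
	dma_free_noncontiguous(dev, size, sgt, DMA_FROM_DEVICE);
	return 0;
}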
/* in dma_supported(): */
	/* ->dma_supported sets the bypass flag, so we must always call
	 * into the method here unless the device is truly direct mapped. */
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
/* in dma_pci_p2pdma_supported(): */
	/* if ops is not set, dma direct will be used which supports P2PDMA */
	/* P2PDMA should not be used with dma mapping ops that do not have
	 * support, even if the specific device is bypassing them. */
	return ops->flags & DMA_F_PCI_P2PDMA_SUPPORTED;
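/*
 * Example (not part of this file): gating a peer-to-peer DMA path on
 * dma_pci_p2pdma_supported(); the foo_* name is invented.
 */
#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int foo_enable_p2p(struct pci_dev *pdev)
{
	if (!dma_pci_p2pdma_supported(&pdev->dev))
		return -EOPNOTSUPP;	/* caller falls back to host memory */
	/* ... safe to map P2PDMA pages for this device ... */
	return 0;
}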
/* in dma_set_mask(): */
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;

/* in dma_set_coherent_mask(): */
		return -EIO;
	dev->coherent_dma_mask = mask;
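/*
 * Example (not part of this file): common probe()-time mask setup.  Trying
 * 64-bit first and falling back to 32-bit is the traditional pattern; the
 * helper name is invented.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>

static int foo_set_dma_masks(struct device *dev)
{
	int ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));

	if (ret)
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		dev_err(dev, "no usable DMA addressing\n");
	return ret;
}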
/* in dma_max_mapping_size(): */
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

/* in dma_opt_mapping_size(): */
	if (ops && ops->opt_mapping_size)
		size = ops->opt_mapping_size();

/* in dma_need_sync(): */
	return ops->sync_single_for_cpu || ops->sync_single_for_device;
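/*
 * Example (not part of this file): caching dma_need_sync() so per-buffer
 * syncs can be skipped on coherent setups, similar in spirit to page_pool;
 * the foo_* names are invented.
 */
#include <linux/dma-mapping.h>

struct foo_rxq {
	struct device	*dev;
	bool		needs_sync;
};

static void foo_rxq_init(struct foo_rxq *q, struct device *dev,
			 dma_addr_t first_buf)
{
	q->dev = dev;
	q->needs_sync = dma_need_sync(dev, first_buf);
}

static void foo_rxq_give_to_device(struct foo_rxq *q, dma_addr_t buf,
				   size_t len)
{
	if (q->needs_sync)
		dma_sync_single_for_device(q->dev, buf, len, DMA_FROM_DEVICE);
}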
/* in dma_get_merge_boundary(): */
	if (!ops || !ops->get_merge_boundary)
		return 0;	/* can't merge */
	return ops->get_merge_boundary(dev);