// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */
9
10 #include <linux/device.h>
11 #include <linux/dma-noncoherent.h>
12 #include <linux/gfp.h>
13 #include <linux/dma-debug.h>
14 #include <linux/export.h>
15 #include <linux/bug.h>
16 #include <asm/cacheflush.h>
17
/*
 * Perform the D-cache maintenance required to make [paddr, paddr + size)
 * coherent for a DMA transfer in @direction: write back dirty lines
 * before the device reads the buffer, invalidate lines before the CPU
 * reads data the device has written.  Any other direction is a caller
 * bug and panics via BUG().
 */
static void __dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
		enum dma_data_direction direction)
{
	const phys_addr_t end = paddr + size;

	switch (direction) {
	case DMA_FROM_DEVICE:
		/* Device wrote memory: discard stale CPU cache lines. */
		invalidate_dcache_range(paddr, end);
		break;
	case DMA_TO_DEVICE:
	case DMA_BIDIRECTIONAL:
		/* Device will read memory: push dirty lines out. */
		flush_dcache_range(paddr, end);
		break;
	default:
		BUG();
	}
}
33
/*
 * dma-noncoherent hook: make a buffer visible to the device before a
 * transfer starts.  Delegates all cache maintenance to __dma_sync().
 */
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	__dma_sync(dev, paddr, size, dir);
}
39
/*
 * dma-noncoherent hook: make device-written data visible to the CPU
 * after a transfer completes.  Delegates all cache maintenance to
 * __dma_sync().
 */
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	__dma_sync(dev, paddr, size, dir);
}
45
/*
 * Map a coherent DMA allocation into user space as non-cached pages.
 *
 * Returns 0 on success or -ENXIO when the requested window (vm_pgoff
 * plus the VMA length) does not fit inside the allocation, or when
 * there is no MMU to map through.
 */
int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t handle, size_t size,
		unsigned long attrs)
{
#ifdef CONFIG_MMU
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long nr_vma_pages = vma_pages(vma);
	unsigned long pgoff = vma->vm_pgoff;
	unsigned long pfn;

	/*
	 * Reject requests that start beyond the allocation or would run
	 * past its end (written to avoid overflow in the comparison).
	 */
	if (pgoff >= nr_pages || nr_vma_pages > nr_pages - pgoff)
		return -ENXIO;

	/* Coherent memory must be mapped uncached into user space too. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	pfn = consistent_virt_to_pfn(cpu_addr);
	return remap_pfn_range(vma, vma->vm_start, pfn + pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
#else
	return -ENXIO;
#endif
}
67