/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-noncoherent.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

/*
 * ARCH specific callbacks for generic noncoherent DMA ops (dma/noncoherent.c)
 *  - hardware IOC not available (or "dma-coherent" not set for device in DT)
 *  - But still handle both coherent and non-coherent requests from caller
 *
 * For DMA coherent hardware (IOC) generic code suffices
 */
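
/*
 * Usage sketch (illustrative only, not part of this file): a driver whose
 * device is not IO-coherent reaches these hooks through the generic DMA
 * API, e.g.
 *
 *	buf = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
 *	...
 *	dma_free_coherent(dev, size, buf, dma_handle);
 */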
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned long order = get_order(size);
	struct page *page;
	phys_addr_t paddr;
	void *kvaddr;
	bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT);

	/*
	 * __GFP_HIGHMEM flag is cleared by upper layer functions
	 * (in include/linux/dma-mapping.h) so we should never get a
	 * __GFP_HIGHMEM here.
	 */
	BUG_ON(gfp & __GFP_HIGHMEM);

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/* This is linear addr (0x8000_0000 based) */
	paddr = page_to_phys(page);

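	/* No dev-to-bus translation here: the DMA handle is the phys addr */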
	*dma_handle = paddr;

	/*
	 * A coherent buffer needs an MMU mapping to enforce non-cacheability.
	 * kvaddr is the kernel virtual address (0x7000_0000 based).
	 */
	if (need_coh) {
		kvaddr = ioremap_nocache(paddr, size);
		if (kvaddr == NULL) {
			__free_pages(page, order);
			return NULL;
		}
	} else {
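		/* Non-consistent: caller does explicit syncs, cached addr is fine */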
		kvaddr = (void *)(u32)paddr;
	}

	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although the core does call flush_cache_vmap(), it only gets kvaddr
	 * and hence can't be used to efficiently flush L1 and/or L2, which
	 * need paddr. Currently flush_cache_vmap() nukes the L1 cache
	 * completely; that will be optimized in a separate commit.
	 */
	if (need_coh)
		dma_cache_wback_inv(paddr, size);

	return kvaddr;
}

void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t paddr = dma_handle;
	struct page *page = virt_to_page(paddr);

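	/* Coherent buffers got an uncached ioremap at alloc time: undo it */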
	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}

int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = __phys_to_pfn(dma_addr);
	unsigned long off = vma->vm_pgoff;
	int ret = -ENXIO;

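	/* Userspace always gets an uncached view of the buffer */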
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

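	/* Remap only if the requested window fits within the allocation */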
	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}

	return ret;
}

/*
 * Cache operations depending on function and direction argument, inspired by
 * https://lkml.org/lkml/2018/5/18/979
 * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
 * dma-mapping: provide a generic dma-noncoherent implementation)"
 *
 *          |   map          ==  for_device     |   unmap     ==  for_cpu
 *          |----------------------------------------------------------------
 * TO_DEV   |   writeback        writeback      |   none          none
 * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
 * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
 *
 *     [*] needed for CPU speculative prefetches
 *
 * NOTE: we don't check the validity of direction argument as it is done in
 * upper layer functions (in include/linux/dma-mapping.h)
 */

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
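		/* CPU filled the buffer: push dirty lines out for device to read */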
		dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
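		/* Device will fill the buffer: discard stale CPU-side lines */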
		dma_cache_inv(paddr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
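		/* Device only read the buffer: CPU copy is still valid, no-op */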
		break;

	/* FROM_DEVICE invalidate needed only due to speculative CPU prefetch */
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}

/*
 * Plug in coherent or noncoherent dma ops
 */
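
/*
 * Devices ask for the coherent path via the standard "dma-coherent" DT
 * property, honoured here only with ARCv2 IOC hardware enabled; the node
 * below is purely illustrative:
 *
 *	ethernet@f0003000 {
 *		...
 *		dma-coherent;
 *	};
 */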
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	/*
	 * IOC hardware snoops all DMA traffic keeping the caches consistent
	 * with memory - eliding the need for any explicit cache maintenance
	 * of DMA buffers - so we can use dma_direct cache ops.
	 */
	if (is_isa_arcv2() && ioc_enable && coherent) {
		set_dma_ops(dev, &dma_direct_ops);
		dev_info(dev, "use dma_direct_ops cache ops\n");
	} else {
		set_dma_ops(dev, &dma_noncoherent_ops);
		dev_info(dev, "use dma_noncoherent_ops cache ops\n");
	}
}