/*
 * DMA coherent memory allocation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Copyright (C) 2002 - 2005 Tensilica Inc.
 * Copyright (C) 2015 Cadence Design Systems Inc.
 *
 * Based on version for i386.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-direct.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/platform.h>

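/*
 * Apply a cache maintenance routine to a physical address range, page
 * by page.  Lowmem pages are reached through the kernel's permanent
 * linear mapping; highmem pages have no such mapping, so each one is
 * mapped temporarily with kmap_atomic() and processed in chunks that
 * never cross a page boundary.
 */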
static void do_cache_op(phys_addr_t paddr, size_t size,
			void (*fn)(unsigned long, unsigned long))
{
	unsigned long off = paddr & (PAGE_SIZE - 1);
	unsigned long pfn = PFN_DOWN(paddr);
	struct page *page = pfn_to_page(pfn);

	if (!PageHighMem(page))
		fn((unsigned long)phys_to_virt(paddr), size);
	else
		while (size > 0) {
			size_t sz = min_t(size_t, size, PAGE_SIZE - off);
			void *vaddr = kmap_atomic(page);

			fn((unsigned long)vaddr + off, sz);
			kunmap_atomic(vaddr);
			off = 0;
			++page;
			size -= sz;
		}
}

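/*
 * Called before the CPU reads a streaming DMA buffer: invalidate the
 * data cache so stale lines are not served in place of data the device
 * just wrote.  DMA_TO_DEVICE needs no CPU-side action here, and
 * DMA_NONE is a caller bug.
 */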
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_FROM_DEVICE:
		do_cache_op(paddr, size, __invalidate_dcache_range);
		break;

	case DMA_NONE:
		BUG();
		break;

	default:
		break;
	}
}

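/*
 * Called before the device reads a streaming DMA buffer: write dirty
 * cache lines back to memory so the device sees the CPU's latest
 * stores.  With a write-through data cache memory is already up to
 * date, hence the XCHAL_DCACHE_IS_WRITEBACK guard.
 */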
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_TO_DEVICE:
		if (XCHAL_DCACHE_IS_WRITEBACK)
			do_cache_op(paddr, size, __flush_dcache_range);
		break;

	case DMA_NONE:
		BUG();
		break;

	default:
		break;
	}
}

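/*
 * With an MMU, low memory is statically mapped twice through KSEG: a
 * cached window at XCHAL_KSEG_CACHED_VADDR and a cache-bypass window
 * at XCHAL_KSEG_BYPASS_VADDR, each XCHAL_KSEG_SIZE bytes long, so the
 * helpers below reduce to address-range checks and pointer arithmetic.
 * Without an MMU the weak stubs in the #else branch merely warn;
 * platforms are expected to override them.
 */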
#ifdef CONFIG_MMU
bool platform_vaddr_cached(const void *p)
{
	unsigned long addr = (unsigned long)p;

	return addr >= XCHAL_KSEG_CACHED_VADDR &&
	       addr - XCHAL_KSEG_CACHED_VADDR < XCHAL_KSEG_SIZE;
}

bool platform_vaddr_uncached(const void *p)
{
	unsigned long addr = (unsigned long)p;

	return addr >= XCHAL_KSEG_BYPASS_VADDR &&
	       addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE;
}

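/*
 * Converting between the two aliases just rebases the pointer from one
 * KSEG window to the other.  For example, assuming the common default
 * layout (cached window at 0xd0000000, bypass window at 0xd8000000;
 * both are configuration dependent), cached 0xd0001000 corresponds to
 * uncached 0xd8001000.
 */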
void *platform_vaddr_to_uncached(void *p)
{
	return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
}

void *platform_vaddr_to_cached(void *p)
{
	return p + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
}
#else
bool __attribute__((weak)) platform_vaddr_cached(const void *p)
{
	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
	return true;
}

bool __attribute__((weak)) platform_vaddr_uncached(const void *p)
{
	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
	return false;
}

void __attribute__((weak)) *platform_vaddr_to_uncached(void *p)
{
	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
	return p;
}

void __attribute__((weak)) *platform_vaddr_to_cached(void *p)
{
	WARN_ONCE(1, "Default %s implementation is used\n", __func__);
	return p;
}
#endif

/*
 * Note: We assume that the full memory space is always mapped to 'kseg'.
 * Otherwise we would have to use page attributes (not implemented).
 */

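/*
 * Allocate a coherent buffer: try CMA first when the caller may block,
 * fall back to the page allocator (forcing GFP_DMA for devices whose
 * coherent mask does not cover full 32-bit addresses), and hand back
 * an uncached view.  Highmem pages get a fresh non-cached remap;
 * lowmem pages reuse the KSEG bypass alias after the cached alias is
 * invalidated.  With DMA_ATTR_NO_KERNEL_MAPPING the bare struct page
 * is returned instead.  On this kernel generation, callers normally
 * reach this hook from dma_alloc_coherent() via the dma-direct code.
 */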
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		gfp_t flag, unsigned long attrs)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = NULL;

	/* ignore region specifiers */

	flag &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		flag |= GFP_DMA;

	if (gfpflags_allow_blocking(flag))
		page = dma_alloc_from_contiguous(dev, count, get_order(size),
						 flag & __GFP_NOWARN);

	if (!page)
		page = alloc_pages(flag, get_order(size));

	if (!page)
		return NULL;

	*handle = phys_to_dma(dev, page_to_phys(page));

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return page;

#ifdef CONFIG_MMU
	if (PageHighMem(page)) {
		void *p;

		p = dma_common_contiguous_remap(page, size, VM_MAP,
				pgprot_noncached(PAGE_KERNEL),
				__builtin_return_address(0));
		if (!p) {
			if (!dma_release_from_contiguous(dev, page, count))
				__free_pages(page, get_order(size));
		}
		return p;
	}
#endif
	BUG_ON(!platform_vaddr_cached(page_address(page)));
	__invalidate_dcache_range((unsigned long)page_address(page), size);
	return platform_vaddr_to_uncached(page_address(page));
}

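/*
 * Undo arch_dma_alloc(): recover the struct page from whichever form
 * the allocation took (bare page, KSEG bypass alias, or highmem remap,
 * which must also be unmapped), then return the pages to CMA or the
 * page allocator.
 */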
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page;

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
		page = vaddr;
	} else if (platform_vaddr_uncached(vaddr)) {
		page = virt_to_page(platform_vaddr_to_cached(vaddr));
	} else {
#ifdef CONFIG_MMU
		dma_common_free_remap(vaddr, size, VM_MAP);
#endif
		page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
	}

	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}