/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <asm/vio.h>
#include <asm/bug.h>
#include <asm/machdep.h>
#include <asm/swiotlb.h>
#include <asm/iommu.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
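
/*
 * As an illustration (a sketch only, not taken from any particular
 * platform), board or bus setup code for a device whose DMA view of RAM
 * does not start at zero could install its offset roughly like this,
 * where the offset value is hypothetical:
 *
 *	set_dma_offset(&pdev->dev, MY_PLATFORM_DRAM_OFFSET);
 *
 * The callbacks below then add that offset, via get_dma_offset(), to
 * every bus address they hand back.
 */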

static u64 __maybe_unused get_pfn_limit(struct device *dev)
{
	u64 pfn = (dev->coherent_dma_mask >> PAGE_SHIFT) + 1;
	struct dev_archdata __maybe_unused *sd = &dev->archdata;

#ifdef CONFIG_SWIOTLB
	if (sd->max_direct_dma_addr && dev->dma_ops == &powerpc_swiotlb_dma_ops)
		pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT);
#endif

	return pfn;
}
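
/*
 * For example, a device with coherent_dma_mask = DMA_BIT_MASK(32) on a
 * 4K-page kernel gets a limit of (0xffffffff >> 12) + 1 = 0x100000 page
 * frames, i.e. the first 4 GiB of memory, assuming swiotlb does not cap
 * it further.
 */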

static int dma_nommu_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
	u64 limit = get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);

	/* Limit fits in the mask, we are good */
	if (mask >= limit)
		return 1;

#ifdef CONFIG_FSL_SOC
	/* Freescale gets another chance via ZONE_DMA/ZONE_DMA32, however
	 * that will have to be refined if/when they support iommus
	 */
	return 1;
#endif
	/* Sorry ... */
	return 0;
#else
	return 1;
#endif
}
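
/*
 * A worked example (hypothetical machine): with 4 GiB of RAM and a zero
 * DMA offset, limit = 0x1_0000_0000 - 1 = 0xffffffff, so a 32-bit mask
 * (DMA_BIT_MASK(32) = 0xffffffff) is accepted, while a 31-bit mask is
 * rejected on 64-bit non-Freescale platforms.
 */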

void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t flag,
				 unsigned long attrs)
{
	void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
	if (ret == NULL)
		return NULL;
	*dma_handle += get_dma_offset(dev);
	return ret;
#else
	struct page *page;
	int node = dev_to_node(dev);
#ifdef CONFIG_FSL_SOC
	u64 pfn = get_pfn_limit(dev);
	int zone;

	/*
	 * This code should be OK on other platforms, but we have drivers that
	 * don't set coherent_dma_mask. As a workaround we just ifdef it. This
	 * whole routine needs some serious cleanup.
	 */

	zone = dma_pfn_limit_to_zone(pfn);
	if (zone < 0) {
		dev_err(dev, "%s: No suitable zone for pfn %#llx\n",
			__func__, pfn);
		return NULL;
	}

	switch (zone) {
	case ZONE_DMA:
		flag |= GFP_DMA;
		break;
#ifdef CONFIG_ZONE_DMA32
	case ZONE_DMA32:
		flag |= GFP_DMA32;
		break;
#endif
	};
#endif /* CONFIG_FSL_SOC */

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = __pa(ret) + get_dma_offset(dev);

	return ret;
#endif
}

void __dma_nommu_free_coherent(struct device *dev, size_t size,
			       void *vaddr, dma_addr_t dma_handle,
			       unsigned long attrs)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag,
				      unsigned long attrs)
{
	struct iommu_table *iommu;

	/* The coherent mask may be smaller than the real mask; check whether
	 * we can really use the direct ops.
	 */
	if (dma_nommu_dma_supported(dev, dev->coherent_dma_mask))
		return __dma_nommu_alloc_coherent(dev, size, dma_handle,
						  flag, attrs);

	/* OK, we can't ... do we have an iommu? If not, fail. */
	iommu = get_iommu_table_base(dev);
	if (!iommu)
		return NULL;

	/* Try to use the iommu */
	return iommu_alloc_coherent(dev, iommu, size, dma_handle,
				    dev->coherent_dma_mask, flag,
				    dev_to_node(dev));
}

static void dma_nommu_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle,
				    unsigned long attrs)
{
	struct iommu_table *iommu;

	/* See comments in dma_nommu_alloc_coherent() */
	if (dma_nommu_dma_supported(dev, dev->coherent_dma_mask))
		return __dma_nommu_free_coherent(dev, size, vaddr, dma_handle,
						 attrs);
	/* Maybe we used an iommu ... */
	iommu = get_iommu_table_base(dev);

	/* If we hit this, we should never have allocated in the first place,
	 * so how come we are freeing?
	 */
	if (WARN_ON(!iommu))
		return;
	iommu_free_coherent(iommu, size, vaddr, dma_handle);
}

int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
			    void *cpu_addr, dma_addr_t handle, size_t size,
			    unsigned long attrs)
{
	unsigned long pfn;

#ifdef CONFIG_NOT_COHERENT_CACHE
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
#else
	pfn = page_to_pfn(virt_to_page(cpu_addr));
#endif
	return remap_pfn_range(vma, vma->vm_start,
			       pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
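
/*
 * A driver does not normally call the hook above directly; a typical
 * path (sketched, not specific to any driver) is its mmap file
 * operation calling the generic helper, e.g.:
 *
 *	return dma_mmap_coherent(dev, vma, cpu_addr, handle, size);
 *
 * which dispatches to the .mmap member of the device's dma_map_ops.
 */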

static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
			    int nents, enum dma_data_direction direction,
			    unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
		sg->dma_length = sg->length;

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
	}

	return nents;
}
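
/*
 * Since the mapping is 1:1 plus the device offset, map_sg cannot fail
 * and always returns nents unchanged.  A caller sees this through the
 * usual interface, for instance (illustrative only):
 *
 *	n = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 * where n == nents on this path.
 */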

static void dma_nommu_unmap_sg(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction direction,
			       unsigned long attrs)
{
}

static u64 dma_nommu_get_required_mask(struct device *dev)
{
	u64 end, mask;

	end = memblock_end_of_DRAM() + get_dma_offset(dev);

	mask = 1ULL << (fls64(end) - 1);
	mask += mask - 1;

	return mask;
}
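
/*
 * Worked example (hypothetical machine): with 4 GiB of RAM and a zero
 * offset, end = 0x1_0000_0000 and fls64(end) = 33, so mask starts as
 * 1ULL << 32 and becomes 0x1_ffff_ffff, i.e. the smallest all-ones mask
 * covering the highest DMA address the device may see.
 */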

static inline dma_addr_t dma_nommu_map_page(struct device *dev,
					    struct page *page,
					    unsigned long offset,
					    size_t size,
					    enum dma_data_direction dir,
					    unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync_page(page, offset, size, dir);

	return page_to_phys(page) + offset + get_dma_offset(dev);
}

static inline void dma_nommu_unmap_page(struct device *dev,
					dma_addr_t dma_address,
					size_t size,
					enum dma_data_direction direction,
					unsigned long attrs)
{
}

#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_nommu_sync_sg(struct device *dev,
				     struct scatterlist *sgl, int nents,
				     enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_nommu_sync_single(struct device *dev,
					 dma_addr_t dma_handle, size_t size,
					 enum dma_data_direction direction)
{
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
#endif

const struct dma_map_ops dma_nommu_ops = {
	.alloc				= dma_nommu_alloc_coherent,
	.free				= dma_nommu_free_coherent,
	.mmap				= dma_nommu_mmap_coherent,
	.map_sg				= dma_nommu_map_sg,
	.unmap_sg			= dma_nommu_unmap_sg,
	.dma_supported			= dma_nommu_dma_supported,
	.map_page			= dma_nommu_map_page,
	.unmap_page			= dma_nommu_unmap_page,
	.get_required_mask		= dma_nommu_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
	.sync_single_for_cpu		= dma_nommu_sync_single,
	.sync_single_for_device		= dma_nommu_sync_single,
	.sync_sg_for_cpu		= dma_nommu_sync_sg,
	.sync_sg_for_device		= dma_nommu_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_nommu_ops);
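
/*
 * Bus or platform code attaches this table to a device before DMA is
 * used.  One plausible sketch (the device pointer here is illustrative):
 *
 *	set_dma_ops(&pdev->dev, &dma_nommu_ops);
 *
 * after which the generic dma_map_*() helpers dispatch to the callbacks
 * above for that device.
 */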

int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask)) {
		/*
		 * We need to special case the direct DMA ops which can
		 * support a fallback for coherent allocations. There
		 * is no dma_ops->set_coherent_mask() so we have to do
		 * things the hard way:
		 */
		if (get_dma_ops(dev) != &dma_nommu_ops ||
		    get_iommu_table_base(dev) == NULL ||
		    !dma_iommu_dma_supported(dev, mask))
			return -EIO;
	}
	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);

int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (ppc_md.dma_set_mask)
		return ppc_md.dma_set_mask(dev, dma_mask);

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		struct pci_controller *phb = pci_bus_to_host(pdev->bus);
		if (phb->controller_ops.dma_set_mask)
			return phb->controller_ops.dma_set_mask(pdev, dma_mask);
	}

	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
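
/*
 * The override order above is: machine-specific hook first, then the
 * PCI host bridge hook, then the generic mask check.  A driver sees
 * none of this and simply follows the usual probe-time pattern, e.g.
 * (illustrative, not tied to any particular driver):
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */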

u64 __dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;

	if (dma_ops->get_required_mask)
		return dma_ops->get_required_mask(dev);

	return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
}

u64 dma_get_required_mask(struct device *dev)
{
	if (ppc_md.dma_get_required_mask)
		return ppc_md.dma_get_required_mask(dev);

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		struct pci_controller *phb = pci_bus_to_host(pdev->bus);
		if (phb->controller_ops.dma_get_required_mask)
			return phb->controller_ops.dma_get_required_mask(pdev);
	}

	return __dma_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
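
/*
 * Drivers use this to decide whether 64-bit addressing is worth
 * enabling at all, e.g. (illustrative sketch):
 *
 *	if (dma_get_required_mask(dev) > DMA_BIT_MASK(32))
 *		use_64bit_dma = true;
 */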

static int __init dma_init(void)
{
#ifdef CONFIG_IBMVIO
	dma_debug_add_bus(&vio_bus_type);
#endif

	return 0;
}
fs_initcall(dma_init);