// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without providing cache
 * coherence.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/scatterlist.h>

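/*
 * Sync a single mapping for device access: translate the dma address back
 * to a physical address and have the architecture push the CPU cache
 * contents out (and/or invalidate them) for that range.
 */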
static void dma_noncoherent_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
}

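/*
 * Scatterlist variant of the above.  Each entry already carries its page,
 * so sg_phys() can be used directly instead of dma_to_phys().
 */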
static void dma_noncoherent_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}

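/*
 * Map a single page through the dma-direct helper, then perform the
 * for-device cache maintenance unless the mapping failed or the caller
 * opted out with DMA_ATTR_SKIP_CPU_SYNC.
 */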
static dma_addr_t dma_noncoherent_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t addr;

	addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(dev, page_to_phys(page) + offset,
				size, dir);
	return addr;
}

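/*
 * Map a scatterlist through the dma-direct helper and sync all mapped
 * entries for the device, again honouring DMA_ATTR_SKIP_CPU_SYNC.
 */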
static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	nents = dma_direct_map_sg(dev, sgl, nents, dir, attrs);
	if (nents > 0 && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_noncoherent_sync_sg_for_device(dev, sgl, nents, dir);
	return nents;
}

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
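/*
 * Make device-written data visible to the CPU.  Architectures that only
 * select one of the two config symbols above get an empty inline stub for
 * the other hook, so both calls are safe here.
 */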
static void dma_noncoherent_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
	arch_sync_dma_for_cpu_all(dev);
}

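/*
 * Scatterlist variant: sync each entry for the CPU, then run the
 * whole-cache hook once for the entire list.
 */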
static void dma_noncoherent_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
	arch_sync_dma_for_cpu_all(dev);
}

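/*
 * There is no direct-mapping state to tear down, so unmapping reduces to
 * the for-CPU cache maintenance.
 */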
static void dma_noncoherent_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_noncoherent_sync_single_for_cpu(dev, addr, size, dir);
}

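/*
 * Likewise for scatterlists: unmap is just the for-CPU sync.
 */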
static void dma_noncoherent_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_noncoherent_sync_sg_for_cpu(dev, sgl, nents, dir);
}
#endif

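/*
 * Allocation, mmap and cache_sync come from the architecture, address
 * setup from the dma-direct code, and the wrappers above add the cache
 * maintenance.  When neither for-CPU sync symbol is configured, the
 * corresponding entries stay NULL, which the dma-mapping core treats as
 * "nothing to do".
 */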
const struct dma_map_ops dma_noncoherent_ops = {
	.alloc			= arch_dma_alloc,
	.free			= arch_dma_free,
	.mmap			= arch_dma_mmap,
	.sync_single_for_device	= dma_noncoherent_sync_single_for_device,
	.sync_sg_for_device	= dma_noncoherent_sync_sg_for_device,
	.map_page		= dma_noncoherent_map_page,
	.map_sg			= dma_noncoherent_map_sg,
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
	.sync_single_for_cpu	= dma_noncoherent_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_noncoherent_sync_sg_for_cpu,
	.unmap_page		= dma_noncoherent_unmap_page,
	.unmap_sg		= dma_noncoherent_unmap_sg,
#endif
	.dma_supported		= dma_direct_supported,
	.mapping_error		= dma_direct_mapping_error,
	.cache_sync		= arch_dma_cache_sync,
};
EXPORT_SYMBOL(dma_noncoherent_ops);
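
/*
 * Usage sketch (an assumption for illustration, not code from this file):
 * an architecture that selects CONFIG_DMA_NONCOHERENT_OPS implements
 * arch_dma_alloc(), arch_dma_free(), arch_dma_mmap(), arch_dma_cache_sync()
 * and the arch_sync_dma_* hooks, then hands out this table from its
 * <asm/dma-mapping.h>, roughly:
 *
 *	static inline const struct dma_map_ops *
 *	get_arch_dma_ops(struct bus_type *bus)
 *	{
 *		return &dma_noncoherent_ops;
 *	}
 *
 * The exact wiring differs per architecture; the snippet above is only a
 * sketch.
 */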