/*
 * Copyright 2023 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <string.h>

#include <zephyr/drivers/dma.h>
#include <zephyr/logging/log.h>
#include <zephyr/cache.h>

/* used for driver binding */
#define DT_DRV_COMPAT nxp_sof_host_dma
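
/* a matching device tree node might look roughly like this
 * (node name and channel count are illustrative, not taken
 * from an actual board file):
 *
 *	host_dma: dma-controller {
 *		compatible = "nxp,sof-host-dma";
 *		dma-channels = <32>;
 *	};
 */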

/* macros used to parse DTS properties */
#define IDENTITY_VARGS(V, ...) IDENTITY(V)

#define _SOF_HOST_DMA_CHANNEL_INDEX_ARRAY(inst)\
	LISTIFY(DT_INST_PROP_OR(inst, dma_channels, 0), IDENTITY_VARGS, (,))

#define _SOF_HOST_DMA_CHANNEL_DECLARE(idx) {}

#define SOF_HOST_DMA_CHANNELS_DECLARE(inst)\
	FOR_EACH(_SOF_HOST_DMA_CHANNEL_DECLARE,\
		 (,), _SOF_HOST_DMA_CHANNEL_INDEX_ARRAY(inst))
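
/* for example, with "dma-channels = <2>" in the DTS node,
 * SOF_HOST_DMA_CHANNELS_DECLARE(0) expands to the initializer
 * list "{}, {}", i.e. one zero-initialized channel descriptor
 * per DTS channel (consumed by the channels[] array below).
 */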

LOG_MODULE_REGISTER(nxp_sof_host_dma);

/* note: This driver doesn't attempt to provide
 * a generic software-based DMA engine implementation.
 * As its name suggests, its only usage is in SOF
 * (Sound Open Firmware) for NXP platforms which are
 * able to access the host memory directly from the
 * core on which the firmware is running.
 */

enum channel_state {
	CHAN_STATE_INIT = 0,
	CHAN_STATE_CONFIGURED,
};
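
/* the only valid transitions are INIT -> CONFIGURED and
 * CONFIGURED -> CONFIGURED (i.e. reconfiguring a channel),
 * as enforced by channel_change_state() below.
 */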

struct sof_host_dma_channel {
	uint32_t src;
	uint32_t dest;
	uint32_t size;
	uint32_t direction;
	enum channel_state state;
};

struct sof_host_dma_data {
	/* this needs to be first, as the DMA framework expects
	 * the driver data to start with a struct dma_context
	 */
	struct dma_context ctx;
	atomic_t channel_flags;
	struct sof_host_dma_channel *channels;
};

static int channel_change_state(struct sof_host_dma_channel *chan,
				enum channel_state next)
{
	enum channel_state prev = chan->state;

	/* validate transition */
	switch (prev) {
	case CHAN_STATE_INIT:
	case CHAN_STATE_CONFIGURED:
		if (next != CHAN_STATE_CONFIGURED) {
			return -EPERM;
		}
		break;
	default:
		LOG_ERR("invalid channel previous state: %d", prev);
		return -EINVAL;
	}

	chan->state = next;

	return 0;
}

static int sof_host_dma_reload(const struct device *dev, uint32_t chan_id,
			       uint32_t src, uint32_t dst, size_t size)
{
	ARG_UNUSED(src);
	ARG_UNUSED(dst);
	ARG_UNUSED(size);

	struct sof_host_dma_data *data;
	struct sof_host_dma_channel *chan;
	int ret;

	data = dev->data;

	if (chan_id >= data->ctx.dma_channels) {
		LOG_ERR("channel %d is not a valid channel ID", chan_id);
		return -EINVAL;
	}

	/* fetch channel data */
	chan = &data->channels[chan_id];

	/* validate state */
	if (chan->state != CHAN_STATE_CONFIGURED) {
		LOG_ERR("attempting to reload unconfigured DMA channel %d", chan_id);
		return -EINVAL;
	}

	if (chan->direction == HOST_TO_MEMORY) {
		/* the host may have modified the region we're about to copy
		 * to local memory. In this case, the data cache holds stale
		 * data so invalidate it to force a read from the main memory.
		 */
		ret = sys_cache_data_invd_range(UINT_TO_POINTER(chan->src),
						chan->size);
		if (ret < 0) {
			LOG_ERR("failed to invalidate data cache range");
			return ret;
		}
	}

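	/* the "transfer" itself: a synchronous CPU copy of the region
	 * latched at configuration time, done directly by the core
	 * running the firmware.
	 */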
	memcpy(UINT_TO_POINTER(chan->dest), UINT_TO_POINTER(chan->src), chan->size);

	/*
	 * MEMORY_TO_HOST transfer: force range to main memory so that
	 * the host doesn't read any stale data.
	 *
	 * HOST_TO_MEMORY transfer:
	 *	SOF assumes that data is copied from host to local memory via
	 *	DMA, which is not the case for imx platforms. For these
	 *	platforms, the DSP is in charge of copying the data from host to
	 *	local memory.
	 *
	 *	Additionally, because of the aforementioned assumption,
	 *	SOF performs a cache invalidation on the destination
	 *	memory chunk before data is copied further down the
	 *	pipeline.
	 *
	 *	If the destination memory chunk is cacheable, what seems
	 *	to happen is that the invalidation operation forces the
	 *	DSP to fetch the data from RAM instead of the cache.
	 *	Since a writeback was never performed on the destination
	 *	memory chunk, the RAM will contain stale data.
	 *
	 *	With this in mind, the writeback should also be
	 *	performed in HOST_TO_MEMORY transfers (aka playback)
	 *	to keep the cache and RAM in sync. This way, the DSP
	 *	will read the correct data from RAM (when forced to do
	 *	so by the cache invalidation operation).
	 *
	 *	TODO: this is NOT optimal since we perform two unneeded
	 *	cache management operations and should be addressed in
	 *	SOF at some point.
	 */
	ret = sys_cache_data_flush_range(UINT_TO_POINTER(chan->dest), chan->size);
	if (ret < 0) {
		LOG_ERR("failed to flush data cache range");
		return ret;
	}

	return 0;
}

static int sof_host_dma_config(const struct device *dev, uint32_t chan_id,
			       struct dma_config *config)
{
	struct sof_host_dma_data *data;
	struct sof_host_dma_channel *chan;
	int ret;

	data = dev->data;

	if (chan_id >= data->ctx.dma_channels) {
		LOG_ERR("channel %d is not a valid channel ID", chan_id);
		return -EINVAL;
	}

	/* fetch channel data */
	chan = &data->channels[chan_id];

	/* attempt a state transition */
	ret = channel_change_state(chan, CHAN_STATE_CONFIGURED);
	if (ret < 0) {
		LOG_ERR("failed to change channel %d's state to CONFIGURED", chan_id);
		return ret;
	}

	/* SG configurations are not currently supported */
	if (config->block_count != 1) {
		LOG_ERR("invalid number of blocks: %d", config->block_count);
		return -EINVAL;
	}

	if (!config->head_block->source_address) {
		LOG_ERR("got NULL source address");
		return -EINVAL;
	}

	if (!config->head_block->dest_address) {
		LOG_ERR("got NULL destination address");
		return -EINVAL;
	}

	if (!config->head_block->block_size) {
		LOG_ERR("got 0 bytes to copy");
		return -EINVAL;
	}

	/* for now, only H2M and M2H transfers are supported */
	if (config->channel_direction != HOST_TO_MEMORY &&
	    config->channel_direction != MEMORY_TO_HOST) {
		LOG_ERR("invalid channel direction: %d",
			config->channel_direction);
		return -EINVAL;
	}

	/* latch onto the passed configuration */
	chan->src = config->head_block->source_address;
	chan->dest = config->head_block->dest_address;
	chan->size = config->head_block->block_size;
	chan->direction = config->channel_direction;

	LOG_DBG("configured channel %d with SRC 0x%x DST 0x%x SIZE 0x%x",
		chan_id, chan->src, chan->dest, chan->size);

	return 0;
}

static int sof_host_dma_start(const struct device *dev, uint32_t chan_id)
{
	/* nothing to be done here: the copy is performed synchronously
	 * during sof_host_dma_reload().
	 */
	return 0;
}

static int sof_host_dma_stop(const struct device *dev, uint32_t chan_id)
{
	/* nothing to be done here */
	return 0;
}

static int sof_host_dma_suspend(const struct device *dev, uint32_t chan_id)
{
	/* nothing to be done here */
	return 0;
}

static int sof_host_dma_resume(const struct device *dev, uint32_t chan_id)
{
	/* nothing to be done here */
	return 0;
}

static int sof_host_dma_get_status(const struct device *dev,
				   uint32_t chan_id, struct dma_status *stat)
{
	/* nothing to be done here */
	return 0;
}

static int sof_host_dma_get_attribute(const struct device *dev, uint32_t type, uint32_t *val)
{
	switch (type) {
	case DMA_ATTR_COPY_ALIGNMENT:
	case DMA_ATTR_BUFFER_SIZE_ALIGNMENT:
	case DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT:
		*val = CONFIG_DMA_NXP_SOF_HOST_DMA_ALIGN;
		break;
	default:
		LOG_ERR("invalid attribute type: %d", type);
		return -EINVAL;
	}

	return 0;
}
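
/* illustrative only: a consumer would query the required alignment
 * through the generic DMA API, roughly as follows (ret and align are
 * placeholder variables):
 *
 *	uint32_t align;
 *
 *	ret = dma_get_attribute(dev, DMA_ATTR_COPY_ALIGNMENT, &align);
 */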

static DEVICE_API(dma, sof_host_dma_api) = {
	.reload = sof_host_dma_reload,
	.config = sof_host_dma_config,
	.start = sof_host_dma_start,
	.stop = sof_host_dma_stop,
	.suspend = sof_host_dma_suspend,
	.resume = sof_host_dma_resume,
	.get_status = sof_host_dma_get_status,
	.get_attribute = sof_host_dma_get_attribute,
};
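
/* illustrative usage from the consumer (SOF) side; a sketch only, in
 * which host_buf, local_buf, copy_size and chan are placeholders:
 *
 *	struct dma_block_config block = {
 *		.source_address = host_buf,
 *		.dest_address = local_buf,
 *		.block_size = copy_size,
 *	};
 *	struct dma_config cfg = {
 *		.channel_direction = HOST_TO_MEMORY,
 *		.block_count = 1,
 *		.head_block = &block,
 *	};
 *
 *	ret = dma_config(dev, chan, &cfg);
 *
 *	then, once per period:
 *
 *	ret = dma_reload(dev, chan, 0, 0, 0);
 *
 * note that reload()'s src/dst/size arguments are ignored: the driver
 * always copies the region latched during configuration.
 */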

static int sof_host_dma_init(const struct device *dev)
{
	struct sof_host_dma_data *data = dev->data;

	data->channel_flags = ATOMIC_INIT(0);
	data->ctx.atomic = &data->channel_flags;

	return 0;
}

static struct sof_host_dma_channel channels[] = {
	SOF_HOST_DMA_CHANNELS_DECLARE(0),
};

static struct sof_host_dma_data sof_host_dma_data = {
	.ctx.magic = DMA_MAGIC,
	.ctx.dma_channels = ARRAY_SIZE(channels),
	.channels = channels,
};

/* assumption: only 1 SOF_HOST_DMA instance */
DEVICE_DT_INST_DEFINE(0, sof_host_dma_init, NULL,
		      &sof_host_dma_data, NULL,
		      PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY,
		      &sof_host_dma_api);