/*
 * Copyright 2020-2024 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @brief Common part of the DMA drivers for the NXP i.MX RT series.
 */

#define DT_DRV_COMPAT nxp_mcux_edma

#include <errno.h>
#include <soc.h>
#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/devicetree.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/sys/barrier.h>

#include "dma_mcux_edma.h"

#include <zephyr/logging/log.h>
#include <zephyr/irq.h>

LOG_MODULE_REGISTER(dma_mcux_edma, CONFIG_DMA_LOG_LEVEL);
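
/*
 * DMA_MCUX_HAS_CHANNEL_GAP below expands the per-instance check into an OR
 * chain terminated by 0, i.e. "has_prop(0) || has_prop(1) || ... || 0", so it
 * evaluates to 1 when any enabled instance declares a channel-gap property
 * and to 0 otherwise.
 */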
#define HAS_CHANNEL_GAP(n)		DT_INST_NODE_HAS_PROP(n, channel_gap) ||
#define DMA_MCUX_HAS_CHANNEL_GAP	(DT_INST_FOREACH_STATUS_OKAY(HAS_CHANNEL_GAP) 0)

struct dma_mcux_edma_config {
	DMA_Type *base;
#if defined(FSL_FEATURE_SOC_DMAMUX_COUNT) && FSL_FEATURE_SOC_DMAMUX_COUNT
	DMAMUX_Type **dmamux_base;
#endif
	uint8_t channels_per_mux;
	uint8_t dmamux_reg_offset;
	int dma_requests;
	int dma_channels; /* number of channels */
#if DMA_MCUX_HAS_CHANNEL_GAP
	uint32_t channel_gap[2];
#endif
	void (*irq_config_func)(const struct device *dev);
	edma_tcd_t (*tcdpool)[CONFIG_DMA_TCD_QUEUE_SIZE];
};

#ifdef CONFIG_HAS_MCUX_CACHE

#ifdef CONFIG_DMA_MCUX_USE_DTCM_FOR_DMA_DESCRIPTORS

#if DT_NODE_HAS_STATUS_OKAY(DT_CHOSEN(zephyr_dtcm))
#define EDMA_TCDPOOL_CACHE_ATTR __dtcm_noinit_section
#else /* DT_NODE_HAS_STATUS_OKAY(DT_CHOSEN(zephyr_dtcm)) */
#error Selected DTCM for MCUX DMA descriptors but no DTCM section.
#endif /* DT_NODE_HAS_STATUS_OKAY(DT_CHOSEN(zephyr_dtcm)) */

#elif defined(CONFIG_NOCACHE_MEMORY)
#define EDMA_TCDPOOL_CACHE_ATTR __nocache
#else
/*
 * Note: the TCD pool *must* be in non-cacheable memory. All of the NXP SoCs
 * that support caching memory have their default SRAM regions defined as
 * non-cached memory regions, but if the default SRAM region is changed, the
 * EDMA TCD pools would be moved to cacheable memory, resulting in DMA cache
 * coherency issues.
 */

#define EDMA_TCDPOOL_CACHE_ATTR

#endif /* CONFIG_DMA_MCUX_USE_DTCM_FOR_DMA_DESCRIPTORS */

#else /* CONFIG_HAS_MCUX_CACHE */

#define EDMA_TCDPOOL_CACHE_ATTR

#endif /* CONFIG_HAS_MCUX_CACHE */

struct dma_mcux_channel_transfer_edma_settings {
	uint32_t source_data_size;
	uint32_t dest_data_size;
	uint32_t source_burst_length;
	uint32_t dest_burst_length;
	enum dma_channel_direction direction;
	edma_transfer_type_t transfer_type;
	bool valid;
	/* Indicates whether the channel uses dynamic SG mode or loop (cyclic) SG mode. */
	bool cyclic;
	/* These parameters are for cyclic mode only.
	 * Index of the next empty TCD that can be used for a transfer.
	 */
	volatile uint8_t write_idx;
	/* Number of empty TCDs in the TCD pool (available for writing transfer parameters). */
	volatile uint8_t empty_tcds;
};

struct call_back {
	edma_transfer_config_t transferConfig;
	edma_handle_t edma_handle;
	const struct device *dev;
	void *user_data;
	dma_callback_t dma_callback;
	struct dma_mcux_channel_transfer_edma_settings transfer_settings;
	bool busy;
};

struct dma_mcux_edma_data {
	struct dma_context dma_ctx;
	struct call_back *data_cb;
	atomic_t *channels_atomic;
};

#define DEV_CFG(dev) \
	((const struct dma_mcux_edma_config *const)dev->config)
#define DEV_DATA(dev) ((struct dma_mcux_edma_data *)dev->data)
#define DEV_BASE(dev) ((DMA_Type *)DEV_CFG(dev)->base)

#define DEV_CHANNEL_DATA(dev, ch) \
	((struct call_back *)(&(DEV_DATA(dev)->data_cb[ch])))

#define DEV_EDMA_HANDLE(dev, ch) \
	((edma_handle_t *)(&(DEV_CHANNEL_DATA(dev, ch)->edma_handle)))

#if defined(FSL_FEATURE_SOC_DMAMUX_COUNT) && FSL_FEATURE_SOC_DMAMUX_COUNT
#define DEV_DMAMUX_BASE(dev, idx) ((DMAMUX_Type *)DEV_CFG(dev)->dmamux_base[idx])
#define DEV_DMAMUX_IDX(dev, ch)	(ch / DEV_CFG(dev)->channels_per_mux)

#define DEV_DMAMUX_CHANNEL(dev, ch) \
	(ch % DEV_CFG(dev)->channels_per_mux) ^ (DEV_CFG(dev)->dmamux_reg_offset)
#endif

/* Definitions for SW TCD fields */
#if defined(CONFIG_DMA_MCUX_EDMA) || defined(CONFIG_DMA_MCUX_EDMA_V3)
#define EDMA_TCD_SADDR(tcd, flag)     ((tcd)->SADDR)
#define EDMA_TCD_DADDR(tcd, flag)     ((tcd)->DADDR)
#define EDMA_TCD_BITER(tcd, flag)     ((tcd)->BITER)
#define EDMA_TCD_CITER(tcd, flag)     ((tcd)->CITER)
#define EDMA_TCD_CSR(tcd, flag)       ((tcd)->CSR)
#define EDMA_TCD_DLAST_SGA(tcd, flag) ((tcd)->DLAST_SGA)
#if defined(CONFIG_DMA_MCUX_EDMA_V3)
#define DMA_CSR_DREQ                  DMA_TCD_CSR_DREQ
#define EDMA_HW_TCD_CH_ACTIVE_MASK    (DMA_CH_CSR_ACTIVE_MASK)
#else
#define EDMA_HW_TCD_CH_ACTIVE_MASK    (DMA_CSR_ACTIVE_MASK)
#endif /* CONFIG_DMA_MCUX_EDMA_V3 */
#elif defined(CONFIG_DMA_MCUX_EDMA_V4)
/* The above macros are already defined in fsl_edma_core.h */
#define EDMA_HW_TCD_CH_ACTIVE_MASK (DMA_CH_CSR_ACTIVE_MASK)
#endif

/* Definitions for HW TCD fields */
#ifdef CONFIG_DMA_MCUX_EDMA
#define EDMA_HW_TCD_SADDR(dev, ch) (DEV_BASE(dev)->TCD[ch].SADDR)
#define EDMA_HW_TCD_DADDR(dev, ch) (DEV_BASE(dev)->TCD[ch].DADDR)
#define EDMA_HW_TCD_BITER(dev, ch) (DEV_BASE(dev)->TCD[ch].BITER_ELINKNO)
#define EDMA_HW_TCD_CITER(dev, ch) (DEV_BASE(dev)->TCD[ch].CITER_ELINKNO)
#define EDMA_HW_TCD_CSR(dev, ch)   (DEV_BASE(dev)->TCD[ch].CSR)
#elif defined(CONFIG_DMA_MCUX_EDMA_V3) || defined(CONFIG_DMA_MCUX_EDMA_V4)
#define EDMA_HW_TCD_SADDR(dev, ch) (DEV_BASE(dev)->CH[ch].TCD_SADDR)
#define EDMA_HW_TCD_DADDR(dev, ch) (DEV_BASE(dev)->CH[ch].TCD_DADDR)
#define EDMA_HW_TCD_BITER(dev, ch) (DEV_BASE(dev)->CH[ch].TCD_BITER_ELINKNO)
#define EDMA_HW_TCD_CITER(dev, ch) (DEV_BASE(dev)->CH[ch].TCD_CITER_ELINKNO)
#define EDMA_HW_TCD_CSR(dev, ch)   (DEV_BASE(dev)->CH[ch].TCD_CSR)
#endif

/*
 * The hardware channel (which takes the channel gap into account) is used when
 * accessing DMA registers. The data structures in the shim driver keep using
 * the primitive channel number.
 */
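/*
 * For example (hypothetical values): with channel-gap = <4 7>, primitive
 * channels 0..3 map to hardware channels 0..3, while primitive channel 4 maps
 * to hardware channel 8, i.e. hardware channels 4..7 are skipped.
 */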
static ALWAYS_INLINE uint32_t dma_mcux_edma_add_channel_gap(const struct device *dev,
							    uint32_t channel)
{
#if DMA_MCUX_HAS_CHANNEL_GAP
	const struct dma_mcux_edma_config *config = DEV_CFG(dev);

	return (channel < config->channel_gap[0]) ? channel :
		(channel + 1 + config->channel_gap[1] - config->channel_gap[0]);
#else
	ARG_UNUSED(dev);
	return channel;
#endif
}

static ALWAYS_INLINE uint32_t dma_mcux_edma_remove_channel_gap(const struct device *dev,
								uint32_t channel)
{
#if DMA_MCUX_HAS_CHANNEL_GAP
	const struct dma_mcux_edma_config *config = DEV_CFG(dev);

	return (channel < config->channel_gap[0]) ? channel :
		(channel + config->channel_gap[0] - config->channel_gap[1] - 1);
#else
	ARG_UNUSED(dev);
	return channel;
#endif
}

static bool data_size_valid(const size_t data_size)
{
	return (data_size == 4U || data_size == 2U ||
		data_size == 1U || data_size == 8U ||
		data_size == 16U || data_size == 32U
#if defined(CONFIG_DMA_MCUX_EDMA_V3) || defined(CONFIG_DMA_MCUX_EDMA_V4)
		|| data_size == 64U
#endif
		);
}

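/*
 * Completion callback invoked from the MCUX EDMA handle. The Zephyr user
 * callback receives DMA_STATUS_COMPLETE when a block completes (cyclic mode
 * reports completion on every major loop), and -EIO otherwise.
 */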
static void nxp_edma_callback(edma_handle_t *handle, void *param, bool transferDone,
			      uint32_t tcds)
{
	int ret = -EIO;
	struct call_back *data = (struct call_back *)param;
	uint32_t channel = dma_mcux_edma_remove_channel_gap(data->dev, handle->channel);

	if (data->transfer_settings.cyclic) {
		data->transfer_settings.empty_tcds++;
		/* In loop mode, the DMA is always busy */
		data->busy = true;
		ret = DMA_STATUS_COMPLETE;
	} else if (transferDone) {
		/* DMA is no longer busy when there are no remaining TCDs to transfer */
		data->busy = (handle->tcdPool != NULL) && (handle->tcdUsed > 0);
		ret = DMA_STATUS_COMPLETE;
	}
	LOG_DBG("transfer %d", tcds);
	data->dma_callback(data->dev, data->user_data, channel, ret);
}

static void dma_mcux_edma_irq_handler(const struct device *dev, uint32_t channel)
{
	uint32_t hw_channel = dma_mcux_edma_add_channel_gap(dev, channel);
	uint32_t flag = EDMA_GetChannelStatusFlags(DEV_BASE(dev), hw_channel);

	if (flag & kEDMA_InterruptFlag) {
		LOG_DBG("IRQ OCCURRED");
		/* The EDMA interrupt flag is cleared here */
		EDMA_HandleIRQ(DEV_EDMA_HANDLE(dev, channel));
		LOG_DBG("IRQ DONE");
	}

#if DT_INST_PROP(0, no_error_irq)
	/* The channel shares the same IRQ for error and transfer completion */
	else if (flag & kEDMA_ErrorFlag) {
		EDMA_ClearChannelStatusFlags(DEV_BASE(dev), channel, 0xFFFFFFFF);
		EDMA_AbortTransfer(DEV_EDMA_HANDLE(dev, channel));
		DEV_CHANNEL_DATA(dev, channel)->busy = false;
		LOG_INF("channel %d error status is 0x%x", channel, flag);
	}
#endif
}

#if !DT_INST_PROP(0, no_error_irq)
static void dma_mcux_edma_error_irq_handler(const struct device *dev)
{
	int i = 0;
	uint32_t flag = 0;
	uint32_t hw_channel;

	for (i = 0; i < DEV_CFG(dev)->dma_channels; i++) {
		if (DEV_CHANNEL_DATA(dev, i)->busy) {
			hw_channel = dma_mcux_edma_add_channel_gap(dev, i);
			flag = EDMA_GetChannelStatusFlags(DEV_BASE(dev), hw_channel);
			EDMA_ClearChannelStatusFlags(DEV_BASE(dev), hw_channel, 0xFFFFFFFF);
			EDMA_AbortTransfer(DEV_EDMA_HANDLE(dev, i));
			DEV_CHANNEL_DATA(dev, i)->busy = false;
			LOG_INF("channel %d error status is 0x%x", hw_channel, flag);
		}
	}

#if defined(CONFIG_CPU_CORTEX_M4)
	barrier_dsync_fence_full();
#endif
}
#endif

#if DT_ANY_INST_HAS_PROP_STATUS_OKAY(channels_shared_irq_mask)
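/*
 * Shared-IRQ dispatch: the channels-shared-irq-mask property provides one
 * group of mask_width 32-bit words per IRQ line, and the set bits of a group
 * select the channels served by that IRQ. As a hypothetical example, with
 * 32 channels (mask_width = 1) and masks <0x0000000F 0x000000F0>, IRQ index 0
 * would serve channels 0-3 and IRQ index 1 channels 4-7.
 */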
static void dma_mcux_edma_multi_channels_irq_handler(const struct device *dev, uint32_t idx,
			uint32_t *buf, uint32_t mask_width)
{
	uint32_t *num = &buf[mask_width * idx];
	uint32_t count = 0;

	for (int _i = 0; _i < mask_width; _i++) {
		uint32_t value = (*num);

		/* Restart the channel index at the word boundary so sparse
		 * mask words do not shift the channels of the following words.
		 */
		count = _i * 32;

		while (value > 0) {
			if ((value & 0x1) == 1) {
				dma_mcux_edma_irq_handler(dev, count);
			}
			value = value >> 1;
			count++;
		}
		num++;
	}
}
#endif

/* Configure a channel */
static int dma_mcux_edma_configure(const struct device *dev, uint32_t channel,
				   struct dma_config *config)
{
	/* Check for invalid parameters before dereferencing them. */
	if (NULL == dev || NULL == config) {
		return -EINVAL;
	}

	edma_handle_t *p_handle = DEV_EDMA_HANDLE(dev, channel);
	struct call_back *data = DEV_CHANNEL_DATA(dev, channel);
	struct dma_block_config *block_config = config->head_block;
	uint32_t slot = config->dma_slot;
	uint32_t hw_channel;
	edma_transfer_type_t transfer_type;
	unsigned int key;
	int ret = 0;
	edma_tcd_t *tcd = NULL;

	if (slot >= DEV_CFG(dev)->dma_requests) {
		LOG_ERR("dma slot %d is out of range", slot);
		return -ENOTSUP;
	}

	if (channel >= DEV_CFG(dev)->dma_channels) {
		LOG_ERR("DMA channel %d is out of range", channel);
		return -EINVAL;
	}

	hw_channel = dma_mcux_edma_add_channel_gap(dev, channel);
#if defined(FSL_FEATURE_SOC_DMAMUX_COUNT) && FSL_FEATURE_SOC_DMAMUX_COUNT
	uint8_t dmamux_idx, dmamux_channel;

	dmamux_idx = DEV_DMAMUX_IDX(dev, channel);
	dmamux_channel = DEV_DMAMUX_CHANNEL(dev, channel);
#endif
	data->transfer_settings.valid = false;

	switch (config->channel_direction) {
	case MEMORY_TO_MEMORY:
		transfer_type = kEDMA_MemoryToMemory;
		break;
	case MEMORY_TO_PERIPHERAL:
		transfer_type = kEDMA_MemoryToPeripheral;
		break;
	case PERIPHERAL_TO_MEMORY:
		transfer_type = kEDMA_PeripheralToMemory;
		break;
	case PERIPHERAL_TO_PERIPHERAL:
		transfer_type = kEDMA_PeripheralToPeripheral;
		break;
	default:
		LOG_ERR("unsupported transfer direction");
		return -EINVAL;
	}

	if (!data_size_valid(config->source_data_size)) {
		LOG_ERR("Source unit size error, %d", config->source_data_size);
		return -EINVAL;
	}

	if (!data_size_valid(config->dest_data_size)) {
		LOG_ERR("Dest unit size error, %d", config->dest_data_size);
		return -EINVAL;
	}

	if (block_config->source_gather_en || block_config->dest_scatter_en) {
		if (config->block_count > CONFIG_DMA_TCD_QUEUE_SIZE) {
			LOG_ERR("set CONFIG_DMA_TCD_QUEUE_SIZE to at least %d",
				config->block_count);
			return -EINVAL;
		}
	}

	data->transfer_settings.source_data_size = config->source_data_size;
	data->transfer_settings.dest_data_size = config->dest_data_size;
	data->transfer_settings.source_burst_length = config->source_burst_length;
	data->transfer_settings.dest_burst_length = config->dest_burst_length;
	data->transfer_settings.direction = config->channel_direction;
	data->transfer_settings.transfer_type = transfer_type;
	data->transfer_settings.valid = true;
	data->transfer_settings.cyclic = config->cyclic;

	/* Lock and page in the channel configuration */
	key = irq_lock();

#if defined(FSL_FEATURE_SOC_DMAMUX_COUNT) && FSL_FEATURE_SOC_DMAMUX_COUNT

#if DT_INST_PROP(0, nxp_a_on)
	if (config->source_handshake || config->dest_handshake ||
	    transfer_type == kEDMA_MemoryToMemory) {
		/* A software trigger keeps the channel always on */
		LOG_DBG("ALWAYS ON");
		DMAMUX_EnableAlwaysOn(DEV_DMAMUX_BASE(dev, dmamux_idx), dmamux_channel, true);
	} else {
		DMAMUX_SetSource(DEV_DMAMUX_BASE(dev, dmamux_idx), dmamux_channel, slot);
	}
#else
	DMAMUX_SetSource(DEV_DMAMUX_BASE(dev, dmamux_idx), dmamux_channel, slot);
#endif

	/* dam_imx_rt_set_channel_priority(dev, channel, config); */
	DMAMUX_EnableChannel(DEV_DMAMUX_BASE(dev, dmamux_idx), dmamux_channel);

#endif

	if (data->busy) {
		EDMA_AbortTransfer(p_handle);
	}
	EDMA_ResetChannel(DEV_BASE(dev), hw_channel);
	EDMA_CreateHandle(p_handle, DEV_BASE(dev), hw_channel);
	EDMA_SetCallback(p_handle, nxp_edma_callback, (void *)data);

#if defined(FSL_FEATURE_EDMA_HAS_CHANNEL_MUX) && FSL_FEATURE_EDMA_HAS_CHANNEL_MUX
	/* First release any peripheral previously associated with this channel */
	EDMA_SetChannelMux(DEV_BASE(dev), hw_channel, 0);
	EDMA_SetChannelMux(DEV_BASE(dev), hw_channel, slot);
#endif

	LOG_DBG("channel is %d", channel);
	EDMA_EnableChannelInterrupts(DEV_BASE(dev), hw_channel, kEDMA_ErrorInterruptEnable);

	/* Initialize the whole TCD pool to 0 */
	for (int i = 0; i < CONFIG_DMA_TCD_QUEUE_SIZE; i++) {
		memset(&DEV_CFG(dev)->tcdpool[channel][i], 0,
		       sizeof(DEV_CFG(dev)->tcdpool[channel][i]));
	}

	if (block_config->source_gather_en || block_config->dest_scatter_en) {
		if (config->cyclic) {
			/* Loop SG mode */
			data->transfer_settings.write_idx = 0;
			data->transfer_settings.empty_tcds = CONFIG_DMA_TCD_QUEUE_SIZE;

			EDMA_PrepareTransfer(
				&data->transferConfig, (void *)block_config->source_address,
				config->source_data_size, (void *)block_config->dest_address,
				config->dest_data_size, config->source_burst_length,
				block_config->block_size, transfer_type);

			/* Initialize all TCDs with the parameters in the transfer
			 * config and link them.
			 */
			for (int i = 0; i < CONFIG_DMA_TCD_QUEUE_SIZE; i++) {
				EDMA_TcdSetTransferConfig(
					&DEV_CFG(dev)->tcdpool[channel][i], &data->transferConfig,
					&DEV_CFG(dev)->tcdpool[channel][(i + 1) %
									CONFIG_DMA_TCD_QUEUE_SIZE]);

				/* Enable the major loop interrupt. */
				EDMA_TcdEnableInterrupts(&DEV_CFG(dev)->tcdpool[channel][i],
							 kEDMA_MajorInterruptEnable);
			}

			/* Load valid transfer parameters */
			while (block_config != NULL && data->transfer_settings.empty_tcds > 0) {
				tcd = &(DEV_CFG(dev)->tcdpool[channel]
							     [data->transfer_settings.write_idx]);

				EDMA_TCD_SADDR(tcd, kEDMA_EDMA4Flag) = block_config->source_address;
				EDMA_TCD_DADDR(tcd, kEDMA_EDMA4Flag) = block_config->dest_address;
				EDMA_TCD_BITER(tcd, kEDMA_EDMA4Flag) =
					block_config->block_size / config->source_data_size;
				EDMA_TCD_CITER(tcd, kEDMA_EDMA4Flag) =
					block_config->block_size / config->source_data_size;
				/* Enable auto stop for the last transfer. */
				if (block_config->next_block == NULL) {
					EDMA_TCD_CSR(tcd, kEDMA_EDMA4Flag) |= DMA_CSR_DREQ(1U);
				} else {
					EDMA_TCD_CSR(tcd, kEDMA_EDMA4Flag) &= ~DMA_CSR_DREQ(1U);
				}

				data->transfer_settings.write_idx =
					(data->transfer_settings.write_idx + 1) %
					CONFIG_DMA_TCD_QUEUE_SIZE;
				data->transfer_settings.empty_tcds--;
				block_config = block_config->next_block;
			}

			if (block_config != NULL && data->transfer_settings.empty_tcds == 0) {
				/* The user requested more blocks than available TCDs: error */
				LOG_ERR("Too many request blocks, increase the TCD buffer size!");
				ret = -ENOBUFS;
			}
			/* Push the first TCD into the hardware */
			EDMA_InstallTCD(p_handle->base, hw_channel,
					&DEV_CFG(dev)->tcdpool[channel][0]);

		} else {
			/* Dynamic scatter-gather mode */
			EDMA_InstallTCDMemory(p_handle, DEV_CFG(dev)->tcdpool[channel],
					      CONFIG_DMA_TCD_QUEUE_SIZE);

			while (block_config != NULL) {
				EDMA_PrepareTransfer(&(data->transferConfig),
						     (void *)block_config->source_address,
						     config->source_data_size,
						     (void *)block_config->dest_address,
						     config->dest_data_size,
						     config->source_burst_length,
						     block_config->block_size, transfer_type);

				const status_t submit_status =
					EDMA_SubmitTransfer(p_handle, &(data->transferConfig));
				if (submit_status != kStatus_Success) {
					LOG_ERR("Error submitting EDMA Transfer: 0x%x",
						submit_status);
					ret = -EFAULT;
				}
				block_config = block_config->next_block;
			}
		}
	} else {
		/* block_count shall be 1 */
		LOG_DBG("block size is: %d", block_config->block_size);
		EDMA_PrepareTransfer(&(data->transferConfig),
				     (void *)block_config->source_address,
				     config->source_data_size,
				     (void *)block_config->dest_address,
				     config->dest_data_size,
				     config->source_burst_length,
				     block_config->block_size, transfer_type);

		const status_t submit_status =
			EDMA_SubmitTransfer(p_handle, &(data->transferConfig));
		if (submit_status != kStatus_Success) {
			LOG_ERR("Error submitting EDMA Transfer: 0x%x", submit_status);
			ret = -EFAULT;
		}

		LOG_DBG("DMA TCD CSR 0x%x", EDMA_HW_TCD_CSR(dev, hw_channel));
	}

	if (config->dest_chaining_en) {
		LOG_DBG("link major channel %d", config->linked_channel);
		EDMA_SetChannelLink(DEV_BASE(dev), channel, kEDMA_MajorLink,
				    config->linked_channel);
	}
	if (config->source_chaining_en) {
		LOG_DBG("link minor channel %d", config->linked_channel);
		EDMA_SetChannelLink(DEV_BASE(dev), channel, kEDMA_MinorLink,
				    config->linked_channel);
	}

	data->busy = false;
	if (config->dma_callback) {
		LOG_DBG("INSTALL call back on channel %d", channel);
		data->user_data = config->user_data;
		data->dma_callback = config->dma_callback;
		data->dev = dev;
	}

	irq_unlock(key);

	return ret;
}

static int dma_mcux_edma_start(const struct device *dev, uint32_t channel)
{
	struct call_back *data = DEV_CHANNEL_DATA(dev, channel);

	LOG_DBG("START TRANSFER");

#if defined(FSL_FEATURE_SOC_DMAMUX_COUNT) && FSL_FEATURE_SOC_DMAMUX_COUNT
	uint8_t dmamux_idx = DEV_DMAMUX_IDX(dev, channel);
	uint8_t dmamux_channel = DEV_DMAMUX_CHANNEL(dev, channel);

	LOG_DBG("DMAMUX CHCFG 0x%x", DEV_DMAMUX_BASE(dev, dmamux_idx)->CHCFG[dmamux_channel]);
#endif

#if !defined(CONFIG_DMA_MCUX_EDMA_V3) && !defined(CONFIG_DMA_MCUX_EDMA_V4)
	LOG_DBG("DMA CR 0x%x", DEV_BASE(dev)->CR);
#endif
	data->busy = true;
	EDMA_StartTransfer(DEV_EDMA_HANDLE(dev, channel));
	return 0;
}

static int dma_mcux_edma_stop(const struct device *dev, uint32_t channel)
{
	struct dma_mcux_edma_data *data = DEV_DATA(dev);
	uint32_t hw_channel;

	hw_channel = dma_mcux_edma_add_channel_gap(dev, channel);

	data->data_cb[channel].transfer_settings.valid = false;

	if (!data->data_cb[channel].busy) {
		return 0;
	}

	EDMA_AbortTransfer(DEV_EDMA_HANDLE(dev, channel));
	EDMA_ClearChannelStatusFlags(DEV_BASE(dev), hw_channel,
				     kEDMA_DoneFlag | kEDMA_ErrorFlag |
				     kEDMA_InterruptFlag);
	EDMA_ResetChannel(DEV_BASE(dev), hw_channel);
	data->data_cb[channel].busy = false;
	return 0;
}

static int dma_mcux_edma_suspend(const struct device *dev, uint32_t channel)
{
	struct call_back *data = DEV_CHANNEL_DATA(dev, channel);

	if (!data->busy) {
		return -EINVAL;
	}
	EDMA_StopTransfer(DEV_EDMA_HANDLE(dev, channel));
	return 0;
}

static int dma_mcux_edma_resume(const struct device *dev, uint32_t channel)
{
	struct call_back *data = DEV_CHANNEL_DATA(dev, channel);

	if (!data->busy) {
		return -EINVAL;
	}
	EDMA_StartTransfer(DEV_EDMA_HANDLE(dev, channel));
	return 0;
}

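/*
 * Write a minimal transfer directly into the channel's hardware TCD: program
 * source, destination and major loop count, and set DREQ so the channel's
 * hardware request is disabled when the major loop completes (auto-stop).
 */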
static void dma_mcux_edma_update_hw_tcd(const struct device *dev, uint32_t channel, uint32_t src,
					uint32_t dst, size_t size)
{
	EDMA_HW_TCD_SADDR(dev, channel) = src;
	EDMA_HW_TCD_DADDR(dev, channel) = dst;
	EDMA_HW_TCD_BITER(dev, channel) = size;
	EDMA_HW_TCD_CITER(dev, channel) = size;
	EDMA_HW_TCD_CSR(dev, channel) |= DMA_CSR_DREQ(1U);
}

static int dma_mcux_edma_reload(const struct device *dev, uint32_t channel,
				uint32_t src, uint32_t dst, size_t size)
{
	struct call_back *data = DEV_CHANNEL_DATA(dev, channel);
	edma_tcd_t *tcd = NULL;
	edma_tcd_t *pre_tcd = NULL;
	uint32_t hw_id, sw_id;
	uint8_t pre_idx;

	/* Lock the channel configuration */
	const unsigned int key = irq_lock();
	int ret = 0;

	if (!data->transfer_settings.valid) {
		LOG_ERR("Invalid EDMA settings on initial config. Configure DMA before reload.");
		ret = -EFAULT;
		goto cleanup;
	}

	if (data->transfer_settings.cyclic) {
		if (data->transfer_settings.empty_tcds == 0) {
			LOG_ERR("TCD list is full in loop mode.");
			ret = -ENOBUFS;
			goto cleanup;
		}

		/* Convert size into the major loop count */
		size = size / data->transfer_settings.dest_data_size;

		/* Previous TCD index in the circular list */
		pre_idx = data->transfer_settings.write_idx - 1;
		if (pre_idx >= CONFIG_DMA_TCD_QUEUE_SIZE) {
			pre_idx = CONFIG_DMA_TCD_QUEUE_SIZE - 1;
		}
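		/* write_idx is a uint8_t, so when it is 0 the subtraction above
		 * wraps to 255 and the check folds it back to the last TCD in
		 * the pool.
		 */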

		/* Configure a TCD for the transfer */
		tcd = &(DEV_CFG(dev)->tcdpool[channel][data->transfer_settings.write_idx]);
		pre_tcd = &(DEV_CFG(dev)->tcdpool[channel][pre_idx]);

		EDMA_TCD_SADDR(tcd, kEDMA_EDMA4Flag) = src;
		EDMA_TCD_DADDR(tcd, kEDMA_EDMA4Flag) = dst;
		EDMA_TCD_BITER(tcd, kEDMA_EDMA4Flag) = size;
		EDMA_TCD_CITER(tcd, kEDMA_EDMA4Flag) = size;
		/* Enable automatic stop */
		EDMA_TCD_CSR(tcd, kEDMA_EDMA4Flag) |= DMA_CSR_DREQ(1U);
		sw_id = EDMA_TCD_DLAST_SGA(tcd, kEDMA_EDMA4Flag);

		/* Block the peripheral's hardware request trigger to prevent
		 * starting the DMA before the TCDs are updated. Keep the code
		 * between EDMA_DisableChannelRequest() and
		 * EDMA_EnableChannelRequest() as short as possible.
		 */
		EDMA_DisableChannelRequest(DEV_BASE(dev), channel);

		/* Wait for the DMA to be inactive before updating the TCDs.
		 * The CSR[ACTIVE] bit will deassert quickly after the EDMA's
		 * minor loop burst completes.
		 */
		while (EDMA_HW_TCD_CSR(dev, channel) & EDMA_HW_TCD_CH_ACTIVE_MASK) {
			;
		}

		/* Identify the current active TCD. Use DLAST_SGA as the HW ID. */
		hw_id = EDMA_GetNextTCDAddress(DEV_EDMA_HANDLE(dev, channel));
		if (data->transfer_settings.empty_tcds >= CONFIG_DMA_TCD_QUEUE_SIZE ||
		    hw_id == sw_id) {
			/* All transfers have been done. The DMA stopped
			 * automatically and an invalid TCD was loaded into the
			 * hardware, so update the hardware TCD directly.
			 */
			dma_mcux_edma_update_hw_tcd(dev, channel, src, dst, size);
			LOG_DBG("Transfer done, auto stop");

		} else {
			/* The previous TCD can automatically start this TCD.
			 * Enable the peripheral DMA request in the previous TCD.
			 */
			EDMA_TCD_CSR(pre_tcd, kEDMA_EDMA4Flag) &= ~DMA_CSR_DREQ(1U);

			if (data->transfer_settings.empty_tcds == CONFIG_DMA_TCD_QUEUE_SIZE - 1 ||
			    hw_id == (uint32_t)tcd) {
				/* The DMA is running the last transfer and the
				 * hardware has already loaded it, so make sure
				 * its DREQ is cleared.
				 */
				EDMA_EnableAutoStopRequest(DEV_BASE(dev), channel, false);
				LOG_DBG("Last transfer.");
			}
			LOG_DBG("Manual stop");
		}

#ifdef CONFIG_DMA_MCUX_EDMA
		/* There appears to be a hardware issue that may clear the ESG bit.
		 * As a workaround, clear the DONE bit before setting the ESG bit.
		 */
		EDMA_ClearChannelStatusFlags(DEV_BASE(dev), channel, kEDMA_DoneFlag);
		EDMA_HW_TCD_CSR(dev, channel) |= DMA_CSR_ESG_MASK;
#elif (CONFIG_DMA_MCUX_EDMA_V3 || CONFIG_DMA_MCUX_EDMA_V4)
		/* It has not been verified whether this issue exists on V3/V4
		 * hardware; this is just a placeholder.
		 */
#endif
		/* The TCDs are configured. Resume the DMA. */
		EDMA_EnableChannelRequest(DEV_BASE(dev), channel);

		/* Update the write index and the number of available TCDs. */
		data->transfer_settings.write_idx =
			(data->transfer_settings.write_idx + 1) % CONFIG_DMA_TCD_QUEUE_SIZE;
		data->transfer_settings.empty_tcds--;

		LOG_DBG("w_idx:%d no:%d(ch:%d)", data->transfer_settings.write_idx,
			data->transfer_settings.empty_tcds, channel);

	} else {
		/* Dynamic scatter-gather mode:
		 * If the tcdPool is not in use (no s/g) then only a single TCD
		 * can be active at once.
		 */
		if (data->busy && data->edma_handle.tcdPool == NULL) {
			LOG_ERR("EDMA busy. Wait until the transfer completes before reloading.");
			ret = -EBUSY;
			goto cleanup;
		}

		EDMA_PrepareTransfer(&(data->transferConfig), (void *)src,
				     data->transfer_settings.source_data_size, (void *)dst,
				     data->transfer_settings.dest_data_size,
				     data->transfer_settings.source_burst_length, size,
				     data->transfer_settings.transfer_type);

		const status_t submit_status =
			EDMA_SubmitTransfer(DEV_EDMA_HANDLE(dev, channel), &(data->transferConfig));

		if (submit_status != kStatus_Success) {
			LOG_ERR("Error submitting EDMA Transfer: 0x%x", submit_status);
			ret = -EFAULT;
		}
	}

cleanup:
	irq_unlock(key);
	return ret;
}

static int dma_mcux_edma_get_status(const struct device *dev, uint32_t channel,
				    struct dma_status *status)
{
	uint32_t hw_channel = dma_mcux_edma_add_channel_gap(dev, channel);

	if (DEV_CHANNEL_DATA(dev, channel)->busy) {
		status->busy = true;
		/* pending_length is in bytes. Multiply the remaining major
		 * loop count by NBYTES for each minor loop.
		 */
		status->pending_length =
			EDMA_GetRemainingMajorLoopCount(DEV_BASE(dev), hw_channel) *
			DEV_CHANNEL_DATA(dev, channel)->transfer_settings.source_data_size;
	} else {
		status->busy = false;
		status->pending_length = 0;
	}
	status->dir = DEV_CHANNEL_DATA(dev, channel)->transfer_settings.direction;

#if defined(FSL_FEATURE_SOC_DMAMUX_COUNT) && FSL_FEATURE_SOC_DMAMUX_COUNT
	uint8_t dmamux_idx = DEV_DMAMUX_IDX(dev, channel);
	uint8_t dmamux_channel = DEV_DMAMUX_CHANNEL(dev, channel);

	LOG_DBG("DMAMUX CHCFG 0x%x", DEV_DMAMUX_BASE(dev, dmamux_idx)->CHCFG[dmamux_channel]);
#endif

#if defined(CONFIG_DMA_MCUX_EDMA_V3) || defined(CONFIG_DMA_MCUX_EDMA_V4)
	LOG_DBG("DMA MP_CSR 0x%x",  DEV_BASE(dev)->MP_CSR);
	LOG_DBG("DMA MP_ES 0x%x",   DEV_BASE(dev)->MP_ES);
	LOG_DBG("DMA CHx_ES 0x%x",  DEV_BASE(dev)->CH[hw_channel].CH_ES);
	LOG_DBG("DMA CHx_CSR 0x%x", DEV_BASE(dev)->CH[hw_channel].CH_CSR);
	LOG_DBG("DMA CHx_INT 0x%x", DEV_BASE(dev)->CH[hw_channel].CH_INT);
	LOG_DBG("DMA TCD_CSR 0x%x", DEV_BASE(dev)->CH[hw_channel].TCD_CSR);
#else
	LOG_DBG("DMA CR 0x%x", DEV_BASE(dev)->CR);
	LOG_DBG("DMA INT 0x%x", DEV_BASE(dev)->INT);
	LOG_DBG("DMA ERQ 0x%x", DEV_BASE(dev)->ERQ);
	LOG_DBG("DMA ES 0x%x", DEV_BASE(dev)->ES);
	LOG_DBG("DMA ERR 0x%x", DEV_BASE(dev)->ERR);
	LOG_DBG("DMA HRS 0x%x", DEV_BASE(dev)->HRS);
	LOG_DBG("data csr is 0x%x", DEV_BASE(dev)->TCD[hw_channel].CSR);
#endif
	return 0;
}

static bool dma_mcux_edma_channel_filter(const struct device *dev,
					 int channel_id, void *param)
{
	enum dma_channel_filter *filter = (enum dma_channel_filter *)param;

	if (filter && *filter == DMA_CHANNEL_PERIODIC) {
		if (channel_id > 3) {
			return false;
		}
	}
	return true;
}

static DEVICE_API(dma, dma_mcux_edma_api) = {
	.reload = dma_mcux_edma_reload,
	.config = dma_mcux_edma_configure,
	.start = dma_mcux_edma_start,
	.stop = dma_mcux_edma_stop,
	.suspend = dma_mcux_edma_suspend,
	.resume = dma_mcux_edma_resume,
	.get_status = dma_mcux_edma_get_status,
	.chan_filter = dma_mcux_edma_channel_filter,
};

static int dma_mcux_edma_init(const struct device *dev)
{
	const struct dma_mcux_edma_config *config = dev->config;
	struct dma_mcux_edma_data *data = dev->data;

	edma_config_t userConfig = { 0 };

	LOG_DBG("INIT NXP EDMA");

#if defined(FSL_FEATURE_SOC_DMAMUX_COUNT) && FSL_FEATURE_SOC_DMAMUX_COUNT
	uint8_t i;

	for (i = 0; i < config->dma_channels / config->channels_per_mux; i++) {
		DMAMUX_Init(DEV_DMAMUX_BASE(dev, i));
	}
#endif

	EDMA_GetDefaultConfig(&userConfig);
	EDMA_Init(DEV_BASE(dev), &userConfig);
#ifdef CONFIG_DMA_MCUX_EDMA_V3
	/* Channel linking is available and is controlled by each channel's link settings */
	EDMA_EnableAllChannelLink(DEV_BASE(dev), true);
#endif
	config->irq_config_func(dev);
	data->dma_ctx.magic = DMA_MAGIC;
	data->dma_ctx.dma_channels = config->dma_channels;
	data->dma_ctx.atomic = data->channels_atomic;
	return 0;
}

/* The shared error interrupt (if any) must be declared as the last element in the devicetree */
#if !DT_INST_PROP(0, no_error_irq)
#define NUM_IRQS_WITHOUT_ERROR_IRQ(n)	UTIL_DEC(DT_NUM_IRQS(DT_DRV_INST(n)))
#else
#define NUM_IRQS_WITHOUT_ERROR_IRQ(n)	DT_NUM_IRQS(DT_DRV_INST(n))
#endif

#define IRQ_CONFIG(n, idx, fn)							\
	{									\
		IRQ_CONNECT(DT_INST_IRQ_BY_IDX(n, idx, irq),			\
			    DT_INST_IRQ_BY_IDX(n, idx, priority),		\
			    fn,							\
			    DEVICE_DT_INST_GET(n), 0);				\
			    irq_enable(DT_INST_IRQ_BY_IDX(n, idx, irq));	\
	}

#define EDMA_CHANNELS_MASK(n) static uint32_t edma_channel_mask_##n[] =  \
				DT_PROP(DT_DRV_INST(n), channels_shared_irq_mask);

#define GET_EDMA_CHANNEL_SHARED_IRQ_MASK_WIDTH(n) \
			(DT_INST_PROP(n, dma_channels) / 32)

#define EDMA_CHANNELS_SHARED_REGISTER_IN_IRQ(dev, idx, n) \
		dma_mcux_edma_multi_channels_irq_handler(dev, idx, edma_channel_mask_##n, \
			GET_EDMA_CHANNEL_SHARED_IRQ_MASK_WIDTH(n));

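/*
 * DMA_MCUX_EDMA_IRQ_DEFINE(idx, n) below generates one ISR per interrupt line.
 * As a rough sketch (hypothetical instance 0, no channels-shared-irq-mask and
 * no irq-shared-offset), DMA_MCUX_EDMA_IRQ_DEFINE(0, 0) expands to roughly:
 *
 *	static void dma_mcux_edma_0_irq_0(const struct device *dev)
 *	{
 *		dma_mcux_edma_irq_handler(dev, 0);
 *	}
 */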
#define DMA_MCUX_EDMA_IRQ_DEFINE(idx, n)					\
	static void dma_mcux_edma_##n##_irq_##idx(const struct device *dev)	\
	{									\
		COND_CODE_1(DT_INST_NODE_HAS_PROP(n, channels_shared_irq_mask), \
			(EDMA_CHANNELS_SHARED_REGISTER_IN_IRQ(dev, idx, n)),	\
			(dma_mcux_edma_irq_handler(dev, idx);))	\
										\
		IF_ENABLED(UTIL_BOOL(DT_INST_PROP(n, irq_shared_offset)),	\
			  (dma_mcux_edma_irq_handler(dev,			\
			   idx + DT_INST_PROP(n, irq_shared_offset));))		\
										\
		IF_ENABLED(CONFIG_CPU_CORTEX_M4, (barrier_dsync_fence_full();))	\
	}

#define DMA_MCUX_EDMA_IRQ_CONFIG(idx, n)					\
	IRQ_CONFIG(n, idx, dma_mcux_edma_##n##_irq_##idx)

#define DMA_MCUX_EDMA_CONFIG_FUNC(n)						\
	IF_ENABLED(DT_INST_NODE_HAS_PROP(n, channels_shared_irq_mask), \
				(EDMA_CHANNELS_MASK(n))) \
	LISTIFY(NUM_IRQS_WITHOUT_ERROR_IRQ(n), DMA_MCUX_EDMA_IRQ_DEFINE, (), n) \
	static void dma_imx_config_func_##n(const struct device *dev)		\
	{									\
		ARG_UNUSED(dev);						\
										\
		LISTIFY(NUM_IRQS_WITHOUT_ERROR_IRQ(n),				\
			DMA_MCUX_EDMA_IRQ_CONFIG, (;), n)			\
										\
		COND_CODE_1(DT_INST_PROP(n, no_error_irq), (),			\
			(IRQ_CONFIG(n, NUM_IRQS_WITHOUT_ERROR_IRQ(n),		\
			dma_mcux_edma_error_irq_handler)))			\
										\
		LOG_DBG("install irq done");					\
	}

#if DMA_MCUX_HAS_CHANNEL_GAP
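/*
 * If the channel-gap property is absent, both bounds default to dma-channels,
 * so every primitive channel is below channel_gap[0] and the add/remove gap
 * helpers above become identity mappings.
 */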
#define DMA_MCUX_EDMA_CHANNEL_GAP(n)						\
	.channel_gap = DT_INST_PROP_OR(n, channel_gap,				\
				{[0 ... 1] = DT_INST_PROP(n, dma_channels)}),
#else
#define DMA_MCUX_EDMA_CHANNEL_GAP(n)
#endif

#if defined(FSL_FEATURE_SOC_DMAMUX_COUNT) && FSL_FEATURE_SOC_DMAMUX_COUNT
#define DMA_MCUX_EDMA_MUX(idx, n)						\
	(DMAMUX_Type *)DT_INST_REG_ADDR_BY_IDX(n, UTIL_INC(idx))

#define DMAMUX_BASE_INIT_DEFINE(n)						\
	static DMAMUX_Type *dmamux_base_##n[] = {				\
		LISTIFY(UTIL_DEC(DT_NUM_REGS(DT_DRV_INST(n))),			\
			DMA_MCUX_EDMA_MUX, (,), n)				\
	};

#define DMAMUX_BASE_INIT(n) .dmamux_base = &dmamux_base_##n[0],
#define CHANNELS_PER_MUX(n) .channels_per_mux = DT_INST_PROP(n, dma_channels) /	\
						ARRAY_SIZE(dmamux_base_##n),

#else
#define DMAMUX_BASE_INIT_DEFINE(n)
#define DMAMUX_BASE_INIT(n)
#define CHANNELS_PER_MUX(n)
#endif

/*
 * Define the DMA instances.
 */
#define DMA_INIT(n)								\
	DMAMUX_BASE_INIT_DEFINE(n)						\
	static void dma_imx_config_func_##n(const struct device *dev);		\
	static __aligned(32) EDMA_TCDPOOL_CACHE_ATTR edma_tcd_t			\
	dma_tcdpool##n[DT_INST_PROP(n, dma_channels)][CONFIG_DMA_TCD_QUEUE_SIZE];\
	static const struct dma_mcux_edma_config dma_config_##n = {		\
		.base = (DMA_Type *)DT_INST_REG_ADDR(n),			\
		DMAMUX_BASE_INIT(n)						\
		.dma_requests = DT_INST_PROP(n, dma_requests),			\
		.dma_channels = DT_INST_PROP(n, dma_channels),			\
		CHANNELS_PER_MUX(n)						\
		.irq_config_func = dma_imx_config_func_##n,			\
		.dmamux_reg_offset = DT_INST_PROP(n, dmamux_reg_offset),	\
		DMA_MCUX_EDMA_CHANNEL_GAP(n)					\
		.tcdpool = dma_tcdpool##n,					\
	};									\
										\
	static struct call_back							\
		dma_data_callback_##n[DT_INST_PROP(n, dma_channels)];		\
	static ATOMIC_DEFINE(							\
		dma_channels_atomic_##n, DT_INST_PROP(n, dma_channels));	\
	static struct dma_mcux_edma_data dma_data_##n = {			\
		.data_cb = dma_data_callback_##n,				\
		.channels_atomic = dma_channels_atomic_##n,			\
	};									\
										\
	DEVICE_DT_INST_DEFINE(n,						\
			      &dma_mcux_edma_init, NULL,			\
			      &dma_data_##n, &dma_config_##n,			\
			      PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY,		\
			      &dma_mcux_edma_api);				\
										\
	DMA_MCUX_EDMA_CONFIG_FUNC(n);

DT_INST_FOREACH_STATUS_OKAY(DMA_INIT)