/*
 * Copyright (c) 2020 NXP Semiconductor INC.
 * All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @brief Common part of DMA drivers for the NXP i.MX RT series (eDMA).
 */

#include <errno.h>
#include <soc.h>
#include <init.h>
#include <kernel.h>
#include <devicetree.h>
#include <sys/atomic.h>
#include <drivers/dma.h>
#include <drivers/clock_control.h>

#include "dma_mcux_edma.h"

#include <logging/log.h>

#define DT_DRV_COMPAT nxp_mcux_edma

LOG_MODULE_REGISTER(dma_mcux_edma, CONFIG_DMA_LOG_LEVEL);

struct dma_mcux_edma_config {
	DMA_Type *base;
	DMAMUX_Type *dmamux_base;
	int dma_channels; /* number of channels */
	void (*irq_config_func)(const struct device *dev);
};

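/*
 * TCD pool used for scatter-gather chains. The eDMA engine fetches each
 * linked TCD directly from memory, which requires 32-byte alignment.
 */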
static __aligned(32) edma_tcd_t
	tcdpool[DT_INST_PROP(0, dma_channels)][CONFIG_DMA_TCD_QUEUE_SIZE];

/* Per-channel state: MCUX handle, user callback, and transfer bookkeeping. */
struct call_back {
	edma_transfer_config_t transferConfig;
	edma_handle_t edma_handle;
	const struct device *dev;
	void *user_data;
	dma_callback_t dma_callback;
	enum dma_channel_direction dir;
	bool busy;
};

struct dma_mcux_edma_data {
	struct dma_context dma_ctx;
	struct call_back data_cb[DT_INST_PROP(0, dma_channels)];
	ATOMIC_DEFINE(channels_atomic, DT_INST_PROP(0, dma_channels));
	struct k_mutex dma_mutex;
};

#define DEV_CFG(dev)                                                           \
	((const struct dma_mcux_edma_config *const)dev->config)
#define DEV_DATA(dev) ((struct dma_mcux_edma_data *)dev->data)
#define DEV_BASE(dev) ((DMA_Type *)DEV_CFG(dev)->base)

#define DEV_DMAMUX_BASE(dev) ((DMAMUX_Type *)DEV_CFG(dev)->dmamux_base)

#define DEV_CHANNEL_DATA(dev, ch)                                              \
	((struct call_back *)(&(DEV_DATA(dev)->data_cb[ch])))

#define DEV_EDMA_HANDLE(dev, ch)                                               \
	((edma_handle_t *)(&(DEV_CHANNEL_DATA(dev, ch)->edma_handle)))

static void nxp_edma_callback(edma_handle_t *handle, void *param,
			      bool transferDone, uint32_t tcds)
{
	/* Report 0 to the user callback on completion, non-zero otherwise. */
	int ret = 1;
	struct call_back *data = (struct call_back *)param;
	uint32_t channel = handle->channel;

	if (transferDone) {
		data->busy = false;
		ret = 0;
	}
	LOG_DBG("transfer %d", tcds);
	/* The user callback is optional; skip the call if none was installed. */
	if (data->dma_callback) {
		data->dma_callback(data->dev, data->user_data, channel, ret);
	}
}

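/*
 * Channel interrupt service. For scatter-gather transfers the eDMA handle
 * tracks a ring of TCDs: DLAST_SGA points at the next TCD the engine will
 * load, so its offset into the pool tells us how far the hardware has
 * advanced. The code below derives the new ring header from that offset,
 * counts how many TCDs completed since the last interrupt, and passes that
 * count to the callback.
 */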
static void channel_irq(edma_handle_t *handle)
{
	bool transfer_done;

	/* Clear EDMA interrupt flag */
	handle->base->CINT = handle->channel;
	/* Check if transfer is already finished. */
	transfer_done = ((handle->base->TCD[handle->channel].CSR &
			  DMA_CSR_DONE_MASK) != 0U);

	if (handle->tcdPool == NULL) {
		(handle->callback)(handle, handle->userData, transfer_done, 0);
	} else {
		uint32_t sga = handle->base->TCD[handle->channel].DLAST_SGA;
		uint32_t sga_index;
		int32_t tcds_done;
		uint8_t new_header;

		sga -= (uint32_t)handle->tcdPool;
		sga_index = sga / sizeof(edma_tcd_t);
		/* Adjust header positions. */
		if (transfer_done) {
			new_header = (uint8_t)sga_index;
		} else {
			new_header = sga_index != 0U ?
					     (uint8_t)sga_index - 1U :
					     (uint8_t)handle->tcdSize - 1U;
		}
		/* Calculate the number of finished TCDs */
		if (new_header == (uint8_t)handle->header) {
			int8_t tmpTcdUsed = handle->tcdUsed;
			int8_t tmpTcdSize = handle->tcdSize;

			if (tmpTcdUsed == tmpTcdSize) {
				tcds_done = handle->tcdUsed;
			} else {
				tcds_done = 0;
			}
		} else {
			tcds_done = (uint32_t)new_header - (uint32_t)handle->header;
			if (tcds_done < 0) {
				tcds_done += handle->tcdSize;
			}
		}

		handle->header = (int8_t)new_header;
		handle->tcdUsed -= (int8_t)tcds_done;
		/* Invoke callback function. */
		if (handle->callback != NULL) {
			(handle->callback)(handle, handle->userData,
					   transfer_done, tcds_done);
		}

		if (transfer_done) {
			handle->base->CDNE = handle->channel;
		}
	}
}

static void dma_mcux_edma_irq_handler(const struct device *dev)
{
	int i = 0;

	LOG_DBG("IRQ CALLED");
	for (i = 0; i < DT_INST_PROP(0, dma_channels); i++) {
		uint32_t flag = EDMA_GetChannelStatusFlags(DEV_BASE(dev), i);

		if ((flag & (uint32_t)kEDMA_InterruptFlag) != 0U) {
			LOG_DBG("IRQ OCCURRED");
			channel_irq(DEV_EDMA_HANDLE(dev, i));
			LOG_DBG("IRQ DONE");
#if defined __CORTEX_M && (__CORTEX_M == 4U)
			/*
			 * Ensure the interrupt-flag clear has taken effect
			 * before leaving the ISR, avoiding spurious re-entry.
			 */
			__DSB();
#endif
		}
	}
}

static void dma_mcux_edma_error_irq_handler(const struct device *dev)
{
	int i = 0;
	uint32_t flag = 0;

	for (i = 0; i < DT_INST_PROP(0, dma_channels); i++) {
		if (DEV_CHANNEL_DATA(dev, i)->busy) {
			flag = EDMA_GetChannelStatusFlags(DEV_BASE(dev), i);
			LOG_INF("channel %d error status is 0x%x", i, flag);
			EDMA_ClearChannelStatusFlags(DEV_BASE(dev), i,
						     0xFFFFFFFF);
			EDMA_AbortTransfer(DEV_EDMA_HANDLE(dev, i));
			DEV_CHANNEL_DATA(dev, i)->busy = false;
		}
	}

#if defined __CORTEX_M && (__CORTEX_M == 4U)
	__DSB();
#endif
}

/* Configure a channel */
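/*
 * The sequence below validates the request, routes the request source
 * through the DMAMUX, resets and re-creates the channel handle, and then
 * prepares either a single TCD or a scatter-gather TCD chain.
 */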
static int dma_mcux_edma_configure(const struct device *dev, uint32_t channel,
				   struct dma_config *config)
{
	edma_handle_t *p_handle;
	struct call_back *data;
	struct dma_block_config *block_config;
	uint32_t slot;
	edma_transfer_type_t transfer_type;
	int key;

	/* Validate arguments before dereferencing or indexing with them. */
	if (NULL == dev || NULL == config || NULL == config->head_block) {
		return -EINVAL;
	}

	slot = config->dma_slot;
	if (slot >= DT_INST_PROP(0, dma_requests)) {
		LOG_ERR("source number %d is out of range", slot);
		return -ENOTSUP;
	}

	if (channel >= DT_INST_PROP(0, dma_channels)) {
		LOG_ERR("DMA channel %d is out of range", channel);
		return -EINVAL;
	}

	p_handle = DEV_EDMA_HANDLE(dev, channel);
	data = DEV_CHANNEL_DATA(dev, channel);
	block_config = config->head_block;

	data->dir = config->channel_direction;
	switch (config->channel_direction) {
	case MEMORY_TO_MEMORY:
		transfer_type = kEDMA_MemoryToMemory;
		break;
	case MEMORY_TO_PERIPHERAL:
		transfer_type = kEDMA_MemoryToPeripheral;
		break;
	case PERIPHERAL_TO_MEMORY:
		transfer_type = kEDMA_PeripheralToMemory;
		break;
	case PERIPHERAL_TO_PERIPHERAL:
		transfer_type = kEDMA_PeripheralToPeripheral;
		break;
	default:
		LOG_ERR("unsupported transfer direction");
		return -EINVAL;
	}

	/* Lock interrupts while programming the channel configuration. */
	key = irq_lock();

#if DT_INST_PROP(0, nxp_a_on)
	if (config->source_handshake || config->dest_handshake ||
	    transfer_type == kEDMA_MemoryToMemory) {
		/* A software trigger keeps the channel always on. */
		LOG_DBG("ALWAYS ON");
		DMAMUX_EnableAlwaysOn(DEV_DMAMUX_BASE(dev), channel, true);
	} else {
		DMAMUX_SetSource(DEV_DMAMUX_BASE(dev), channel, slot);
	}
#else
	DMAMUX_SetSource(DEV_DMAMUX_BASE(dev), channel, slot);
#endif

	/* dam_imx_rt_set_channel_priority(dev, channel, config); */
	DMAMUX_EnableChannel(DEV_DMAMUX_BASE(dev), channel);

	if (data->busy) {
		EDMA_AbortTransfer(p_handle);
	}
	EDMA_ResetChannel(DEV_BASE(dev), channel);
	EDMA_CreateHandle(p_handle, DEV_BASE(dev), channel);
	EDMA_SetCallback(p_handle, nxp_edma_callback, (void *)data);

	LOG_DBG("channel is %d", p_handle->channel);

	if (config->source_data_size != 4U && config->source_data_size != 2U &&
	    config->source_data_size != 1U && config->source_data_size != 8U &&
	    config->source_data_size != 16U &&
	    config->source_data_size != 32U) {
		LOG_ERR("Source unit size error, %d", config->source_data_size);
		/* Do not leak the IRQ lock on the error path. */
		irq_unlock(key);
		return -EINVAL;
	}

	if (config->dest_data_size != 4U && config->dest_data_size != 2U &&
	    config->dest_data_size != 1U && config->dest_data_size != 8U &&
	    config->dest_data_size != 16U && config->dest_data_size != 32U) {
		LOG_ERR("Dest unit size error, %d", config->dest_data_size);
		irq_unlock(key);
		return -EINVAL;
	}

	EDMA_EnableChannelInterrupts(DEV_BASE(dev), channel,
				     kEDMA_ErrorInterruptEnable);

	if (block_config->source_gather_en || block_config->dest_scatter_en) {
		if (config->block_count > CONFIG_DMA_TCD_QUEUE_SIZE) {
			LOG_ERR("CONFIG_DMA_TCD_QUEUE_SIZE is too small; need at least %d",
				config->block_count);
			irq_unlock(key);
			return -EINVAL;
		}
		EDMA_InstallTCDMemory(p_handle, tcdpool[channel],
				      CONFIG_DMA_TCD_QUEUE_SIZE);
		while (block_config != NULL) {
			EDMA_PrepareTransfer(
				&(data->transferConfig),
				(void *)block_config->source_address,
				config->source_data_size,
				(void *)block_config->dest_address,
				config->dest_data_size,
				config->source_burst_length,
				block_config->block_size, transfer_type);
			EDMA_SubmitTransfer(p_handle, &(data->transferConfig));
			block_config = block_config->next_block;
		}
	} else {
		/* block_count shall be 1 */
		status_t ret;

		LOG_DBG("block size is: %d", block_config->block_size);
		EDMA_PrepareTransfer(&(data->transferConfig),
				     (void *)block_config->source_address,
				     config->source_data_size,
				     (void *)block_config->dest_address,
				     config->dest_data_size,
				     config->source_burst_length,
				     block_config->block_size, transfer_type);

		ret = EDMA_SubmitTransfer(p_handle, &(data->transferConfig));
		edma_tcd_t *tcdRegs =
			(edma_tcd_t *)(uint32_t)&p_handle->base->TCD[channel];
		if (ret != kStatus_Success) {
			LOG_ERR("submit error 0x%x", ret);
		}
		LOG_DBG("data csr is 0x%x", tcdRegs->CSR);
	}

	if (config->dest_chaining_en) {
		LOG_DBG("link major channel %d", config->linked_channel);
		EDMA_SetChannelLink(DEV_BASE(dev), channel, kEDMA_MajorLink,
				    config->linked_channel);
	}
	if (config->source_chaining_en) {
		LOG_DBG("link minor channel %d", config->linked_channel);
		EDMA_SetChannelLink(DEV_BASE(dev), channel, kEDMA_MinorLink,
				    config->linked_channel);
	}

	data->busy = false;
	if (config->dma_callback) {
		LOG_DBG("INSTALL call back on channel %d", channel);
		data->user_data = config->user_data;
		data->dma_callback = config->dma_callback;
		data->dev = dev;
	}

	irq_unlock(key);

	return 0;
}

static int dma_mcux_edma_start(const struct device *dev, uint32_t channel)
{
	struct call_back *data = DEV_CHANNEL_DATA(dev, channel);

	LOG_DBG("START TRANSFER");
	LOG_DBG("DMAMUX CHCFG 0x%x", DEV_DMAMUX_BASE(dev)->CHCFG[channel]);
	LOG_DBG("DMA CR 0x%x", DEV_BASE(dev)->CR);
	data->busy = true;
	EDMA_StartTransfer(DEV_EDMA_HANDLE(dev, channel));
	return 0;
}

static int dma_mcux_edma_stop(const struct device *dev, uint32_t channel)
{
	struct dma_mcux_edma_data *data = DEV_DATA(dev);

	if (!data->data_cb[channel].busy) {
		return 0;
	}
	EDMA_AbortTransfer(DEV_EDMA_HANDLE(dev, channel));
	EDMA_ClearChannelStatusFlags(DEV_BASE(dev), channel,
				     kEDMA_DoneFlag | kEDMA_ErrorFlag |
					     kEDMA_InterruptFlag);
	EDMA_ResetChannel(DEV_BASE(dev), channel);
	data->data_cb[channel].busy = false;
	return 0;
}

static int dma_mcux_edma_reload(const struct device *dev, uint32_t channel,
				uint32_t src, uint32_t dst, size_t size)
{
	struct call_back *data = DEV_CHANNEL_DATA(dev, channel);

	/*
	 * Reloading with new addresses is not implemented yet; an in-flight
	 * transfer is aborted, but src, dst and size are ignored.
	 */
	ARG_UNUSED(src);
	ARG_UNUSED(dst);
	ARG_UNUSED(size);

	if (data->busy) {
		EDMA_AbortTransfer(DEV_EDMA_HANDLE(dev, channel));
	}
	return 0;
}

static int dma_mcux_edma_get_status(const struct device *dev,
				    uint32_t channel,
				    struct dma_status *status)
{
	edma_tcd_t *tcdRegs;

	if (DEV_CHANNEL_DATA(dev, channel)->busy) {
		status->busy = true;
		status->pending_length =
			EDMA_GetRemainingMajorLoopCount(DEV_BASE(dev), channel);
	} else {
		status->busy = false;
		status->pending_length = 0;
	}
	status->dir = DEV_CHANNEL_DATA(dev, channel)->dir;
	LOG_DBG("DMAMUX CHCFG 0x%x", DEV_DMAMUX_BASE(dev)->CHCFG[channel]);
	LOG_DBG("DMA CR 0x%x", DEV_BASE(dev)->CR);
	LOG_DBG("DMA INT 0x%x", DEV_BASE(dev)->INT);
	LOG_DBG("DMA ERQ 0x%x", DEV_BASE(dev)->ERQ);
	LOG_DBG("DMA ES 0x%x", DEV_BASE(dev)->ES);
	LOG_DBG("DMA ERR 0x%x", DEV_BASE(dev)->ERR);
	LOG_DBG("DMA HRS 0x%x", DEV_BASE(dev)->HRS);
	tcdRegs = (edma_tcd_t *)((uint32_t)&DEV_BASE(dev)->TCD[channel]);
	LOG_DBG("data csr is 0x%x", tcdRegs->CSR);
	return 0;
}

static bool dma_mcux_edma_channel_filter(const struct device *dev,
				int channel_id, void *param)
{
	enum dma_channel_filter *filter = (enum dma_channel_filter *)param;

	/* Only the first four channels support periodic triggering. */
	if (filter && *filter == DMA_CHANNEL_PERIODIC) {
		if (channel_id > 3) {
			return false;
		}
	}
	return true;
}

static const struct dma_driver_api dma_mcux_edma_api = {
	.reload = dma_mcux_edma_reload,
	.config = dma_mcux_edma_configure,
	.start = dma_mcux_edma_start,
	.stop = dma_mcux_edma_stop,
	.get_status = dma_mcux_edma_get_status,
	.chan_filter = dma_mcux_edma_channel_filter,
};

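/*
 * Illustrative usage sketch (not part of this driver): a consumer would
 * drive these entry points through the generic Zephyr DMA API. The buffer,
 * device, and channel names below are hypothetical.
 *
 *	static uint32_t src_buf[64], dst_buf[64];
 *
 *	static void xfer_done(const struct device *dev, void *user_data,
 *			      uint32_t channel, int status)
 *	{
 *		// status == 0 signals completion (see nxp_edma_callback)
 *	}
 *
 *	struct dma_block_config blk = {
 *		.source_address = (uint32_t)src_buf,
 *		.dest_address = (uint32_t)dst_buf,
 *		.block_size = sizeof(src_buf),
 *	};
 *	struct dma_config cfg = {
 *		.channel_direction = MEMORY_TO_MEMORY,
 *		.source_data_size = 4,
 *		.dest_data_size = 4,
 *		.source_burst_length = 4,
 *		.block_count = 1,
 *		.head_block = &blk,
 *		.dma_callback = xfer_done,
 *	};
 *
 *	dma_config(dma_dev, channel, &cfg);
 *	dma_start(dma_dev, channel);
 */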
static int dma_mcux_edma_init(const struct device *dev)
{
	edma_config_t userConfig = { 0 };

	LOG_DBG("INIT NXP EDMA");
	DMAMUX_Init(DEV_DMAMUX_BASE(dev));
	EDMA_GetDefaultConfig(&userConfig);
	EDMA_Init(DEV_BASE(dev), &userConfig);
	DEV_CFG(dev)->irq_config_func(dev);
	memset(DEV_DATA(dev), 0, sizeof(struct dma_mcux_edma_data));
	memset(tcdpool, 0, sizeof(tcdpool));
	k_mutex_init(&DEV_DATA(dev)->dma_mutex);
	DEV_DATA(dev)->dma_ctx.magic = DMA_MAGIC;
	DEV_DATA(dev)->dma_ctx.dma_channels = DEV_CFG(dev)->dma_channels;
	DEV_DATA(dev)->dma_ctx.atomic = DEV_DATA(dev)->channels_atomic;
	return 0;
}

static void dma_imx_config_func_0(const struct device *dev);

static const struct dma_mcux_edma_config dma_config_0 = {
	.base = (DMA_Type *)DT_INST_REG_ADDR(0),
	.dmamux_base = (DMAMUX_Type *)DT_INST_REG_ADDR_BY_IDX(0, 1),
	.dma_channels = DT_INST_PROP(0, dma_channels),
	.irq_config_func = dma_imx_config_func_0,
};

static struct dma_mcux_edma_data dma_data;
/*
 * Define the eDMA device instance.
 */
DEVICE_DT_INST_DEFINE(0, &dma_mcux_edma_init, NULL,
		    &dma_data, &dma_config_0, POST_KERNEL,
		    CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &dma_mcux_edma_api);


static void dma_imx_config_func_0(const struct device *dev)
{
	ARG_UNUSED(dev);

	/* Connect and enable the 16 channel IRQs and the error IRQ. */

	IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, 0, irq),
		    DT_INST_IRQ_BY_IDX(0, 0, priority),
		    dma_mcux_edma_irq_handler, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_IDX(0, 0, irq));

	IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, 1, irq),
		    DT_INST_IRQ_BY_IDX(0, 1, priority),
		    dma_mcux_edma_irq_handler, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_IDX(0, 1, irq));

	IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, 2, irq),
		    DT_INST_IRQ_BY_IDX(0, 2, priority),
		    dma_mcux_edma_irq_handler, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_IDX(0, 2, irq));

	IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, 3, irq),
		    DT_INST_IRQ_BY_IDX(0, 3, priority),
		    dma_mcux_edma_irq_handler, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_IDX(0, 3, irq));

	IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, 4, irq),
		    DT_INST_IRQ_BY_IDX(0, 4, priority),
		    dma_mcux_edma_irq_handler, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_IDX(0, 4, irq));

	IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, 5, irq),
		    DT_INST_IRQ_BY_IDX(0, 5, priority),
		    dma_mcux_edma_irq_handler, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_IDX(0, 5, irq));

	IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, 6, irq),
		    DT_INST_IRQ_BY_IDX(0, 6, priority),
		    dma_mcux_edma_irq_handler, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_IDX(0, 6, irq));

	IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, 7, irq),
		    DT_INST_IRQ_BY_IDX(0, 7, priority),
		    dma_mcux_edma_irq_handler, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_IDX(0, 7, irq));

	IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, 8, irq),
		    DT_INST_IRQ_BY_IDX(0, 8, priority),
		    dma_mcux_edma_irq_handler, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_IDX(0, 8, irq));

	IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, 9, irq),
		    DT_INST_IRQ_BY_IDX(0, 9, priority),
		    dma_mcux_edma_irq_handler, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_IDX(0, 9, irq));

	IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, 10, irq),
		    DT_INST_IRQ_BY_IDX(0, 10, priority),
		    dma_mcux_edma_irq_handler, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_IDX(0, 10, irq));

	IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, 11, irq),
		    DT_INST_IRQ_BY_IDX(0, 11, priority),
		    dma_mcux_edma_irq_handler, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_IDX(0, 11, irq));

	IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, 12, irq),
		    DT_INST_IRQ_BY_IDX(0, 12, priority),
		    dma_mcux_edma_irq_handler, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_IDX(0, 12, irq));

	IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, 13, irq),
		    DT_INST_IRQ_BY_IDX(0, 13, priority),
		    dma_mcux_edma_irq_handler, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_IDX(0, 13, irq));

	IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, 14, irq),
		    DT_INST_IRQ_BY_IDX(0, 14, priority),
		    dma_mcux_edma_irq_handler, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_IDX(0, 14, irq));

	IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, 15, irq),
		    DT_INST_IRQ_BY_IDX(0, 15, priority),
		    dma_mcux_edma_irq_handler, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_IDX(0, 15, irq));

	IRQ_CONNECT(DT_INST_IRQ_BY_IDX(0, 16, irq),
		    DT_INST_IRQ_BY_IDX(0, 16, priority),
		    dma_mcux_edma_error_irq_handler,
		    DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_IDX(0, 16, irq));

	LOG_DBG("install irq done");
}