/*
 * Copyright 2024 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_DRIVERS_DMA_DMA_NXP_EDMA_H_
#define ZEPHYR_DRIVERS_DMA_DMA_NXP_EDMA_H_

#include <zephyr/device.h>
#include <zephyr/irq.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/logging/log.h>
#include <zephyr/pm/device_runtime.h>
#include <zephyr/pm/device.h>

#include "fsl_edma_soc_rev2.h"

LOG_MODULE_REGISTER(nxp_edma);

/* used for driver binding */
#define DT_DRV_COMPAT nxp_edma

/* work around the fact that device_map() is not defined for SoCs with no MMU */
#ifndef DEVICE_MMIO_IS_IN_RAM
#define device_map(virt, phys, size, flags) *(virt) = (phys)
#endif /* DEVICE_MMIO_IS_IN_RAM */
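
/* a sketch of how the driver is expected to call this, whether through the
 * real device_map() or through the fallback above (which simply assigns the
 * physical address and ignores the flags):
 *
 *	device_map(&data->regmap, cfg->regmap_phys,
 *		   cfg->regmap_size, K_MEM_CACHE_NONE);
 */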

/* macros used to parse DTS properties */

/* used in conjunction with LISTIFY, which expects F to also take a
 * variable number of arguments. Since IDENTITY doesn't do that, we need a
 * version of it which also takes (and ignores) a variable number of
 * arguments.
 */
#define IDENTITY_VARGS(V, ...) IDENTITY(V)
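/* e.g.: LISTIFY(3, IDENTITY_VARGS, (,)) expands to: 0, 1, 2 */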

/* used to generate an array of indexes for the channels */
#define _EDMA_CHANNEL_INDEX_ARRAY(inst)\
	LISTIFY(DT_INST_PROP_LEN_OR(inst, valid_channels, 0), IDENTITY_VARGS, (,))

/* used to generate an array of indexes for the channels - this is different
 * from _EDMA_CHANNEL_INDEX_ARRAY because the number of channels is passed
 * explicitly through dma-channels, so there's no need to deduce it from the
 * length of the valid-channels property.
 */
#define _EDMA_CHANNEL_INDEX_ARRAY_EXPLICIT(inst)\
	LISTIFY(DT_INST_PROP_OR(inst, dma_channels, 0), IDENTITY_VARGS, (,))

/* used to generate an array of indexes for the interrupts */
#define _EDMA_INT_INDEX_ARRAY(inst)\
	LISTIFY(DT_NUM_IRQS(DT_INST(inst, DT_DRV_COMPAT)), IDENTITY_VARGS, (,))

/* used to register an ISR/arg pair. TODO: should we also use the priority? */
#define _EDMA_INT_CONNECT(idx, inst)				\
	IRQ_CONNECT(DT_INST_IRQN_BY_IDX(inst, idx),		\
		    0, edma_isr,				\
		    &channels_##inst[idx], 0)

#define _EDMA_CHANNEL_PD_DEVICE_OR_NULL(idx, inst)						\
	COND_CODE_1(CONFIG_PM_DEVICE_POWER_DOMAIN,						\
		    (DEVICE_DT_GET_OR_NULL(DT_INST_PHANDLE_BY_IDX(inst, power_domains, idx))),	\
		    (NULL))

/* used to declare a struct edma_channel by the non-explicit macro suite */
#define _EDMA_CHANNEL_DECLARE(idx, inst)				\
{									\
	.id = DT_INST_PROP_BY_IDX(inst, valid_channels, idx),		\
	.dev = DEVICE_DT_INST_GET(inst),				\
	.irq = DT_INST_IRQN_BY_IDX(inst, idx),				\
	.pd_dev = _EDMA_CHANNEL_PD_DEVICE_OR_NULL(idx, inst),		\
}

/* used to declare a struct edma_channel by the explicit macro suite */
#define _EDMA_CHANNEL_DECLARE_EXPLICIT(idx, inst)			\
{									\
	.id = idx,							\
	.dev = DEVICE_DT_INST_GET(inst),				\
	.irq = DT_INST_IRQN_BY_IDX(inst, idx),				\
	.pd_dev = _EDMA_CHANNEL_PD_DEVICE_OR_NULL(idx, inst),		\
}

/* used to create an array of channel IDs via the valid-channels property */
#define _EDMA_CHANNEL_ARRAY(inst)					\
	{ FOR_EACH_FIXED_ARG(_EDMA_CHANNEL_DECLARE, (,),		\
			     inst, _EDMA_CHANNEL_INDEX_ARRAY(inst)) }

/* used to create an array of channel IDs via the dma-channels property */
#define _EDMA_CHANNEL_ARRAY_EXPLICIT(inst)				\
	{ FOR_EACH_FIXED_ARG(_EDMA_CHANNEL_DECLARE_EXPLICIT, (,), inst,	\
			     _EDMA_CHANNEL_INDEX_ARRAY_EXPLICIT(inst)) }

/* used to construct the channel array based on the specified property:
 * dma-channels or valid-channels.
 */
#define EDMA_CHANNEL_ARRAY_GET(inst)							\
	COND_CODE_1(DT_NODE_HAS_PROP(DT_INST(inst, DT_DRV_COMPAT), dma_channels),	\
		    (_EDMA_CHANNEL_ARRAY_EXPLICIT(inst)),				\
		    (_EDMA_CHANNEL_ARRAY(inst)))

/* used to register edma_isr for all specified interrupts */
#define EDMA_CONNECT_INTERRUPTS(inst)				\
	FOR_EACH_FIXED_ARG(_EDMA_INT_CONNECT, (;),		\
			   inst, _EDMA_INT_INDEX_ARRAY(inst))

#define EDMA_CHANS_ARE_CONTIGUOUS(inst)\
	DT_NODE_HAS_PROP(DT_INST(inst, DT_DRV_COMPAT), dma_channels)
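
/* a hypothetical devicetree sketch of the two supported styles (node names,
 * addresses and values are illustrative only):
 *
 *	edma0: dma@40000000 {
 *		compatible = "nxp,edma";
 *		dma-channels = <32>;
 *	};
 *
 * declares 32 contiguous channels (0-31), while
 *
 *	edma1: dma@40100000 {
 *		compatible = "nxp,edma";
 *		valid-channels = <0 4 7>;
 *	};
 *
 * declares a possibly sparse channel set, which forces the channel lookup
 * to scan the channel array instead of indexing into it directly.
 */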

/* utility macros */

/* a few words about EDMA_CHAN_PRODUCE_CONSUME_{A/B}:
 *	- in the context of cyclic buffers we introduce
 *	the concepts of consumer and producer channels.
 *
 *	- a consumer channel is a channel for which the
 *	DMA copies data from a buffer, thus leading to
 *	less data in said buffer (data is consumed with
 *	each transfer).
 *
 *	- a producer channel is a channel for which the
 *	DMA copies data into a buffer, thus leading to
 *	more data in said buffer (data is produced with
 *	each transfer).
 *
 *	- for consumer channels, each DMA interrupt will
 *	signal that an amount of data has been consumed
 *	from the buffer (half of the buffer size if
 *	HALFMAJOR is enabled, the whole buffer otherwise).
 *
 *	- for producer channels, each DMA interrupt will
 *	signal that an amount of data has been added
 *	to the buffer.
 *
 *	- to signal this, the ISR uses EDMA_CHAN_PRODUCE_CONSUME_A,
 *	which will "consume" data from the buffer for
 *	consumer channels and "produce" data for
 *	producer channels.
 *
 *	- since the upper layers using this driver need
 *	to let the EDMA driver know whenever they've produced
 *	(in the case of consumer channels) or consumed
 *	data (in the case of producer channels), they can
 *	do so through the reload() function.
 *
 *	- reload() uses EDMA_CHAN_PRODUCE_CONSUME_B, which
 *	will "produce" data for consumer channels and
 *	"consume" data for producer channels, thus letting
 *	the driver know what action the upper layer has
 *	performed (if the channel is a consumer, it's only
 *	natural that the upper layer will write/produce more
 *	data to the buffer; the same rationale applies to
 *	producer channels).
 *
 *	- EDMA_CHAN_PRODUCE_CONSUME_B is just the opposite
 *	of EDMA_CHAN_PRODUCE_CONSUME_A: if one produces
 *	data, the other consumes it and vice versa.
 *
 *	- all of this information is valid only in the
 *	context of cyclic buffers. If this behaviour is
 *	not enabled, querying the status will simply
 *	resolve to querying CITER and BITER.
 */
#define EDMA_CHAN_PRODUCE_CONSUME_A(chan, size)\
	((chan)->type == CHAN_TYPE_CONSUMER ?\
	 edma_chan_cyclic_consume(chan, size) :\
	 edma_chan_cyclic_produce(chan, size))

#define EDMA_CHAN_PRODUCE_CONSUME_B(chan, size)\
	((chan)->type == CHAN_TYPE_CONSUMER ?\
	 edma_chan_cyclic_produce(chan, size) :\
	 edma_chan_cyclic_consume(chan, size))
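
/* usage sketch, assuming a consumer channel (i.e. MEMORY_TO_PERIPHERAL):
 *
 *	in edma_isr(), the hardware has just copied `size` bytes out of the
 *	buffer, so the read position needs to advance:
 *
 *		ret = EDMA_CHAN_PRODUCE_CONSUME_A(chan, size);
 *
 *	in edma_reload(), the upper layer has written `size` new bytes into
 *	the buffer, so the write position needs to advance:
 *
 *		ret = EDMA_CHAN_PRODUCE_CONSUME_B(chan, size);
 */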

#define EDMA_CHAN_IS_ACTIVE(data, chan)\
	(EDMA_ChannelRegRead((data)->hal_cfg, (chan)->id, EDMA_TCD_CH_CSR) &\
	 EDMA_TCD_CH_CSR_ACTIVE_MASK)

enum channel_type {
	CHAN_TYPE_CONSUMER = 0,
	CHAN_TYPE_PRODUCER,
};

enum channel_state {
	CHAN_STATE_INIT = 0,
	CHAN_STATE_CONFIGURED,
	CHAN_STATE_STARTED,
	CHAN_STATE_STOPPED,
	CHAN_STATE_SUSPENDED,
	CHAN_STATE_RELEASING,
};
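
/* valid state transitions, as enforced by channel_allows_transition():
 *
 *	INIT       -> CONFIGURED
 *	CONFIGURED -> CONFIGURED | STARTED | RELEASING
 *	STARTED    -> STOPPED | SUSPENDED
 *	STOPPED    -> CONFIGURED | RELEASING
 *	SUSPENDED  -> STARTED | STOPPED
 */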

struct edma_channel {
	/* channel ID, needs to be the same as the hardware channel ID */
	uint32_t id;
	/* pointer to device representing the EDMA instance, used by edma_isr */
	const struct device *dev;
	/* channel power domain device */
	const struct device *pd_dev;
	/* current state of the channel */
	enum channel_state state;
	/* type of the channel (PRODUCER/CONSUMER) - only applicable to cyclic
	 * buffer configurations.
	 */
	enum channel_type type;
	/* argument passed to the user-defined DMA callback */
	void *arg;
	/* user-defined callback, called at the end of a channel's interrupt
	 * handling.
	 */
	dma_callback_t cb;
	/* INTID associated with the channel */
	int irq;
	/* the channel's status */
	struct dma_status stat;
	/* cyclic buffer size - currently, this is set to head_block's size */
	uint32_t bsize;
	/* set to true if the channel uses a cyclic buffer configuration */
	bool cyclic_buffer;
};

struct edma_data {
	/* this needs to be the first member */
	struct dma_context ctx;
	mm_reg_t regmap;
	struct edma_channel *channels;
	atomic_t channel_flags;
	edma_config_t *hal_cfg;
};

struct edma_config {
	uint32_t regmap_phys;
	uint32_t regmap_size;
	void (*irq_config)(void);
	/* true if channels are contiguous. The channels may not be contiguous
	 * if the valid-channels property is used instead of dma-channels. This
	 * is used to improve the time complexity of the channel lookup
	 * function.
	 */
	bool contiguous_channels;
};
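
/* an illustrative sketch of how a driver instance would typically tie the
 * macros above together. The real per-instance definitions live in the
 * driver's .c file; the channels_0 name is imposed by _EDMA_INT_CONNECT,
 * while edma_0_irq_config is a hypothetical name:
 *
 *	static struct edma_channel channels_0[] = EDMA_CHANNEL_ARRAY_GET(0);
 *
 *	static void edma_0_irq_config(void)
 *	{
 *		EDMA_CONNECT_INTERRUPTS(0);
 *	}
 */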

static inline bool channel_allows_transition(struct edma_channel *chan,
					     enum channel_state next)
{
	enum channel_state prev = chan->state;

	/* validate transition */
	switch (prev) {
	case CHAN_STATE_INIT:
		if (next != CHAN_STATE_CONFIGURED) {
			return false;
		}
		break;
	case CHAN_STATE_CONFIGURED:
		if (next != CHAN_STATE_STARTED &&
		    next != CHAN_STATE_CONFIGURED &&
		    next != CHAN_STATE_RELEASING) {
			return false;
		}
		break;
	case CHAN_STATE_STARTED:
		if (next != CHAN_STATE_STOPPED &&
		    next != CHAN_STATE_SUSPENDED) {
			return false;
		}
		break;
	case CHAN_STATE_STOPPED:
		if (next != CHAN_STATE_CONFIGURED &&
		    next != CHAN_STATE_RELEASING) {
			return false;
		}
		break;
	case CHAN_STATE_SUSPENDED:
		if (next != CHAN_STATE_STARTED &&
		    next != CHAN_STATE_STOPPED) {
			return false;
		}
		break;
	default:
		LOG_ERR("invalid channel previous state: %d", prev);
		return false;
	}

	return true;
}

static inline int get_transfer_type(enum dma_channel_direction dir, uint32_t *type)
{
	switch (dir) {
	case MEMORY_TO_MEMORY:
		*type = kEDMA_TransferTypeM2M;
		break;
	case MEMORY_TO_PERIPHERAL:
		*type = kEDMA_TransferTypeM2P;
		break;
	case PERIPHERAL_TO_MEMORY:
		*type = kEDMA_TransferTypeP2M;
		break;
	default:
		LOG_ERR("invalid channel direction: %d", dir);
		return -EINVAL;
	}

	return 0;
}

static inline bool data_size_is_valid(uint16_t size)
{
	switch (size) {
	case 1:
	case 2:
	case 4:
	case 8:
	case 16:
	case 32:
	case 64:
		break;
	default:
		return false;
	}

	return true;
}

/* TODO: we may require setting the channel type through DTS
 * or through struct dma_config. For now, we'll only support
 * MEMORY_TO_PERIPHERAL and PERIPHERAL_TO_MEMORY directions
 * and assume that these are bound to a certain channel type.
 */
static inline int edma_set_channel_type(struct edma_channel *chan,
					enum dma_channel_direction dir)
{
	switch (dir) {
	case MEMORY_TO_PERIPHERAL:
		chan->type = CHAN_TYPE_CONSUMER;
		break;
	case PERIPHERAL_TO_MEMORY:
		chan->type = CHAN_TYPE_PRODUCER;
		break;
	default:
		LOG_ERR("unsupported transfer direction: %d", dir);
		return -ENOTSUP;
	}

	return 0;
}

/* this function is used in cyclic buffer configurations. It updates the
 * channel's read position based on the number of bytes requested. If the
 * number of bytes being read exceeds the number of bytes available in the
 * buffer (pending_length), this will lead to an error. The main point of
 * this check is to provide a way for the user to determine if data is
 * consumed at a higher rate than it is being produced.
 *
 * This function is used in edma_isr() for CONSUMER channels to mark
 * that data has been consumed (i.e. data has been transferred to the
 * destination) - this is done via EDMA_CHAN_PRODUCE_CONSUME_A, which is
 * called in edma_isr(). For PRODUCER channels, this function is used
 * in edma_reload() to mark the fact that the user of the EDMA driver
 * has consumed data.
 */
static inline int edma_chan_cyclic_consume(struct edma_channel *chan,
					   uint32_t bytes)
{
	if (bytes > chan->stat.pending_length) {
		return -EINVAL;
	}

	chan->stat.read_position =
		(chan->stat.read_position + bytes) % chan->bsize;

	if (chan->stat.read_position > chan->stat.write_position) {
		chan->stat.free = chan->stat.read_position -
			chan->stat.write_position;
	} else if (chan->stat.read_position == chan->stat.write_position) {
		chan->stat.free = chan->bsize;
	} else {
		chan->stat.free = chan->bsize -
			(chan->stat.write_position - chan->stat.read_position);
	}

	chan->stat.pending_length = chan->bsize - chan->stat.free;

	return 0;
}
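
/* worked example (illustrative numbers): with bsize = 64, read_position = 48
 * and write_position = 16, consuming 32 bytes wraps the read position to
 * (48 + 32) % 64 = 16. read == write is treated as an empty buffer after a
 * consume, so free becomes 64 and pending_length 0.
 */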

/* this function is used in cyclic buffer configurations. It updates the
 * channel's write position based on the number of bytes requested. If the
 * number of bytes being written exceeds the number of free bytes in the
 * buffer, this will lead to an error. The main point of this check is to
 * provide a way for the user to determine if data is produced at a higher
 * rate than it is being consumed.
 *
 * This function is used in edma_isr() for PRODUCER channels to mark
 * that data has been produced (i.e. data has been transferred to the
 * destination) - this is done via EDMA_CHAN_PRODUCE_CONSUME_A, which is
 * called in edma_isr(). For CONSUMER channels, this function is used
 * in edma_reload() to mark the fact that the user of the EDMA driver
 * has produced data.
 */
static inline int edma_chan_cyclic_produce(struct edma_channel *chan,
					   uint32_t bytes)
{
	if (bytes > chan->stat.free) {
		return -EINVAL;
	}

	chan->stat.write_position =
		(chan->stat.write_position + bytes) % chan->bsize;

	if (chan->stat.write_position > chan->stat.read_position) {
		chan->stat.pending_length = chan->stat.write_position -
			chan->stat.read_position;
	} else if (chan->stat.write_position == chan->stat.read_position) {
		chan->stat.pending_length = chan->bsize;
	} else {
		chan->stat.pending_length = chan->bsize -
			(chan->stat.read_position - chan->stat.write_position);
	}

	chan->stat.free = chan->bsize - chan->stat.pending_length;

	return 0;
}
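
/* worked example (illustrative numbers): with bsize = 64, write_position = 48
 * and read_position = 16, producing 32 bytes wraps the write position to
 * (48 + 32) % 64 = 16. write == read is treated as a full buffer after a
 * produce, so pending_length becomes 64 and free 0.
 */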

static inline void edma_dump_channel_registers(struct edma_data *data,
					       uint32_t chan_id)
{
	uint32_t mux_reg;

	LOG_DBG("dumping channel data for channel %d", chan_id);

	LOG_DBG("CH_CSR: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_CSR));
	LOG_DBG("CH_ES: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_ES));
	LOG_DBG("CH_INT: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_INT));
	LOG_DBG("CH_SBR: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_SBR));
	LOG_DBG("CH_PRI: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_PRI));

	if (EDMA_HAS_MUX(data->hal_cfg)) {
		if (data->hal_cfg->flags & EDMA_HAS_MP_MUX_FLAG) {
			mux_reg = EDMA_MP_CH_MUX;
		} else {
			mux_reg = EDMA_TCD_CH_MUX;
		}

		LOG_DBG("CH_MUX: 0x%x", EDMA_ChannelRegRead(data->hal_cfg, chan_id, mux_reg));
	}

	LOG_DBG("TCD_SADDR: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_SADDR));
	LOG_DBG("TCD_SOFF: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_SOFF));
	LOG_DBG("TCD_ATTR: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_ATTR));
	LOG_DBG("TCD_NBYTES: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_NBYTES));
	LOG_DBG("TCD_SLAST_SDA: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_SLAST_SDA));
	LOG_DBG("TCD_DADDR: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_DADDR));
	LOG_DBG("TCD_DOFF: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_DOFF));
	LOG_DBG("TCD_CITER: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CITER));
	LOG_DBG("TCD_DLAST_SGA: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_DLAST_SGA));
	LOG_DBG("TCD_CSR: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CSR));
	LOG_DBG("TCD_BITER: 0x%x",
		EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_BITER));
}

static inline int set_slast_dlast(struct dma_config *dma_cfg,
				  uint32_t transfer_type,
				  struct edma_data *data,
				  uint32_t chan_id)
{
	int32_t slast, dlast;

	if (transfer_type == kEDMA_TransferTypeP2M) {
		slast = 0;
	} else {
		switch (dma_cfg->head_block->source_addr_adj) {
		case DMA_ADDR_ADJ_INCREMENT:
			slast = (int32_t)dma_cfg->head_block->block_size;
			break;
		case DMA_ADDR_ADJ_DECREMENT:
			slast = (-1) * (int32_t)dma_cfg->head_block->block_size;
			break;
		default:
			LOG_ERR("unsupported SADDR adjustment: %d",
				dma_cfg->head_block->source_addr_adj);
			return -EINVAL;
		}
	}

	if (transfer_type == kEDMA_TransferTypeM2P) {
		dlast = 0;
	} else {
		switch (dma_cfg->head_block->dest_addr_adj) {
		case DMA_ADDR_ADJ_INCREMENT:
			dlast = (int32_t)dma_cfg->head_block->block_size;
			break;
		case DMA_ADDR_ADJ_DECREMENT:
			dlast = (-1) * (int32_t)dma_cfg->head_block->block_size;
			break;
		default:
			LOG_ERR("unsupported DADDR adjustment: %d",
				dma_cfg->head_block->dest_addr_adj);
			return -EINVAL;
		}
	}

	LOG_DBG("attempting to commit SLAST %d", slast);
	LOG_DBG("attempting to commit DLAST %d", dlast);

	/* commit configuration */
	EDMA_ChannelRegWrite(data->hal_cfg, chan_id, EDMA_TCD_SLAST_SDA, slast);
	EDMA_ChannelRegWrite(data->hal_cfg, chan_id, EDMA_TCD_DLAST_SGA, dlast);

	if (data->hal_cfg->flags & EDMA_HAS_64BIT_TCD_FLAG) {
		EDMA_ChannelRegWrite(data->hal_cfg, chan_id, EDMA_TCD_SLAST_SDA_HIGH,
				     slast >= 0x0 ? 0x0 : 0xffffffff);
		EDMA_ChannelRegWrite(data->hal_cfg, chan_id, EDMA_TCD_DLAST_SGA_HIGH,
				     dlast >= 0x0 ? 0x0 : 0xffffffff);
	}

	return 0;
}
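
/* example (illustrative numbers): for a P2M transfer into an incrementing
 * destination with block_size 0x1000, this yields SLAST = 0 and
 * DLAST_SGA = 0x1000; on 64-bit TCDs the HIGH words hold the sign
 * extension of these values.
 */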

/* the NXP HAL EDMA driver uses some custom return values
 * that need to be converted to standard error codes. This function
 * performs exactly this translation.
 */
static inline int to_std_error(int edma_err)
{
	switch (edma_err) {
	case kStatus_EDMA_InvalidConfiguration:
	case kStatus_InvalidArgument:
		return -EINVAL;
	case kStatus_Busy:
		return -EBUSY;
	default:
		LOG_ERR("unknown EDMA error code: %d", edma_err);
		return -EINVAL;
	}
}

#endif /* ZEPHYR_DRIVERS_DMA_DMA_NXP_EDMA_H_ */