/*
 * Copyright 2024 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "dma_nxp_edma.h"

/* TODO list:
 * 1) Support for requesting a specific channel.
 * 2) Support for checking if DMA transfer is pending when attempting config. (?)
 * 3) Support for error interrupt.
 * 4) Support for error if buffer overflow/underrun.
 * 5) Ideally, HALFMAJOR should be set on a per-channel basis not through a
 * config. If not possible, this should be done through a DTS property. Also,
 * maybe do the same for INTMAJOR IRQ.
 */

static void edma_isr(const void *parameter)
{
	const struct edma_config *cfg;
	struct edma_data *data;
	struct edma_channel *chan;
	int ret;
	uint32_t update_size;

	chan = (struct edma_channel *)parameter;
	cfg = chan->dev->config;
	data = chan->dev->data;

	if (!EDMA_ChannelRegRead(data->hal_cfg, chan->id, EDMA_TCD_CH_INT)) {
		/* skip, interrupt was probably triggered by another channel */
		return;
	}

	/* clear interrupt */
	EDMA_ChannelRegUpdate(data->hal_cfg, chan->id,
			      EDMA_TCD_CH_INT, EDMA_TCD_CH_INT_MASK, 0);

	if (chan->cyclic_buffer) {
		if (IS_ENABLED(CONFIG_DMA_NXP_EDMA_ENABLE_HALFMAJOR_IRQ)) {
			update_size = chan->bsize / 2;
		} else {
			update_size = chan->bsize;
		}
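		/* e.g., with a 384-byte cyclic buffer and
		 * CONFIG_DMA_NXP_EDMA_ENABLE_HALFMAJOR_IRQ=y, each interrupt
		 * accounts for 192 bytes of produced/consumed data.
		 */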

		/* TODO: add support for error handling here */
		ret = EDMA_CHAN_PRODUCE_CONSUME_A(chan, update_size);
		if (ret < 0) {
			LOG_ERR("chan %d buffer overflow/underrun", chan->id);
		}
	}

	/* TODO: are there any sanity checks we have to perform before invoking
	 * the registered callback?
	 */
	if (chan->cb) {
		chan->cb(chan->dev, chan->arg, chan->id, DMA_STATUS_COMPLETE);
	}
}

static struct edma_channel *lookup_channel(const struct device *dev,
					   uint32_t chan_id)
{
	struct edma_data *data;
	const struct edma_config *cfg;
	int i;

	data = dev->data;
	cfg = dev->config;

	/* optimization: if the dma-channels property is present then
	 * the channel data associated with the passed channel ID
	 * can be found at index chan_id in the array of channels.
	 */
	if (cfg->contiguous_channels) {
		/* check for index out of bounds */
		if (chan_id >= data->ctx.dma_channels) {
			return NULL;
		}

		return &data->channels[chan_id];
	}

	/* channels are specified through the valid-channels property.
	 * As such, since some channels may be missing, we need to
	 * search the entire channel array for an ID match.
	 */
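	/* e.g., assuming dma-channels = <8>, channel 5 lives at data->channels[5],
	 * while with valid-channels = <0 4 7> channel 7 lives at data->channels[2],
	 * hence the linear search below.
	 */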
	for (i = 0; i < data->ctx.dma_channels; i++) {
		if (data->channels[i].id == chan_id) {
			return &data->channels[i];
		}
	}

	return NULL;
}

static int edma_config(const struct device *dev, uint32_t chan_id,
		       struct dma_config *dma_cfg)
{
	struct edma_data *data;
	const struct edma_config *cfg;
	struct edma_channel *chan;
	uint32_t transfer_type;
	int ret;

	data = dev->data;
	cfg = dev->config;

	if (!dma_cfg->head_block) {
		LOG_ERR("head block shouldn't be NULL");
		return -EINVAL;
	}

	/* validate source data size (SSIZE) */
	if (!EDMA_TransferWidthIsValid(data->hal_cfg, dma_cfg->source_data_size)) {
		LOG_ERR("invalid source data size: %d",
			dma_cfg->source_data_size);
		return -EINVAL;
	}

	/* validate destination data size (DSIZE) */
	if (!EDMA_TransferWidthIsValid(data->hal_cfg, dma_cfg->dest_data_size)) {
		LOG_ERR("invalid destination data size: %d",
			dma_cfg->dest_data_size);
		return -EINVAL;
	}

	/* validate configured alignment */
	if (!EDMA_TransferWidthIsValid(data->hal_cfg, CONFIG_DMA_NXP_EDMA_ALIGN)) {
		LOG_ERR("configured alignment %d is invalid",
			CONFIG_DMA_NXP_EDMA_ALIGN);
		return -EINVAL;
	}


	/* Scatter-Gather configurations are currently not supported */
	if (dma_cfg->block_count != 1) {
		LOG_ERR("number of blocks %d not supported", dma_cfg->block_count);
		return -ENOTSUP;
	}

	/* source address shouldn't be NULL */
	if (!dma_cfg->head_block->source_address) {
		LOG_ERR("source address cannot be NULL");
		return -EINVAL;
	}

	/* destination address shouldn't be NULL */
	if (!dma_cfg->head_block->dest_address) {
		LOG_ERR("destination address cannot be NULL");
		return -EINVAL;
	}

	/* check source address's (SADDR) alignment with respect to the data size (SSIZE)
	 *
	 * Failing to meet this condition will lead to the assertion of the SAE
	 * bit (see CHn_ES register).
	 *
	 * TODO: this will also restrict scenarios such as the following:
	 * SADDR is 8B aligned and SSIZE is 16B. I've tested this
	 * scenario and it seems to raise no hardware errors (I'm assuming
	 * because this doesn't break the 8B boundary of the 64-bit system
	 * I tested it on). Is there a need to allow such a scenario?
	 */
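	/* e.g., source_address = 0x80000002 with source_data_size = 4 would
	 * fail this check (and, if let through, would set SAE in CHn_ES).
	 */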
	if (dma_cfg->head_block->source_address % dma_cfg->source_data_size) {
		LOG_ERR("source address 0x%x alignment doesn't match data size %d",
			dma_cfg->head_block->source_address,
			dma_cfg->source_data_size);
		return -EINVAL;
	}

	/* check destination address's (DADDR) alignment with respect to the data size (DSIZE)
	 * Failing to meet this condition will lead to the assertion of the DAE
	 * bit (see CHn_ES register).
	 */
	if (dma_cfg->head_block->dest_address % dma_cfg->dest_data_size) {
		LOG_ERR("destination address 0x%x alignment doesn't match data size %d",
			dma_cfg->head_block->dest_address,
			dma_cfg->dest_data_size);
		return -EINVAL;
	}

	/* source burst length should match destination burst length.
	 * This is because the burst length is the equivalent of NBYTES which
	 * is used for both the destination and the source.
	 */
	if (dma_cfg->source_burst_length !=
	    dma_cfg->dest_burst_length) {
		LOG_ERR("source burst length %d doesn't match destination burst length %d",
			dma_cfg->source_burst_length,
			dma_cfg->dest_burst_length);
		return -EINVAL;
	}

	/* total number of bytes should be a multiple of NBYTES.
	 *
	 * This is needed because the EDMA engine performs transfers based
	 * on CITER (integer value) and NBYTES, thus it has no knowledge of
	 * the total transfer size. If the total transfer size is not a
	 * multiple of NBYTES then we'll end up copying the wrong number
	 * of bytes (since CITER = TOTAL_SIZE / NBYTES uses integer division).
	 * This, of course, raises no error in the hardware but it's still wrong.
	 */
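	/* e.g., block_size = 1000 with NBYTES = 192 would yield
	 * CITER = 1000 / 192 = 5, so only 960 of the 1000 bytes
	 * would actually be transferred.
	 */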
	if (dma_cfg->head_block->block_size % dma_cfg->source_burst_length) {
		LOG_ERR("block size %d should be a multiple of NBYTES %d",
			dma_cfg->head_block->block_size,
			dma_cfg->source_burst_length);
		return -EINVAL;
	}

	/* check if NBYTES is a multiple of MAX(SSIZE, DSIZE).
	 *
	 * This stems from the fact that NBYTES needs to be a multiple
	 * of both SSIZE AND DSIZE. If NBYTES is a multiple of MAX(SSIZE, DSIZE)
	 * then it is guaranteed to satisfy the aforementioned condition (since
	 * SSIZE and DSIZE are powers of 2).
	 *
	 * Failing to meet this condition will lead to the assertion of the
	 * NCE bit (see CHn_ES register).
	 */
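	/* e.g., with SSIZE = 2 and DSIZE = 4, any NBYTES that is a multiple
	 * of MAX(2, 4) = 4 is automatically also a multiple of 2.
	 */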
	if (dma_cfg->source_burst_length %
	    MAX(dma_cfg->source_data_size, dma_cfg->dest_data_size)) {
		LOG_ERR("NBYTES %d should be a multiple of MAX(SSIZE(%d), DSIZE(%d))",
			dma_cfg->source_burst_length,
			dma_cfg->source_data_size,
			dma_cfg->dest_data_size);
		return -EINVAL;
	}

	/* fetch channel data */
	chan = lookup_channel(dev, chan_id);
	if (!chan) {
		LOG_ERR("channel ID %u is not valid", chan_id);
		return -EINVAL;
	}

	/* save the block size for later usage in edma_reload */
	chan->bsize = dma_cfg->head_block->block_size;

	if (dma_cfg->cyclic) {
		chan->cyclic_buffer = true;

		chan->stat.read_position = 0;
		chan->stat.write_position = 0;

		/* ASSUMPTION: for CONSUMER-type channels, the buffer from
		 * which the engine consumes should be full, while in the
		 * case of PRODUCER-type channels it should be empty.
		 */
		switch (dma_cfg->channel_direction) {
		case MEMORY_TO_PERIPHERAL:
			chan->type = CHAN_TYPE_CONSUMER;
			chan->stat.free = 0;
			chan->stat.pending_length = chan->bsize;
			break;
		case PERIPHERAL_TO_MEMORY:
			chan->type = CHAN_TYPE_PRODUCER;
			chan->stat.pending_length = 0;
			chan->stat.free = chan->bsize;
			break;
		default:
			LOG_ERR("unsupported transfer dir %d for cyclic mode",
				dma_cfg->channel_direction);
			return -ENOTSUP;
		}
	} else {
		chan->cyclic_buffer = false;
	}

	/* change channel's state to CONFIGURED */
	ret = channel_change_state(chan, CHAN_STATE_CONFIGURED);
	if (ret < 0) {
		LOG_ERR("failed to change channel %d state to CONFIGURED", chan_id);
		return ret;
	}

	ret = get_transfer_type(dma_cfg->channel_direction, &transfer_type);
	if (ret < 0) {
		return ret;
	}

	chan->cb = dma_cfg->dma_callback;
	chan->arg = dma_cfg->user_data;

	/* warning: this sets SOFF and DOFF to SSIZE and DSIZE which are POSITIVE. */
	ret = EDMA_ConfigureTransfer(data->hal_cfg, chan_id,
				     dma_cfg->head_block->source_address,
				     dma_cfg->head_block->dest_address,
				     dma_cfg->source_data_size,
				     dma_cfg->dest_data_size,
				     dma_cfg->source_burst_length,
				     dma_cfg->head_block->block_size,
				     transfer_type);
	if (ret < 0) {
		LOG_ERR("failed to configure transfer");
		return to_std_error(ret);
	}

	/* TODO: channel MUX should be forced to 0 based on the previous state */
	if (EDMA_HAS_MUX(data->hal_cfg)) {
		ret = EDMA_SetChannelMux(data->hal_cfg, chan_id, dma_cfg->dma_slot);
		if (ret < 0) {
			LOG_ERR("failed to set channel MUX");
			return to_std_error(ret);
		}
	}

	/* set SLAST and DLAST */
	ret = set_slast_dlast(dma_cfg, transfer_type, data, chan_id);
	if (ret < 0) {
		return ret;
	}

	/* allow interrupting the CPU when a major cycle is completed.
	 *
	 * interesting note: only 1 minor loop is performed per slave peripheral
	 * DMA request. For instance, if block_size = 768 and burst_size = 192
	 * we're going to get 4 transfers of 192 bytes. Each of these transfers
	 * translates to a DMA request made by the slave peripheral.
	 */
	EDMA_ChannelRegUpdate(data->hal_cfg, chan_id,
			      EDMA_TCD_CSR, EDMA_TCD_CSR_INTMAJOR_MASK, 0);

	if (IS_ENABLED(CONFIG_DMA_NXP_EDMA_ENABLE_HALFMAJOR_IRQ)) {
		/* if enabled through the above configuration, also
		 * allow the CPU to be interrupted when CITER = BITER / 2.
		 */
		EDMA_ChannelRegUpdate(data->hal_cfg, chan_id, EDMA_TCD_CSR,
				      EDMA_TCD_CSR_INTHALF_MASK, 0);
	}
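
	/* note: with both INTMAJOR and INTHALF set, the CPU is interrupted twice
	 * per major loop: once at the halfway point (CITER == BITER / 2) and once
	 * at completion, which is what edma_isr()'s half-buffer update relies on.
	 */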

	/* enable channel interrupt */
	irq_enable(chan->irq);

	/* dump register status - for debugging purposes */
	edma_dump_channel_registers(data, chan_id);

	return 0;
}

static int edma_get_status(const struct device *dev, uint32_t chan_id,
			   struct dma_status *stat)
{
	struct edma_data *data;
	struct edma_channel *chan;
	uint32_t citer, biter, done;
	unsigned int key;

	data = dev->data;

	/* fetch channel data */
	chan = lookup_channel(dev, chan_id);
	if (!chan) {
		LOG_ERR("channel ID %u is not valid", chan_id);
		return -EINVAL;
	}

	if (chan->cyclic_buffer) {
		key = irq_lock();

		stat->free = chan->stat.free;
		stat->pending_length = chan->stat.pending_length;

		irq_unlock(key);
	} else {
		/* note: no locking required here. The DMA interrupts
		 * have no effect over CITER and BITER.
		 */
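		/* e.g., bsize = 768 with NBYTES = 192 gives BITER = 4; after one
		 * minor loop CITER drops to 3, so free = (4 - 3) * (768 / 4) = 192
		 * and pending_length = 768 - 192 = 576.
		 */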
		citer = EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CITER);
		biter = EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_BITER);
		done = EDMA_ChannelRegRead(data->hal_cfg, chan_id, EDMA_TCD_CH_CSR) &
		       EDMA_TCD_CH_CSR_DONE_MASK;
		if (done) {
			stat->free = chan->bsize;
			stat->pending_length = 0;
		} else {
			stat->free = (biter - citer) * (chan->bsize / biter);
			stat->pending_length = chan->bsize - stat->free;
		}
	}

	LOG_DBG("free: %d, pending: %d", stat->free, stat->pending_length);

	return 0;
}

static int edma_suspend(const struct device *dev, uint32_t chan_id)
{
	struct edma_data *data;
	const struct edma_config *cfg;
	struct edma_channel *chan;
	int ret;

	data = dev->data;
	cfg = dev->config;

	/* fetch channel data */
	chan = lookup_channel(dev, chan_id);
	if (!chan) {
		LOG_ERR("channel ID %u is not valid", chan_id);
		return -EINVAL;
	}

	edma_dump_channel_registers(data, chan_id);

	/* change channel's state to SUSPENDED */
	ret = channel_change_state(chan, CHAN_STATE_SUSPENDED);
	if (ret < 0) {
		LOG_ERR("failed to change channel %d state to SUSPENDED", chan_id);
		return ret;
	}

	LOG_DBG("suspending channel %u", chan_id);

	/* disable HW requests */
	EDMA_ChannelRegUpdate(data->hal_cfg, chan_id,
			      EDMA_TCD_CH_CSR, 0, EDMA_TCD_CH_CSR_ERQ_MASK);

	return 0;
}

static int edma_stop(const struct device *dev, uint32_t chan_id)
{
	struct edma_data *data;
	const struct edma_config *cfg;
	struct edma_channel *chan;
	enum channel_state prev_state;
	int ret;

	data = dev->data;
	cfg = dev->config;

	/* fetch channel data */
	chan = lookup_channel(dev, chan_id);
	if (!chan) {
		LOG_ERR("channel ID %u is not valid", chan_id);
		return -EINVAL;
	}

	prev_state = chan->state;

	/* change channel's state to STOPPED */
	ret = channel_change_state(chan, CHAN_STATE_STOPPED);
	if (ret < 0) {
		LOG_ERR("failed to change channel %d state to STOPPED", chan_id);
		return ret;
	}

	LOG_DBG("stopping channel %u", chan_id);

	if (prev_state == CHAN_STATE_SUSPENDED) {
		/* if the channel has been suspended then there's
		 * no point in disabling the HW requests again. Just
		 * jump to the channel release operation.
		 */
		goto out_release_channel;
	}

	/* disable HW requests */
	EDMA_ChannelRegUpdate(data->hal_cfg, chan_id, EDMA_TCD_CH_CSR, 0,
			      EDMA_TCD_CH_CSR_ERQ_MASK);
out_release_channel:

	/* clear the channel MUX so that it can be used by a different peripheral.
	 *
	 * note: because the channel is released during dma_stop(), that means
	 * dma_start() can no longer be immediately called. This is because
	 * one needs to re-configure the channel MUX, which can only be done
	 * through dma_config(). As such, if one intends to reuse the current
	 * configuration then please call dma_suspend() instead of dma_stop().
	 */
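	/* i.e., the expected pause/resume sequence is dma_suspend() followed by
	 * dma_resume(), whereas after dma_stop() a new dma_config() is required
	 * before dma_start() can be called again.
	 */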
	if (EDMA_HAS_MUX(data->hal_cfg)) {
		ret = EDMA_SetChannelMux(data->hal_cfg, chan_id, 0);
		if (ret < 0) {
			LOG_ERR("failed to set channel MUX");
			return to_std_error(ret);
		}
	}

	edma_dump_channel_registers(data, chan_id);

	return 0;
}

static int edma_start(const struct device *dev, uint32_t chan_id)
{
	struct edma_data *data;
	const struct edma_config *cfg;
	struct edma_channel *chan;
	int ret;

	data = dev->data;
	cfg = dev->config;

	/* fetch channel data */
	chan = lookup_channel(dev, chan_id);
	if (!chan) {
		LOG_ERR("channel ID %u is not valid", chan_id);
		return -EINVAL;
	}

	/* change channel's state to STARTED */
	ret = channel_change_state(chan, CHAN_STATE_STARTED);
	if (ret < 0) {
		LOG_ERR("failed to change channel %d state to STARTED", chan_id);
		return ret;
	}

	LOG_DBG("starting channel %u", chan_id);

	/* enable HW requests */
	EDMA_ChannelRegUpdate(data->hal_cfg, chan_id,
			      EDMA_TCD_CH_CSR, EDMA_TCD_CH_CSR_ERQ_MASK, 0);

	return 0;
}

static int edma_reload(const struct device *dev, uint32_t chan_id, uint32_t src,
		       uint32_t dst, size_t size)
{
	struct edma_data *data;
	struct edma_channel *chan;
	int ret;
	unsigned int key;

	data = dev->data;

	/* fetch channel data */
	chan = lookup_channel(dev, chan_id);
	if (!chan) {
		LOG_ERR("channel ID %u is not valid", chan_id);
		return -EINVAL;
	}

	/* channel needs to be started to allow reloading */
	if (chan->state != CHAN_STATE_STARTED) {
		LOG_ERR("reload is only supported on started channels");
		return -EINVAL;
	}

	if (chan->cyclic_buffer) {
		key = irq_lock();
		ret = EDMA_CHAN_PRODUCE_CONSUME_B(chan, size);
		irq_unlock(key);
		if (ret < 0) {
			LOG_ERR("chan %d buffer overflow/underrun", chan_id);
			return ret;
		}
	}

	return 0;
}

static int edma_get_attribute(const struct device *dev, uint32_t type, uint32_t *val)
{
	switch (type) {
	case DMA_ATTR_BUFFER_SIZE_ALIGNMENT:
	case DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT:
		*val = CONFIG_DMA_NXP_EDMA_ALIGN;
		break;
	case DMA_ATTR_MAX_BLOCK_COUNT:
		/* this is restricted to 1 because SG configurations are not supported */
		*val = 1;
		break;
	default:
		LOG_ERR("invalid attribute type: %d", type);
		return -EINVAL;
	}

	return 0;
}

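/* used via dma_request_channel(); e.g., a caller could explicitly request
 * channel 3 like so:
 *
 *	int wanted = 3;
 *	int chan = dma_request_channel(edma_dev, &wanted);
 *
 * chan is 3 on success or a negative errno-style value otherwise.
 */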
static bool edma_channel_filter(const struct device *dev, int chan_id, void *param)
{
	int *requested_channel;

	if (!param) {
		return false;
	}

	requested_channel = param;

	if (*requested_channel == chan_id && lookup_channel(dev, chan_id)) {
		return true;
	}

	return false;
}

static const struct dma_driver_api edma_api = {
	.reload = edma_reload,
	.config = edma_config,
	.start = edma_start,
	.stop = edma_stop,
	.suspend = edma_suspend,
	.resume = edma_start,
	.get_status = edma_get_status,
	.get_attribute = edma_get_attribute,
	.chan_filter = edma_channel_filter,
};

static int edma_init(const struct device *dev)
{
	const struct edma_config *cfg;
	struct edma_data *data;
	mm_reg_t regmap;

	data = dev->data;
	cfg = dev->config;

	/* map instance MMIO */
	device_map(&regmap, cfg->regmap_phys, cfg->regmap_size, K_MEM_CACHE_NONE);

	/* overwrite physical address set in the HAL configuration.
	 * We can down-cast the virtual address to a 32-bit address because
	 * we know we're working with 32-bit addresses only.
	 */
	data->hal_cfg->regmap = (uint32_t)POINTER_TO_UINT(regmap);

	cfg->irq_config();

	/* dma_request_channel() uses this variable to keep track of the
	 * available channels. As such, it needs to be initialized to 0,
	 * which signifies that all channels are initially available.
	 */
	data->channel_flags = ATOMIC_INIT(0);
	data->ctx.atomic = &data->channel_flags;
	data->ctx.dma_channels = data->hal_cfg->channels;

	return 0;
}

/* a few comments about the BUILD_ASSERT statements:
 * 1) dma-channels and valid-channels should be mutually exclusive.
 * This means you specify one or the other; there's no real
 * need to have both of them.
 * 2) The number of channels should match the number of interrupts for
 * said channels (TODO: what about error interrupts?)
 * 3) The channel-mux property shouldn't be specified unless
 * the eDMA is MUX-capable (signaled via the EDMA_HAS_CHAN_MUX
 * configuration).
 */
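/* illustrative devicetree sketches of the two mutually exclusive styles
 * (values and unit addresses are placeholders, not taken from a real board):
 *
 *	edma0: dma-controller@... {
 *		dma-channels = <32>;
 *	};
 *
 *	edma1: dma-controller@... {
 *		valid-channels = <0 4 7>;
 *	};
 */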
#define EDMA_INIT(inst)	\
	\
BUILD_ASSERT(!DT_NODE_HAS_PROP(DT_INST(inst, DT_DRV_COMPAT), dma_channels) ||	\
	     !DT_NODE_HAS_PROP(DT_INST(inst, DT_DRV_COMPAT), valid_channels),	\
	     "dma_channels and valid_channels are mutually exclusive");	\
	\
BUILD_ASSERT(DT_INST_PROP_OR(inst, dma_channels, 0) ==	\
	     DT_NUM_IRQS(DT_INST(inst, DT_DRV_COMPAT)) ||	\
	     DT_INST_PROP_LEN_OR(inst, valid_channels, 0) ==	\
	     DT_NUM_IRQS(DT_INST(inst, DT_DRV_COMPAT)),	\
	     "number of interrupts needs to match number of channels");	\
	\
BUILD_ASSERT(DT_PROP_OR(DT_INST(inst, DT_DRV_COMPAT), hal_cfg_index, 0) <	\
	     ARRAY_SIZE(s_edmaConfigs),	\
	     "HAL configuration index out of bounds");	\
	\
static struct edma_channel channels_##inst[] = EDMA_CHANNEL_ARRAY_GET(inst);	\
	\
static void interrupt_config_function_##inst(void)	\
{	\
	EDMA_CONNECT_INTERRUPTS(inst);	\
}	\
	\
static struct edma_config edma_config_##inst = {	\
	.regmap_phys = DT_INST_REG_ADDR(inst),	\
	.regmap_size = DT_INST_REG_SIZE(inst),	\
	.irq_config = interrupt_config_function_##inst,	\
	.contiguous_channels = EDMA_CHANS_ARE_CONTIGUOUS(inst),	\
};	\
	\
static struct edma_data edma_data_##inst = {	\
	.channels = channels_##inst,	\
	.ctx.magic = DMA_MAGIC,	\
	.hal_cfg = &EDMA_HAL_CFG_GET(inst),	\
};	\
	\
DEVICE_DT_INST_DEFINE(inst, &edma_init, NULL,	\
		      &edma_data_##inst, &edma_config_##inst,	\
		      PRE_KERNEL_1, CONFIG_DMA_INIT_PRIORITY,	\
		      &edma_api);	\

DT_INST_FOREACH_STATUS_OKAY(EDMA_INIT);