/*
 * Copyright (c) 2023, Meta
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#include <zephyr/pm/device.h>
#include <zephyr/sys/util.h>

#define DT_DRV_COMPAT zephyr_dma_emul

#ifdef CONFIG_DMA_64BIT
#define dma_addr_t uint64_t
#else
#define dma_addr_t uint32_t
#endif
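
/*
 * Channel lifecycle used by this emulated controller:
 *
 *   UNUSED/STOPPED --dma_emul_configure()--> LOADED
 *   LOADED/STOPPED --dma_emul_start()------> STARTED
 *   STARTED --dma_emul_stop() or transfer completion--> STOPPED
 */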
enum dma_emul_channel_state {
	DMA_EMUL_CHANNEL_UNUSED,
	DMA_EMUL_CHANNEL_LOADED,
	DMA_EMUL_CHANNEL_STARTED,
	DMA_EMUL_CHANNEL_STOPPED,
};

struct dma_emul_xfer_desc {
	struct dma_config config;
};

struct dma_emul_work {
	const struct device *dev;
	uint32_t channel;
	struct k_work work;
};

struct dma_emul_config {
	uint32_t channel_mask;
	size_t num_channels;
	size_t num_requests;
	size_t addr_align;
	size_t size_align;
	size_t copy_align;

	k_thread_stack_t *work_q_stack;
	size_t work_q_stack_size;
	int work_q_priority;

	/* points to an array of size num_channels */
	struct dma_emul_xfer_desc *xfer;
	/* points to an array of size num_channels * num_requests */
	struct dma_block_config *block;
};

struct dma_emul_data {
	struct dma_context dma_ctx;
	atomic_t *channels_atomic;
	struct k_spinlock lock;
	struct k_work_q work_q;
	struct dma_emul_work work;
};

static void dma_emul_work_handler(struct k_work *work);

LOG_MODULE_REGISTER(dma_emul, CONFIG_DMA_LOG_LEVEL);

static inline bool dma_emul_xfer_is_error_status(int status)
{
	return status < 0;
}

static inline const char *const dma_emul_channel_state_to_string(enum dma_emul_channel_state state)
{
	switch (state) {
	case DMA_EMUL_CHANNEL_UNUSED:
		return "UNUSED";
	case DMA_EMUL_CHANNEL_LOADED:
		return "LOADED";
	case DMA_EMUL_CHANNEL_STARTED:
		return "STARTED";
	case DMA_EMUL_CHANNEL_STOPPED:
		return "STOPPED";
	default:
		return "(invalid)";
	}
}

/*
 * Repurpose the "_reserved" field for keeping track of internal
 * channel state.
 *
 * Note: these must be called with data->lock locked!
 */
static enum dma_emul_channel_state dma_emul_get_channel_state(const struct device *dev,
							      uint32_t channel)
{
	const struct dma_emul_config *config = dev->config;

	__ASSERT_NO_MSG(channel < config->num_channels);

	return (enum dma_emul_channel_state)config->xfer[channel].config._reserved;
}

static void dma_emul_set_channel_state(const struct device *dev, uint32_t channel,
				       enum dma_emul_channel_state state)
{
	const struct dma_emul_config *config = dev->config;

	LOG_DBG("setting channel %u state to %s", channel, dma_emul_channel_state_to_string(state));

	__ASSERT_NO_MSG(channel < config->num_channels);
	__ASSERT_NO_MSG(state >= DMA_EMUL_CHANNEL_UNUSED && state <= DMA_EMUL_CHANNEL_STOPPED);

	config->xfer[channel].config._reserved = state;
}

static const char *dma_emul_xfer_config_to_string(const struct dma_config *cfg)
{
	static char buffer[1024];

	snprintf(buffer, sizeof(buffer),
		 "{"
		 "\n\tslot: %u"
		 "\n\tchannel_direction: %u"
		 "\n\tcomplete_callback_en: %u"
		 "\n\terror_callback_dis: %u"
		 "\n\tsource_handshake: %u"
		 "\n\tdest_handshake: %u"
		 "\n\tchannel_priority: %u"
		 "\n\tsource_chaining_en: %u"
		 "\n\tdest_chaining_en: %u"
		 "\n\tlinked_channel: %u"
		 "\n\tcyclic: %u"
		 "\n\t_reserved: %u"
		 "\n\tsource_data_size: %u"
		 "\n\tdest_data_size: %u"
		 "\n\tsource_burst_length: %u"
		 "\n\tdest_burst_length: %u"
		 "\n\tblock_count: %u"
		 "\n\thead_block: %p"
		 "\n\tuser_data: %p"
		 "\n\tdma_callback: %p"
		 "\n}",
		 cfg->dma_slot, cfg->channel_direction, cfg->complete_callback_en,
		 cfg->error_callback_dis, cfg->source_handshake, cfg->dest_handshake,
		 cfg->channel_priority, cfg->source_chaining_en, cfg->dest_chaining_en,
		 cfg->linked_channel, cfg->cyclic, cfg->_reserved, cfg->source_data_size,
		 cfg->dest_data_size, cfg->source_burst_length, cfg->dest_burst_length,
		 cfg->block_count, cfg->head_block, cfg->user_data, cfg->dma_callback);

	return buffer;
}

static const char *dma_emul_block_config_to_string(const struct dma_block_config *cfg)
{
	static char buffer[1024];

	snprintf(buffer, sizeof(buffer),
		 "{"
		 "\n\tsource_address: %p"
		 "\n\tdest_address: %p"
		 "\n\tsource_gather_interval: %u"
		 "\n\tdest_scatter_interval: %u"
		 "\n\tdest_scatter_count: %u"
		 "\n\tsource_gather_count: %u"
		 "\n\tblock_size: %u"
		 "\n\tnext_block: %p"
		 "\n\tsource_gather_en: %u"
		 "\n\tdest_scatter_en: %u"
		 "\n\tsource_addr_adj: %u"
		 "\n\tdest_addr_adj: %u"
		 "\n\tsource_reload_en: %u"
		 "\n\tdest_reload_en: %u"
		 "\n\tfifo_mode_control: %u"
		 "\n\tflow_control_mode: %u"
		 "\n\t_reserved: %u"
		 "\n}",
		 (void *)cfg->source_address, (void *)cfg->dest_address,
		 cfg->source_gather_interval, cfg->dest_scatter_interval, cfg->dest_scatter_count,
		 cfg->source_gather_count, cfg->block_size, cfg->next_block, cfg->source_gather_en,
		 cfg->dest_scatter_en, cfg->source_addr_adj, cfg->dest_addr_adj,
		 cfg->source_reload_en, cfg->dest_reload_en, cfg->fifo_mode_control,
		 cfg->flow_control_mode, cfg->_reserved);

	return buffer;
}

static void dma_emul_work_handler(struct k_work *work)
{
	size_t i;
	size_t bytes;
	uint32_t channel;
	k_spinlock_key_t key;
	struct dma_block_config block;
	struct dma_config xfer_config;
	enum dma_emul_channel_state state;
	struct dma_emul_xfer_desc *xfer;
	struct dma_emul_work *dma_work = CONTAINER_OF(work, struct dma_emul_work, work);
	const struct device *dev = dma_work->dev;
	struct dma_emul_data *data = dev->data;
	const struct dma_emul_config *config = dev->config;

	channel = dma_work->channel;

	do {
		key = k_spin_lock(&data->lock);
		xfer = &config->xfer[channel];
		/*
		 * copy the dma_config so we don't have to worry about
		 * it being asynchronously updated.
		 */
		memcpy(&xfer_config, &xfer->config, sizeof(xfer_config));
		k_spin_unlock(&data->lock, key);

		LOG_DBG("processing xfer %p for channel %u", xfer, channel);
		for (i = 0; i < xfer_config.block_count; ++i) {

			LOG_DBG("processing block %zu", i);

			key = k_spin_lock(&data->lock);
			/*
			 * copy the dma_block_config so we don't have to worry about
			 * it being asynchronously updated.
			 */
			memcpy(&block,
			       &config->block[channel * config->num_requests +
					      xfer_config.dma_slot + i],
			       sizeof(block));
			k_spin_unlock(&data->lock, key);

			/* transfer data in bursts */
			for (bytes = MIN(block.block_size, xfer_config.dest_burst_length);
			     bytes > 0; block.block_size -= bytes, block.source_address += bytes,
			     block.dest_address += bytes,
			     bytes = MIN(block.block_size, xfer_config.dest_burst_length)) {

				key = k_spin_lock(&data->lock);
				state = dma_emul_get_channel_state(dev, channel);
				k_spin_unlock(&data->lock, key);

				if (state == DMA_EMUL_CHANNEL_STOPPED) {
					LOG_DBG("asynchronously canceled");
					if (!xfer_config.error_callback_dis) {
						xfer_config.dma_callback(dev, xfer_config.user_data,
									 channel, -ECANCELED);
					} else {
						LOG_DBG("error callback suppressed "
							"(error_callback_dis is set)");
					}
					goto out;
				}

				__ASSERT_NO_MSG(state == DMA_EMUL_CHANNEL_STARTED);

				/*
				 * FIXME: create a backend API (memcpy, TCP/UDP socket, etc)
				 * Simple copy for now
				 */
				memcpy((void *)(uintptr_t)block.dest_address,
				       (void *)(uintptr_t)block.source_address, bytes);
			}
		}

		key = k_spin_lock(&data->lock);
		dma_emul_set_channel_state(dev, channel, DMA_EMUL_CHANNEL_STOPPED);
		k_spin_unlock(&data->lock, key);

		/* FIXME: tests/drivers/dma/chan_blen_transfer/ does not set complete_callback_en */
		if (true) {
			xfer_config.dma_callback(dev, xfer_config.user_data, channel,
						 DMA_STATUS_COMPLETE);
		} else {
			LOG_DBG("complete_callback_en is not set");
		}

		if (xfer_config.source_chaining_en || xfer_config.dest_chaining_en) {
			LOG_DBG("%s(): Linked channel %u -> %u", __func__, channel,
				xfer_config.linked_channel);
			__ASSERT_NO_MSG(channel != xfer_config.linked_channel);
			channel = xfer_config.linked_channel;
		} else {
			LOG_DBG("%s(): done!", __func__);
			break;
		}
	} while (true);

out:
	return;
}

static bool dma_emul_config_valid(const struct device *dev, uint32_t channel,
				  const struct dma_config *xfer_config)
{
	size_t i;
	struct dma_block_config *block;
	const struct dma_emul_config *config = dev->config;

	if (xfer_config->dma_slot >= config->num_requests) {
		LOG_ERR("invalid dma_slot %u", xfer_config->dma_slot);
		return false;
	}

	if (channel >= config->num_channels) {
		LOG_ERR("invalid DMA channel %u", channel);
		return false;
	}

	if (xfer_config->dest_burst_length != xfer_config->source_burst_length) {
		LOG_ERR("burst length does not agree. source: %u dest: %u ",
			xfer_config->source_burst_length, xfer_config->dest_burst_length);
		return false;
	}

	for (i = 0, block = xfer_config->head_block; i < xfer_config->block_count;
	     ++i, block = block->next_block) {
		if (block == NULL) {
			LOG_ERR("block %zu / %u is NULL", i + 1, xfer_config->block_count);
			return false;
		}

		if (i >= config->num_requests) {
			LOG_ERR("not enough slots to store block %zu / %u", i + 1,
				xfer_config->block_count);
			return false;
		}
	}

	/*
	 * FIXME:
	 *
	 * Need to verify all of the fields in struct dma_config with different DT
	 * configurations so that the driver model is at least consistent and
	 * verified by CI.
	 */

	return true;
}

static int dma_emul_configure(const struct device *dev, uint32_t channel,
			      struct dma_config *xfer_config)
{
	size_t i;
	int ret = 0;
	size_t block_idx;
	k_spinlock_key_t key;
	struct dma_block_config *block;
	struct dma_block_config *block_it;
	enum dma_emul_channel_state state;
	struct dma_emul_xfer_desc *xfer;
	struct dma_emul_data *data = dev->data;
	const struct dma_emul_config *config = dev->config;

	if (!dma_emul_config_valid(dev, channel, xfer_config)) {
		return -EINVAL;
	}

	key = k_spin_lock(&data->lock);
	xfer = &config->xfer[channel];

	LOG_DBG("%s():\nchannel: %u\nconfig: %s", __func__, channel,
		dma_emul_xfer_config_to_string(xfer_config));

	block_idx = channel * config->num_requests + xfer_config->dma_slot;

	block = &config->block[block_idx];
	state = dma_emul_get_channel_state(dev, channel);
	switch (state) {
	case DMA_EMUL_CHANNEL_UNUSED:
	case DMA_EMUL_CHANNEL_STOPPED:
		/* copy the configuration into the driver */
		memcpy(&xfer->config, xfer_config, sizeof(xfer->config));

		/* copy all blocks into slots */
		for (i = 0, block_it = xfer_config->head_block; i < xfer_config->block_count;
		     ++i, block_it = block_it->next_block, ++block) {
			__ASSERT_NO_MSG(block_it != NULL);

			LOG_DBG("block_config %s", dma_emul_block_config_to_string(block_it));

			memcpy(block, block_it, sizeof(*block));
		}
		dma_emul_set_channel_state(dev, channel, DMA_EMUL_CHANNEL_LOADED);

		break;
	default:
		LOG_ERR("attempt to configure DMA in state %d", state);
		ret = -EBUSY;
	}
	k_spin_unlock(&data->lock, key);

	return ret;
}

static int dma_emul_reload(const struct device *dev, uint32_t channel, dma_addr_t src,
			   dma_addr_t dst, size_t size)
{
	LOG_DBG("%s()", __func__);

	return -ENOSYS;
}

static int dma_emul_start(const struct device *dev, uint32_t channel)
{
	int ret = 0;
	k_spinlock_key_t key;
	enum dma_emul_channel_state state;
	struct dma_emul_xfer_desc *xfer;
	struct dma_config *xfer_config;
	struct dma_emul_data *data = dev->data;
	const struct dma_emul_config *config = dev->config;

	LOG_DBG("%s(channel: %u)", __func__, channel);

	if (channel >= config->num_channels) {
		return -EINVAL;
	}

	key = k_spin_lock(&data->lock);
	xfer = &config->xfer[channel];
	state = dma_emul_get_channel_state(dev, channel);
	switch (state) {
	case DMA_EMUL_CHANNEL_STARTED:
		/* start after being started already is a no-op */
		break;
	case DMA_EMUL_CHANNEL_LOADED:
	case DMA_EMUL_CHANNEL_STOPPED:
		data->work.channel = channel;
		while (true) {
			dma_emul_set_channel_state(dev, channel, DMA_EMUL_CHANNEL_STARTED);

			xfer_config = &config->xfer[channel].config;
			if (xfer_config->source_chaining_en || xfer_config->dest_chaining_en) {
				LOG_DBG("%s(): Linked channel %u -> %u", __func__, channel,
					xfer_config->linked_channel);
				channel = xfer_config->linked_channel;
			} else {
				break;
			}
		}
		ret = k_work_submit_to_queue(&data->work_q, &data->work.work);
		ret = (ret < 0) ? ret : 0;
		break;
	default:
		LOG_ERR("attempt to start dma in invalid state %d", state);
		ret = -EIO;
		break;
	}
	k_spin_unlock(&data->lock, key);

	return ret;
}

static int dma_emul_stop(const struct device *dev, uint32_t channel)
{
	k_spinlock_key_t key;
	struct dma_emul_data *data = dev->data;

	key = k_spin_lock(&data->lock);
	dma_emul_set_channel_state(dev, channel, DMA_EMUL_CHANNEL_STOPPED);
	k_spin_unlock(&data->lock, key);

	return 0;
}

static int dma_emul_suspend(const struct device *dev, uint32_t channel)
{
	LOG_DBG("%s()", __func__);

	return -ENOSYS;
}

static int dma_emul_resume(const struct device *dev, uint32_t channel)
{
	LOG_DBG("%s()", __func__);

	return -ENOSYS;
}

static int dma_emul_get_status(const struct device *dev, uint32_t channel,
			       struct dma_status *status)
{
	LOG_DBG("%s()", __func__);

	return -ENOSYS;
}

static int dma_emul_get_attribute(const struct device *dev, uint32_t type, uint32_t *value)
{
	LOG_DBG("%s()", __func__);

	return -ENOSYS;
}

static bool dma_emul_chan_filter(const struct device *dev, int channel, void *filter_param)
{
	bool success;
	k_spinlock_key_t key;
	struct dma_emul_data *data = dev->data;

	key = k_spin_lock(&data->lock);
	/* let's assume the struct dma_context handles races properly */
	success = dma_emul_get_channel_state(dev, channel) == DMA_EMUL_CHANNEL_UNUSED;
	k_spin_unlock(&data->lock, key);

	return success;
}

static DEVICE_API(dma, dma_emul_driver_api) = {
	.config = dma_emul_configure,
	.reload = dma_emul_reload,
	.start = dma_emul_start,
	.stop = dma_emul_stop,
	.suspend = dma_emul_suspend,
	.resume = dma_emul_resume,
	.get_status = dma_emul_get_status,
	.get_attribute = dma_emul_get_attribute,
	.chan_filter = dma_emul_chan_filter,
};
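
/*
 * Illustrative usage (not part of the driver): a memory-to-memory copy driven
 * through the generic Zephyr DMA API. The "dma0" node label, the buffers, and
 * my_callback() are assumptions made only for the sake of the example.
 *
 *	static void my_callback(const struct device *dev, void *user_data,
 *				uint32_t channel, int status)
 *	{
 *		// status is DMA_STATUS_COMPLETE or a negative errno (e.g. -ECANCELED)
 *	}
 *
 *	const struct device *dma = DEVICE_DT_GET(DT_NODELABEL(dma0));
 *	static uint8_t src[16], dst[16];
 *
 *	struct dma_block_config block = {
 *		.source_address = (uintptr_t)src,
 *		.dest_address = (uintptr_t)dst,
 *		.block_size = sizeof(src),
 *	};
 *	struct dma_config cfg = {
 *		.channel_direction = MEMORY_TO_MEMORY,
 *		.source_data_size = 1,
 *		.dest_data_size = 1,
 *		.source_burst_length = 8,
 *		.dest_burst_length = 8,
 *		.block_count = 1,
 *		.head_block = &block,
 *		.dma_callback = my_callback,
 *	};
 *
 *	int ret = dma_config(dma, 0, &cfg);
 *
 *	if (ret == 0) {
 *		ret = dma_start(dma, 0);
 *	}
 *
 * Note that dma_emul_config_valid() requires source_burst_length and
 * dest_burst_length to match, and the copy is performed from a work queue, so
 * completion is reported via the callback rather than synchronously.
 */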
#ifdef CONFIG_PM_DEVICE
static int dma_emul_pm_device_pm_action(const struct device *dev, enum pm_device_action action)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(action);

	return 0;
}
#endif

static int dma_emul_init(const struct device *dev)
{
	struct dma_emul_data *data = dev->data;
	const struct dma_emul_config *config = dev->config;

	data->work.dev = dev;
	data->dma_ctx.magic = DMA_MAGIC;
	data->dma_ctx.dma_channels = config->num_channels;
	data->dma_ctx.atomic = data->channels_atomic;

	k_work_queue_init(&data->work_q);
	k_work_init(&data->work.work, dma_emul_work_handler);
	k_work_queue_start(&data->work_q, config->work_q_stack, config->work_q_stack_size,
			   config->work_q_priority, NULL);

	return 0;
}

#define DMA_EMUL_INST_HAS_PROP(_inst, _prop) DT_NODE_HAS_PROP(DT_DRV_INST(_inst), _prop)

#define DMA_EMUL_INST_CHANNEL_MASK(_inst) \
	DT_INST_PROP_OR(_inst, dma_channel_mask, \
			DMA_EMUL_INST_HAS_PROP(_inst, dma_channels) \
				? ((DT_INST_PROP(_inst, dma_channels) > 0) \
					   ? BIT_MASK(DT_INST_PROP_OR(_inst, dma_channels, 0)) \
					   : 0) \
				: 0)

#define DMA_EMUL_INST_NUM_CHANNELS(_inst) \
	DT_INST_PROP_OR(_inst, dma_channels, \
			DMA_EMUL_INST_HAS_PROP(_inst, dma_channel_mask) \
				? POPCOUNT(DT_INST_PROP_OR(_inst, dma_channel_mask, 0)) \
				: 0)

#define DMA_EMUL_INST_NUM_REQUESTS(_inst) DT_INST_PROP_OR(_inst, dma_requests, 1)
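
/*
 * Illustrative devicetree node for one emulated controller (a sketch only;
 * the property names are inferred from the DT_INST_PROP*() usage in this file
 * rather than quoted from the binding):
 *
 *	dma0: dma-emul {
 *		compatible = "zephyr,dma-emul";
 *		dma-channels = <4>;
 *		dma-requests = <2>;
 *		stack-size = <2048>;
 *	};
 *
 * At least one of "dma-channels" or "dma-channel-mask" must be given, and
 * "stack-size" is read with DT_INST_PROP() (no fallback), so it is required.
 */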
#define DEFINE_DMA_EMUL(_inst) \
	BUILD_ASSERT(DMA_EMUL_INST_HAS_PROP(_inst, dma_channel_mask) || \
			     DMA_EMUL_INST_HAS_PROP(_inst, dma_channels), \
		     "at least one of dma_channel_mask or dma_channels must be provided"); \
	\
	BUILD_ASSERT(DMA_EMUL_INST_NUM_CHANNELS(_inst) <= 32, "invalid dma-channels property"); \
	\
	static K_THREAD_STACK_DEFINE(work_q_stack_##_inst, DT_INST_PROP(_inst, stack_size)); \
	\
	static struct dma_emul_xfer_desc \
		dma_emul_xfer_desc_##_inst[DMA_EMUL_INST_NUM_CHANNELS(_inst)]; \
	\
	static struct dma_block_config \
		dma_emul_block_config_##_inst[DMA_EMUL_INST_NUM_CHANNELS(_inst) * \
					      DMA_EMUL_INST_NUM_REQUESTS(_inst)]; \
	\
	static const struct dma_emul_config dma_emul_config_##_inst = { \
		.channel_mask = DMA_EMUL_INST_CHANNEL_MASK(_inst), \
		.num_channels = DMA_EMUL_INST_NUM_CHANNELS(_inst), \
		.num_requests = DMA_EMUL_INST_NUM_REQUESTS(_inst), \
		.addr_align = DT_INST_PROP_OR(_inst, dma_buf_addr_alignment, 1), \
		.size_align = DT_INST_PROP_OR(_inst, dma_buf_size_alignment, 1), \
		.copy_align = DT_INST_PROP_OR(_inst, dma_copy_alignment, 1), \
		.work_q_stack = (k_thread_stack_t *)&work_q_stack_##_inst, \
		.work_q_stack_size = K_THREAD_STACK_SIZEOF(work_q_stack_##_inst), \
		.work_q_priority = DT_INST_PROP_OR(_inst, priority, 0), \
		.xfer = dma_emul_xfer_desc_##_inst, \
		.block = dma_emul_block_config_##_inst, \
	}; \
	\
	static ATOMIC_DEFINE(dma_emul_channels_atomic_##_inst, \
			     DT_INST_PROP_OR(_inst, dma_channels, 0)); \
	\
	static struct dma_emul_data dma_emul_data_##_inst = { \
		.channels_atomic = dma_emul_channels_atomic_##_inst, \
	}; \
	\
	PM_DEVICE_DT_INST_DEFINE(_inst, dma_emul_pm_device_pm_action); \
	\
	DEVICE_DT_INST_DEFINE(_inst, dma_emul_init, PM_DEVICE_DT_INST_GET(_inst), \
			      &dma_emul_data_##_inst, &dma_emul_config_##_inst, POST_KERNEL, \
			      CONFIG_DMA_INIT_PRIORITY, &dma_emul_driver_api);

DT_INST_FOREACH_STATUS_OKAY(DEFINE_DMA_EMUL)