/*
 * Copyright (c) 2023, Meta
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#include <zephyr/pm/device.h>
#include <zephyr/sys/util.h>

#define DT_DRV_COMPAT zephyr_dma_emul

#ifdef CONFIG_DMA_64BIT
typedef uint64_t dma_addr_t;
#else
typedef uint32_t dma_addr_t;
#endif
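
/*
 * Emulated DMA controller.
 *
 * Transfers are carried out with memcpy() on a dedicated work queue thread,
 * so completion (and cancellation) callbacks run in thread context rather
 * than in an ISR. Per-channel state lives in the "_reserved" field of each
 * channel's struct dma_config (see below).
 *
 * A minimal usage sketch via the generic DMA API, assuming a devicetree node
 * labeled dma0 with compatible "zephyr,dma-emul"; the label, buffers, and
 * callback are illustrative, not part of this driver. Source and dest burst
 * lengths must match (enforced by dma_emul_config_valid()) and should be
 * non-zero, since the copy loop advances in dest_burst_length-sized chunks:
 *
 *	static void xfer_done(const struct device *dev, void *user_data,
 *			      uint32_t channel, int status)
 *	{
 *		// status == DMA_STATUS_COMPLETE on success
 *	}
 *
 *	const struct device *dma = DEVICE_DT_GET(DT_NODELABEL(dma0));
 *	struct dma_block_config blk = {
 *		.source_address = (uintptr_t)src_buf,
 *		.dest_address = (uintptr_t)dst_buf,
 *		.block_size = sizeof(src_buf),
 *	};
 *	struct dma_config cfg = {
 *		.channel_direction = MEMORY_TO_MEMORY,
 *		.source_burst_length = 8,
 *		.dest_burst_length = 8,
 *		.block_count = 1,
 *		.head_block = &blk,
 *		.dma_callback = xfer_done,
 *	};
 *
 *	dma_config(dma, 0, &cfg);
 *	dma_start(dma, 0);
 */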

enum dma_emul_channel_state {
	DMA_EMUL_CHANNEL_UNUSED,
	DMA_EMUL_CHANNEL_LOADED,
	DMA_EMUL_CHANNEL_STARTED,
	DMA_EMUL_CHANNEL_STOPPED,
};

struct dma_emul_xfer_desc {
	struct dma_config config;
};

struct dma_emul_work {
	const struct device *dev;
	uint32_t channel;
	struct k_work work;
};

struct dma_emul_config {
	uint32_t channel_mask;
	size_t num_channels;
	size_t num_requests;
	size_t addr_align;
	size_t size_align;
	size_t copy_align;

	k_thread_stack_t *work_q_stack;
	size_t work_q_stack_size;
	int work_q_priority;

	/* points to an array of size num_channels */
	struct dma_emul_xfer_desc *xfer;
	/* points to an array of size num_channels * num_requests */
	struct dma_block_config *block;
};

struct dma_emul_data {
	struct dma_context dma_ctx;
	atomic_t *channels_atomic;
	struct k_spinlock lock;
	struct k_work_q work_q;
	struct dma_emul_work work;
};

static void dma_emul_work_handler(struct k_work *work);

LOG_MODULE_REGISTER(dma_emul, CONFIG_DMA_LOG_LEVEL);

static inline const char *dma_emul_channel_state_to_string(enum dma_emul_channel_state state)
{
	switch (state) {
	case DMA_EMUL_CHANNEL_UNUSED:
		return "UNUSED";
	case DMA_EMUL_CHANNEL_LOADED:
		return "LOADED";
	case DMA_EMUL_CHANNEL_STARTED:
		return "STARTED";
	case DMA_EMUL_CHANNEL_STOPPED:
		return "STOPPED";
	default:
		return "(invalid)";
	}
}

/*
 * Repurpose the "_reserved" field for keeping track of internal
 * channel state.
 *
 * Note: these must be called with data->lock locked!
 */
static enum dma_emul_channel_state dma_emul_get_channel_state(const struct device *dev,
							      uint32_t channel)
{
	const struct dma_emul_config *config = dev->config;

	__ASSERT_NO_MSG(channel < config->num_channels);

	return (enum dma_emul_channel_state)config->xfer[channel].config._reserved;
}

static void dma_emul_set_channel_state(const struct device *dev, uint32_t channel,
				       enum dma_emul_channel_state state)
{
	const struct dma_emul_config *config = dev->config;

	LOG_DBG("setting channel %u state to %s", channel, dma_emul_channel_state_to_string(state));

	__ASSERT_NO_MSG(channel < config->num_channels);
	__ASSERT_NO_MSG(state >= DMA_EMUL_CHANNEL_UNUSED && state <= DMA_EMUL_CHANNEL_STOPPED);

	config->xfer[channel].config._reserved = state;
}

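/*
 * Note: the two config-dump helpers below format into static buffers and are
 * therefore not reentrant; they are intended only for LOG_DBG() output.
 */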
static const char *dma_emul_xfer_config_to_string(const struct dma_config *cfg)
{
	static char buffer[1024];

	snprintf(buffer, sizeof(buffer),
		 "{"
		 "\n\tslot: %u"
		 "\n\tchannel_direction: %u"
		 "\n\tcomplete_callback_en: %u"
		 "\n\terror_callback_dis: %u"
		 "\n\tsource_handshake: %u"
		 "\n\tdest_handshake: %u"
		 "\n\tchannel_priority: %u"
		 "\n\tsource_chaining_en: %u"
		 "\n\tdest_chaining_en: %u"
		 "\n\tlinked_channel: %u"
		 "\n\tcyclic: %u"
		 "\n\t_reserved: %u"
		 "\n\tsource_data_size: %u"
		 "\n\tdest_data_size: %u"
		 "\n\tsource_burst_length: %u"
		 "\n\tdest_burst_length: %u"
		 "\n\tblock_count: %u"
		 "\n\thead_block: %p"
		 "\n\tuser_data: %p"
		 "\n\tdma_callback: %p"
		 "\n}",
		 cfg->dma_slot, cfg->channel_direction, cfg->complete_callback_en,
		 cfg->error_callback_dis, cfg->source_handshake, cfg->dest_handshake,
		 cfg->channel_priority, cfg->source_chaining_en, cfg->dest_chaining_en,
		 cfg->linked_channel, cfg->cyclic, cfg->_reserved, cfg->source_data_size,
		 cfg->dest_data_size, cfg->source_burst_length, cfg->dest_burst_length,
		 cfg->block_count, cfg->head_block, cfg->user_data, cfg->dma_callback);

	return buffer;
}

static const char *dma_emul_block_config_to_string(const struct dma_block_config *cfg)
{
	static char buffer[1024];

	snprintf(buffer, sizeof(buffer),
		 "{"
		 "\n\tsource_address: %p"
		 "\n\tdest_address: %p"
		 "\n\tsource_gather_interval: %u"
		 "\n\tdest_scatter_interval: %u"
		 "\n\tdest_scatter_count: %u"
		 "\n\tsource_gather_count: %u"
		 "\n\tblock_size: %u"
		 "\n\tnext_block: %p"
		 "\n\tsource_gather_en: %u"
		 "\n\tdest_scatter_en: %u"
		 "\n\tsource_addr_adj: %u"
		 "\n\tdest_addr_adj: %u"
		 "\n\tsource_reload_en: %u"
		 "\n\tdest_reload_en: %u"
		 "\n\tfifo_mode_control: %u"
		 "\n\tflow_control_mode: %u"
		 "\n\t_reserved: %u"
		 "\n}",
		 (void *)(uintptr_t)cfg->source_address, (void *)(uintptr_t)cfg->dest_address,
		 cfg->source_gather_interval, cfg->dest_scatter_interval, cfg->dest_scatter_count,
		 cfg->source_gather_count, cfg->block_size, cfg->next_block, cfg->source_gather_en,
		 cfg->dest_scatter_en, cfg->source_addr_adj, cfg->dest_addr_adj,
		 cfg->source_reload_en, cfg->dest_reload_en, cfg->fifo_mode_control,
		 cfg->flow_control_mode, cfg->_reserved);

	return buffer;
}

static void dma_emul_work_handler(struct k_work *work)
{
	size_t i;
	size_t bytes;
	uint32_t channel;
	k_spinlock_key_t key;
	struct dma_block_config block;
	struct dma_config xfer_config;
	enum dma_emul_channel_state state;
	struct dma_emul_xfer_desc *xfer;
	struct dma_emul_work *dma_work = CONTAINER_OF(work, struct dma_emul_work, work);
	const struct device *dev = dma_work->dev;
	struct dma_emul_data *data = dev->data;
	const struct dma_emul_config *config = dev->config;

	channel = dma_work->channel;

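	/*
	 * The outer loop walks the chain of linked channels; the middle loop
	 * iterates over the blocks of the current transfer; the innermost
	 * loop copies each block in burst-sized chunks.
	 */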
	do {
		key = k_spin_lock(&data->lock);
		xfer = &config->xfer[channel];
		/*
		 * copy the dma_config so we don't have to worry about
		 * it being asynchronously updated.
		 */
		memcpy(&xfer_config, &xfer->config, sizeof(xfer_config));
		k_spin_unlock(&data->lock, key);

		LOG_DBG("processing xfer %p for channel %u", xfer, channel);
		for (i = 0; i < xfer_config.block_count; ++i) {

			LOG_DBG("processing block %zu", i);

			key = k_spin_lock(&data->lock);
			/*
			 * copy the dma_block_config so we don't have to worry about
			 * it being asynchronously updated.
			 */
			memcpy(&block,
			       &config->block[channel * config->num_requests +
					      xfer_config.dma_slot + i],
			       sizeof(block));
			k_spin_unlock(&data->lock, key);

			/*
			 * Transfer the block in bursts of (at most)
			 * dest_burst_length bytes, advancing the source and
			 * destination addresses after each copy.
			 */
			bytes = MIN(block.block_size, xfer_config.dest_burst_length);
			while (bytes > 0) {
				key = k_spin_lock(&data->lock);
				state = dma_emul_get_channel_state(dev, channel);
				k_spin_unlock(&data->lock, key);

				if (state == DMA_EMUL_CHANNEL_STOPPED) {
					LOG_DBG("asynchronously canceled");
					if (!xfer_config.error_callback_dis) {
						xfer_config.dma_callback(dev, xfer_config.user_data,
									 channel, -ECANCELED);
					} else {
						LOG_DBG("error_callback_dis is set (async cancel)");
					}
					goto out;
				}

				__ASSERT_NO_MSG(state == DMA_EMUL_CHANNEL_STARTED);

				/*
				 * FIXME: create a backend API (memcpy, TCP/UDP socket, etc)
				 * Simple copy for now
				 */
				memcpy((void *)(uintptr_t)block.dest_address,
				       (void *)(uintptr_t)block.source_address, bytes);

				block.block_size -= bytes;
				block.source_address += bytes;
				block.dest_address += bytes;
				bytes = MIN(block.block_size, xfer_config.dest_burst_length);
			}
		}

		key = k_spin_lock(&data->lock);
		dma_emul_set_channel_state(dev, channel, DMA_EMUL_CHANNEL_STOPPED);
		k_spin_unlock(&data->lock, key);

		/*
		 * FIXME: call the completion callback unconditionally for now,
		 * since tests/drivers/dma/chan_blen_transfer/ does not set
		 * complete_callback_en.
		 */
		if (true) {
			xfer_config.dma_callback(dev, xfer_config.user_data, channel,
						 DMA_STATUS_COMPLETE);
		} else {
			LOG_DBG("complete_callback_en is not set");
		}

		if (xfer_config.source_chaining_en || xfer_config.dest_chaining_en) {
			LOG_DBG("%s(): Linked channel %u -> %u", __func__, channel,
				xfer_config.linked_channel);
			__ASSERT_NO_MSG(channel != xfer_config.linked_channel);
			channel = xfer_config.linked_channel;
		} else {
			LOG_DBG("%s(): done!", __func__);
			break;
		}
	} while (true);

out:
	return;
}

static bool dma_emul_config_valid(const struct device *dev, uint32_t channel,
				  const struct dma_config *xfer_config)
{
	size_t i;
	struct dma_block_config *block;
	const struct dma_emul_config *config = dev->config;

	if (xfer_config->dma_slot >= config->num_requests) {
		LOG_ERR("invalid dma_slot %u", xfer_config->dma_slot);
		return false;
	}

	if (channel >= config->num_channels) {
		LOG_ERR("invalid DMA channel %u", channel);
		return false;
	}

	if (xfer_config->dest_burst_length != xfer_config->source_burst_length) {
		LOG_ERR("burst lengths do not agree: source: %u dest: %u",
			xfer_config->source_burst_length, xfer_config->dest_burst_length);
		return false;
	}

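	/*
	 * Walk the scatter-gather list: every block must be non-NULL, and the
	 * whole list must fit within the channel's num_requests block slots.
	 */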
	for (i = 0, block = xfer_config->head_block; i < xfer_config->block_count;
	     ++i, block = block->next_block) {
		if (block == NULL) {
			LOG_ERR("block %zu / %u is NULL", i + 1, xfer_config->block_count);
			return false;
		}

		if (i >= config->num_requests) {
			LOG_ERR("not enough slots to store block %zu / %u", i + 1,
				xfer_config->block_count);
			return false;
		}
	}

	/*
	 * FIXME:
	 *
	 * Need to verify all of the fields in struct dma_config with different DT
	 * configurations so that the driver model is at least consistent and
	 * verified by CI.
	 */

	return true;
}

static int dma_emul_configure(const struct device *dev, uint32_t channel,
			      struct dma_config *xfer_config)
{
	size_t i;
	int ret = 0;
	size_t block_idx;
	k_spinlock_key_t key;
	struct dma_block_config *block;
	struct dma_block_config *block_it;
	enum dma_emul_channel_state state;
	struct dma_emul_xfer_desc *xfer;
	struct dma_emul_data *data = dev->data;
	const struct dma_emul_config *config = dev->config;

	if (!dma_emul_config_valid(dev, channel, xfer_config)) {
		return -EINVAL;
	}

	key = k_spin_lock(&data->lock);
	xfer = &config->xfer[channel];

	LOG_DBG("%s():\nchannel: %u\nconfig: %s", __func__, channel,
		dma_emul_xfer_config_to_string(xfer_config));

	block_idx = channel * config->num_requests + xfer_config->dma_slot;
	block = &config->block[block_idx];
	state = dma_emul_get_channel_state(dev, channel);
	switch (state) {
	case DMA_EMUL_CHANNEL_UNUSED:
	case DMA_EMUL_CHANNEL_STOPPED:
		/* copy the configuration into the driver */
		memcpy(&xfer->config, xfer_config, sizeof(xfer->config));

		/* copy all blocks into slots */
		for (i = 0, block_it = xfer_config->head_block; i < xfer_config->block_count;
		     ++i, block_it = block_it->next_block, ++block) {
			__ASSERT_NO_MSG(block_it != NULL);

			LOG_DBG("block_config %s", dma_emul_block_config_to_string(block_it));

			memcpy(block, block_it, sizeof(*block));
		}
		dma_emul_set_channel_state(dev, channel, DMA_EMUL_CHANNEL_LOADED);

		break;
	default:
		LOG_ERR("attempt to configure DMA in state %d", state);
		ret = -EBUSY;
	}
	k_spin_unlock(&data->lock, key);

	return ret;
}

static int dma_emul_reload(const struct device *dev, uint32_t channel, dma_addr_t src,
			   dma_addr_t dst, size_t size)
{
	LOG_DBG("%s()", __func__);

	return -ENOSYS;
}

static int dma_emul_start(const struct device *dev, uint32_t channel)
{
	int ret = 0;
	k_spinlock_key_t key;
	enum dma_emul_channel_state state;
	struct dma_config *xfer_config;
	struct dma_emul_data *data = dev->data;
	const struct dma_emul_config *config = dev->config;

	LOG_DBG("%s(channel: %u)", __func__, channel);

	if (channel >= config->num_channels) {
		return -EINVAL;
	}

	key = k_spin_lock(&data->lock);
	state = dma_emul_get_channel_state(dev, channel);
	switch (state) {
	case DMA_EMUL_CHANNEL_STARTED:
		/* starting an already-started channel is a no-op */
		break;
	case DMA_EMUL_CHANNEL_LOADED:
	case DMA_EMUL_CHANNEL_STOPPED:
		data->work.channel = channel;
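		/*
		 * Walk the linked-channel chain, marking every channel in it
		 * as started before submitting the single, shared work item.
		 */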
		while (true) {
			dma_emul_set_channel_state(dev, channel, DMA_EMUL_CHANNEL_STARTED);

			xfer_config = &config->xfer[channel].config;
			if (xfer_config->source_chaining_en || xfer_config->dest_chaining_en) {
				LOG_DBG("%s(): Linked channel %u -> %u", __func__, channel,
					xfer_config->linked_channel);
				channel = xfer_config->linked_channel;
			} else {
				break;
			}
		}
		ret = k_work_submit_to_queue(&data->work_q, &data->work.work);
		ret = (ret < 0) ? ret : 0;
		break;
	default:
		LOG_ERR("attempt to start dma in invalid state %d", state);
		ret = -EIO;
		break;
	}
	k_spin_unlock(&data->lock, key);

	return ret;
}

static int dma_emul_stop(const struct device *dev, uint32_t channel)
{
	k_spinlock_key_t key;
	struct dma_emul_data *data = dev->data;

	key = k_spin_lock(&data->lock);
	dma_emul_set_channel_state(dev, channel, DMA_EMUL_CHANNEL_STOPPED);
	k_spin_unlock(&data->lock, key);

	return 0;
}

static int dma_emul_suspend(const struct device *dev, uint32_t channel)
{
	LOG_DBG("%s()", __func__);

	return -ENOSYS;
}

static int dma_emul_resume(const struct device *dev, uint32_t channel)
{
	LOG_DBG("%s()", __func__);

	return -ENOSYS;
}

static int dma_emul_get_status(const struct device *dev, uint32_t channel,
			       struct dma_status *status)
{
	LOG_DBG("%s()", __func__);

	return -ENOSYS;
}

static int dma_emul_get_attribute(const struct device *dev, uint32_t type, uint32_t *value)
{
	LOG_DBG("%s()", __func__);

	return -ENOSYS;
}

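/*
 * Invoked via dma_request_channel(): a channel is available for allocation
 * only while it is in the UNUSED state.
 */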
static bool dma_emul_chan_filter(const struct device *dev, int channel, void *filter_param)
{
	bool success;
	k_spinlock_key_t key;
	struct dma_emul_data *data = dev->data;

	key = k_spin_lock(&data->lock);
	/* let's assume the struct dma_context handles races properly */
	success = dma_emul_get_channel_state(dev, channel) == DMA_EMUL_CHANNEL_UNUSED;
	k_spin_unlock(&data->lock, key);

	return success;
}

static DEVICE_API(dma, dma_emul_driver_api) = {
	.config = dma_emul_configure,
	.reload = dma_emul_reload,
	.start = dma_emul_start,
	.stop = dma_emul_stop,
	.suspend = dma_emul_suspend,
	.resume = dma_emul_resume,
	.get_status = dma_emul_get_status,
	.get_attribute = dma_emul_get_attribute,
	.chan_filter = dma_emul_chan_filter,
};

#ifdef CONFIG_PM_DEVICE
static int dma_emul_pm_device_pm_action(const struct device *dev, enum pm_device_action action)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(action);

	return 0;
}
#endif

static int dma_emul_init(const struct device *dev)
{
	struct dma_emul_data *data = dev->data;
	const struct dma_emul_config *config = dev->config;

	data->work.dev = dev;
	data->dma_ctx.magic = DMA_MAGIC;
	data->dma_ctx.dma_channels = config->num_channels;
	data->dma_ctx.atomic = data->channels_atomic;

	k_work_queue_init(&data->work_q);
	k_work_init(&data->work.work, dma_emul_work_handler);
	k_work_queue_start(&data->work_q, config->work_q_stack, config->work_q_stack_size,
			   config->work_q_priority, NULL);

	return 0;
}

#define DMA_EMUL_INST_HAS_PROP(_inst, _prop) DT_NODE_HAS_PROP(DT_DRV_INST(_inst), _prop)

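/*
 * Derive the channel mask and the channel count from whichever of the
 * "dma-channel-mask" and "dma-channels" devicetree properties is present,
 * computing the missing one from the other.
 */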
#define DMA_EMUL_INST_CHANNEL_MASK(_inst)                                                          \
	DT_INST_PROP_OR(_inst, dma_channel_mask,                                                   \
			DMA_EMUL_INST_HAS_PROP(_inst, dma_channels)                                \
				? ((DT_INST_PROP(_inst, dma_channels) > 0)                         \
					   ? BIT_MASK(DT_INST_PROP_OR(_inst, dma_channels, 0))     \
					   : 0)                                                    \
				: 0)

#define DMA_EMUL_INST_NUM_CHANNELS(_inst)                                                          \
	DT_INST_PROP_OR(_inst, dma_channels,                                                       \
			DMA_EMUL_INST_HAS_PROP(_inst, dma_channel_mask)                            \
				? POPCOUNT(DT_INST_PROP_OR(_inst, dma_channel_mask, 0))            \
				: 0)

#define DMA_EMUL_INST_NUM_REQUESTS(_inst) DT_INST_PROP_OR(_inst, dma_requests, 1)

#define DEFINE_DMA_EMUL(_inst)                                                                     \
	BUILD_ASSERT(DMA_EMUL_INST_HAS_PROP(_inst, dma_channel_mask) ||                            \
			     DMA_EMUL_INST_HAS_PROP(_inst, dma_channels),                          \
		     "at least one of dma_channel_mask or dma_channels must be provided");         \
                                                                                                   \
	BUILD_ASSERT(DMA_EMUL_INST_NUM_CHANNELS(_inst) <= 32, "invalid dma-channels property");    \
                                                                                                   \
	static K_THREAD_STACK_DEFINE(work_q_stack_##_inst, DT_INST_PROP(_inst, stack_size));       \
                                                                                                   \
	static struct dma_emul_xfer_desc                                                           \
		dma_emul_xfer_desc_##_inst[DMA_EMUL_INST_NUM_CHANNELS(_inst)];                     \
                                                                                                   \
	static struct dma_block_config                                                             \
		dma_emul_block_config_##_inst[DMA_EMUL_INST_NUM_CHANNELS(_inst) *                  \
					      DMA_EMUL_INST_NUM_REQUESTS(_inst)];                  \
                                                                                                   \
	static const struct dma_emul_config dma_emul_config_##_inst = {                            \
		.channel_mask = DMA_EMUL_INST_CHANNEL_MASK(_inst),                                 \
		.num_channels = DMA_EMUL_INST_NUM_CHANNELS(_inst),                                 \
		.num_requests = DMA_EMUL_INST_NUM_REQUESTS(_inst),                                 \
		.addr_align = DT_INST_PROP_OR(_inst, dma_buf_addr_alignment, 1),                   \
		.size_align = DT_INST_PROP_OR(_inst, dma_buf_size_alignment, 1),                   \
		.copy_align = DT_INST_PROP_OR(_inst, dma_copy_alignment, 1),                       \
		.work_q_stack = (k_thread_stack_t *)&work_q_stack_##_inst,                         \
		.work_q_stack_size = K_THREAD_STACK_SIZEOF(work_q_stack_##_inst),                  \
		.work_q_priority = DT_INST_PROP_OR(_inst, priority, 0),                            \
		.xfer = dma_emul_xfer_desc_##_inst,                                                \
		.block = dma_emul_block_config_##_inst,                                            \
	};                                                                                         \
                                                                                                   \
	static ATOMIC_DEFINE(dma_emul_channels_atomic_##_inst,                                     \
			     DMA_EMUL_INST_NUM_CHANNELS(_inst));                                   \
                                                                                                   \
	static struct dma_emul_data dma_emul_data_##_inst = {                                      \
		.channels_atomic = dma_emul_channels_atomic_##_inst,                               \
	};                                                                                         \
                                                                                                   \
	PM_DEVICE_DT_INST_DEFINE(_inst, dma_emul_pm_device_pm_action);                             \
                                                                                                   \
	DEVICE_DT_INST_DEFINE(_inst, dma_emul_init, PM_DEVICE_DT_INST_GET(_inst),                  \
			      &dma_emul_data_##_inst, &dma_emul_config_##_inst, POST_KERNEL,       \
			      CONFIG_DMA_INIT_PRIORITY, &dma_emul_driver_api);

DT_INST_FOREACH_STATUS_OKAY(DEFINE_DMA_EMUL)