1 /* dma.c - DMA test source file */
2 
3 /*
4  * Copyright (c) 2016 Intel Corporation.
5  * Copyright (c) 2021 Linaro Limited.
6  *
7  * SPDX-License-Identifier: Apache-2.0
8  */
9 
10 /**
11  * @file
12  * @brief Verify zephyr dma memory to memory transfer loops
13  * @details
14  * - Test Steps
15  *   -# Set dma channel configuration including source/dest addr, burstlen
16  *   -# Set direction memory-to-memory
17  *   -# Start transfer
18  *   -# Move to next dest addr
19  *   -# Back to first step
20  * - Expected Results
21  *   -# Data is transferred correctly from src to dest, for each loop
22  */
23 
24 #include <zephyr/kernel.h>
25 
26 #include <zephyr/device.h>
27 #include <zephyr/drivers/dma.h>
28 #include <zephyr/pm/device.h>
29 #include <zephyr/ztest.h>
30 
/* Sleep duration, in milliseconds, between starting the transfer chain and
 * checking that it has completed.
 */
#define SLEEPTIME 250

/* Number of chained memory-to-memory transfers performed per test case */
#define TRANSFER_LOOPS (4)
/* Buffer alignment required by the DMA controller; falls back to 32 bytes
 * when the tst_dma0 node does not declare dma-buf-addr-alignment.
 */
#define DMA_DATA_ALIGNMENT DT_INST_PROP_OR(tst_dma0, dma_buf_addr_alignment, 32)

static __aligned(DMA_DATA_ALIGNMENT) uint8_t tx_data[CONFIG_DMA_LOOP_TRANSFER_SIZE];
static __aligned(DMA_DATA_ALIGNMENT) uint8_t
	rx_data[TRANSFER_LOOPS][CONFIG_DMA_LOOP_TRANSFER_SIZE] = { { 0 } };

/* Transfers completed so far; incremented from the DMA completion callback */
volatile uint32_t transfer_count;
/* Non-zero once a test case is finished, so late callbacks are ignored */
volatile uint32_t done;
static struct dma_config dma_cfg = {0};
static struct dma_block_config dma_block_cfg = {0};
static int test_case_id;
46 
/* Completion hook: advance to the next rx_data row and kick off the next
 * transfer, until TRANSFER_LOOPS transfers have been issued.
 */
static void test_transfer(const struct device *dev, uint32_t id)
{
	transfer_count++;
	if (transfer_count >= TRANSFER_LOOPS) {
		/* Whole chain done; nothing further to start. */
		return;
	}

	dma_block_cfg.block_size = sizeof(tx_data);
#ifdef CONFIG_DMA_64BIT
	dma_block_cfg.source_address = (uint64_t)tx_data;
	dma_block_cfg.dest_address = (uint64_t)rx_data[transfer_count];
#else
	dma_block_cfg.source_address = (uint32_t)tx_data;
	dma_block_cfg.dest_address = (uint32_t)rx_data[transfer_count];
#endif

	zassert_ok(dma_config(dev, id, &dma_cfg), "Not able to config transfer %d",
		   transfer_count + 1);
	zassert_ok(dma_start(dev, id), "Not able to start next transfer %d",
		   transfer_count + 1);
}
66 
/* DMA completion callback registered via dma_cfg.dma_callback. */
static void dma_user_callback(const struct device *dma_dev, void *arg,
			      uint32_t id, int status)
{
	if (done) {
		/* Test case already finished; drop the stray interrupt. */
		return;
	}

	zassert_false(status < 0, "DMA could not proceed, an error occurred\n");

#ifdef CONFIG_DMAMUX_STM32
	/* With a DMAMUX the reported device is the mux itself and `id` is the
	 * mux channel; the real DMA device was stashed in user_data by the
	 * configuring code (via stream->user_data in dma_stm32_irq_handler).
	 */
	const struct device *dev = (const struct device *)arg;
#else
	const struct device *dev = dma_dev;
#endif /* CONFIG_DMAMUX_STM32 */

	test_transfer(dev, id);
}
87 
test_loop(const struct device * dma)88 static int test_loop(const struct device *dma)
89 {
90 	static int chan_id;
91 
92 	test_case_id = 0;
93 	TC_PRINT("DMA memory to memory transfer started\n");
94 
95 	memset(tx_data, 0, sizeof(tx_data));
96 
97 	for (int i = 0; i < CONFIG_DMA_LOOP_TRANSFER_SIZE; i++) {
98 		tx_data[i] = i;
99 	}
100 
101 	memset(rx_data, 0, sizeof(rx_data));
102 
103 	if (!device_is_ready(dma)) {
104 		TC_PRINT("dma controller device is not ready\n");
105 		return TC_FAIL;
106 	}
107 
108 	TC_PRINT("Preparing DMA Controller: %s\n", dma->name);
109 	dma_cfg.channel_direction = MEMORY_TO_MEMORY;
110 	dma_cfg.source_data_size = 1U;
111 	dma_cfg.dest_data_size = 1U;
112 	dma_cfg.source_burst_length = 1U;
113 	dma_cfg.dest_burst_length = 1U;
114 #ifdef CONFIG_DMAMUX_STM32
115 	dma_cfg.user_data = (void *)dma;
116 #else
117 	dma_cfg.user_data = NULL;
118 #endif /* CONFIG_DMAMUX_STM32 */
119 	dma_cfg.dma_callback = dma_user_callback;
120 	dma_cfg.block_count = 1U;
121 	dma_cfg.head_block = &dma_block_cfg;
122 
123 #ifdef CONFIG_DMA_MCUX_TEST_SLOT_START
124 	dma_cfg.dma_slot = CONFIG_DMA_MCUX_TEST_SLOT_START;
125 #endif
126 
127 	chan_id = dma_request_channel(dma, NULL);
128 	if (chan_id < 0) {
129 		TC_PRINT("this platform do not support the dma channel\n");
130 		chan_id = CONFIG_DMA_LOOP_TRANSFER_CHANNEL_NR;
131 	}
132 	transfer_count = 0;
133 	done = 0;
134 	TC_PRINT("Starting the transfer on channel %d and waiting for 1 second\n", chan_id);
135 	dma_block_cfg.block_size = sizeof(tx_data);
136 #ifdef CONFIG_DMA_64BIT
137 	dma_block_cfg.source_address = (uint64_t)tx_data;
138 	dma_block_cfg.dest_address = (uint64_t)rx_data[transfer_count];
139 #else
140 	dma_block_cfg.source_address = (uint32_t)tx_data;
141 	dma_block_cfg.dest_address = (uint32_t)rx_data[transfer_count];
142 #endif
143 
144 	if (dma_config(dma, chan_id, &dma_cfg)) {
145 		TC_PRINT("ERROR: transfer config (%d)\n", chan_id);
146 		return TC_FAIL;
147 	}
148 
149 	if (dma_start(dma, chan_id)) {
150 		TC_PRINT("ERROR: transfer start (%d)\n", chan_id);
151 		return TC_FAIL;
152 	}
153 
154 	k_sleep(K_MSEC(SLEEPTIME));
155 
156 	if (transfer_count < TRANSFER_LOOPS) {
157 		transfer_count = TRANSFER_LOOPS;
158 		TC_PRINT("ERROR: unfinished transfer\n");
159 		if (dma_stop(dma, chan_id)) {
160 			TC_PRINT("ERROR: transfer stop\n");
161 		}
162 		return TC_FAIL;
163 	}
164 
165 	TC_PRINT("Each RX buffer should contain the full TX buffer string.\n");
166 
167 	for (int i = 0; i < TRANSFER_LOOPS; i++) {
168 		TC_PRINT("RX data Loop %d\n", i);
169 		if (memcmp(tx_data, rx_data[i], CONFIG_DMA_LOOP_TRANSFER_SIZE)) {
170 			return TC_FAIL;
171 		}
172 	}
173 
174 	dma_release_channel(dma, chan_id);
175 
176 	TC_PRINT("Finished DMA: %s\n", dma->name);
177 	return TC_PASS;
178 }
179 
test_loop_suspend_resume(const struct device * dma)180 static int test_loop_suspend_resume(const struct device *dma)
181 {
182 	static int chan_id;
183 	int res = 0;
184 
185 	test_case_id = 1;
186 	TC_PRINT("DMA memory to memory transfer started\n");
187 
188 	memset(tx_data, 0, sizeof(tx_data));
189 
190 	for (int i = 0; i < CONFIG_DMA_LOOP_TRANSFER_SIZE; i++) {
191 		tx_data[i] = i;
192 	}
193 
194 	memset(rx_data, 0, sizeof(rx_data));
195 
196 	if (!device_is_ready(dma)) {
197 		TC_PRINT("dma controller device is not ready\n");
198 		return TC_FAIL;
199 	}
200 
201 	TC_PRINT("Preparing DMA Controller: %s\n", dma->name);
202 	dma_cfg.channel_direction = MEMORY_TO_MEMORY;
203 	dma_cfg.source_data_size = 1U;
204 	dma_cfg.dest_data_size = 1U;
205 	dma_cfg.source_burst_length = 1U;
206 	dma_cfg.dest_burst_length = 1U;
207 #ifdef CONFIG_DMAMUX_STM32
208 	dma_cfg.user_data = (struct device *)dma;
209 #else
210 	dma_cfg.user_data = NULL;
211 #endif /* CONFIG_DMAMUX_STM32 */
212 	dma_cfg.dma_callback = dma_user_callback;
213 	dma_cfg.block_count = 1U;
214 	dma_cfg.head_block = &dma_block_cfg;
215 
216 #ifdef CONFIG_DMA_MCUX_TEST_SLOT_START
217 	dma_cfg.dma_slot = CONFIG_DMA_MCUX_TEST_SLOT_START;
218 #endif
219 
220 	chan_id = dma_request_channel(dma, NULL);
221 	if (chan_id < 0) {
222 		TC_PRINT("this platform do not support the dma channel\n");
223 		chan_id = CONFIG_DMA_LOOP_TRANSFER_CHANNEL_NR;
224 	}
225 	transfer_count = 0;
226 	done = 0;
227 	TC_PRINT("Starting the transfer on channel %d and waiting for 1 second\n", chan_id);
228 	dma_block_cfg.block_size = sizeof(tx_data);
229 #ifdef CONFIG_DMA_64BIT
230 	dma_block_cfg.source_address = (uint64_t)tx_data;
231 	dma_block_cfg.dest_address = (uint64_t)rx_data[transfer_count];
232 #else
233 	dma_block_cfg.source_address = (uint32_t)tx_data;
234 	dma_block_cfg.dest_address = (uint32_t)rx_data[transfer_count];
235 #endif
236 
237 	unsigned int irq_key;
238 
239 	if (dma_config(dma, chan_id, &dma_cfg)) {
240 		TC_PRINT("ERROR: transfer config (%d)\n", chan_id);
241 		return TC_FAIL;
242 	}
243 
244 	if (dma_start(dma, chan_id)) {
245 		TC_PRINT("ERROR: transfer start (%d)\n", chan_id);
246 		return TC_FAIL;
247 	}
248 
249 	/* Try multiple times to suspend the transfers */
250 	uint32_t tc = transfer_count;
251 
252 	do {
253 		irq_key = irq_lock();
254 		res = dma_suspend(dma, chan_id);
255 		if (res == -ENOSYS) {
256 			done = 1;
257 			TC_PRINT("suspend not supported\n");
258 			dma_stop(dma, chan_id);
259 			ztest_test_skip();
260 			return TC_SKIP;
261 		}
262 		tc = transfer_count;
263 		irq_unlock(irq_key);
264 		k_busy_wait(100);
265 	} while (tc != transfer_count);
266 
267 	/* If we failed to suspend we failed */
268 	if (transfer_count == TRANSFER_LOOPS) {
269 		TC_PRINT("ERROR: failed to suspend transfers\n");
270 		if (dma_stop(dma, chan_id)) {
271 			TC_PRINT("ERROR: transfer stop\n");
272 		}
273 		return TC_FAIL;
274 	}
275 	TC_PRINT("suspended after %d transfers occurred\n", transfer_count);
276 
277 	/* Now sleep */
278 	k_sleep(K_MSEC(SLEEPTIME));
279 
280 	/* If we failed to suspend we failed */
281 	if (transfer_count == TRANSFER_LOOPS) {
282 		TC_PRINT("ERROR: failed to suspend transfers\n");
283 		if (dma_stop(dma, chan_id)) {
284 			TC_PRINT("ERROR: transfer stop\n");
285 		}
286 		return TC_FAIL;
287 	}
288 	TC_PRINT("resuming after %d transfers occurred\n", transfer_count);
289 
290 	res = dma_resume(dma, chan_id);
291 	TC_PRINT("Resumed transfers\n");
292 	if (res != 0) {
293 		TC_PRINT("ERROR: resume failed, channel %d, result %d", chan_id, res);
294 		if (dma_stop(dma, chan_id)) {
295 			TC_PRINT("ERROR: transfer stop\n");
296 		}
297 		return TC_FAIL;
298 	}
299 
300 	k_sleep(K_MSEC(SLEEPTIME));
301 
302 	TC_PRINT("Transfer count %d\n", transfer_count);
303 	if (transfer_count < TRANSFER_LOOPS) {
304 		transfer_count = TRANSFER_LOOPS;
305 		TC_PRINT("ERROR: unfinished transfer\n");
306 		if (dma_stop(dma, chan_id)) {
307 			TC_PRINT("ERROR: transfer stop\n");
308 		}
309 		return TC_FAIL;
310 	}
311 
312 	TC_PRINT("Each RX buffer should contain the full TX buffer string.\n");
313 
314 	for (int i = 0; i < TRANSFER_LOOPS; i++) {
315 		TC_PRINT("RX data Loop %d\n", i);
316 		if (memcmp(tx_data, rx_data[i], CONFIG_DMA_LOOP_TRANSFER_SIZE)) {
317 			return TC_FAIL;
318 		}
319 	}
320 
321 	dma_release_channel(dma, chan_id);
322 
323 	TC_PRINT("Finished DMA: %s\n", dma->name);
324 	return TC_PASS;
325 }
326 
327 /**
328  * @brief Check if the device is in valid power state.
329  *
330  * @param dev Device instance.
331  * @param expected Device expected power state.
332  *
333  * @retval true If device is in correct power state.
334  * @retval false If device is not in correct power state.
335  */
check_dev_power_state(const struct device * dev,enum pm_device_state expected)336 static bool check_dev_power_state(const struct device *dev, enum pm_device_state expected)
337 {
338 #if CONFIG_PM_DEVICE_RUNTIME
339 	enum pm_device_state state;
340 
341 	if (pm_device_state_get(dev, &state) == 0) {
342 		if (expected != state) {
343 			TC_PRINT("ERROR: device %s is incorrect power state"
344 				 " (current state = %s, expected = %s)\n",
345 				 dev->name, pm_device_state_str(state),
346 				 pm_device_state_str(expected));
347 			return false;
348 		}
349 
350 		return true;
351 	}
352 
353 	TC_PRINT("ERROR: unable to get power state of %s", dev->name);
354 	return false;
355 #else
356 	return true;
357 #endif /* CONFIG_PM_DEVICE_RUNTIME */
358 }
359 
test_loop_repeated_start_stop(const struct device * dma)360 static int test_loop_repeated_start_stop(const struct device *dma)
361 {
362 	static int chan_id;
363 
364 	if (!check_dev_power_state(dma, PM_DEVICE_STATE_SUSPENDED) &&
365 	    !check_dev_power_state(dma, PM_DEVICE_STATE_OFF)) {
366 		TC_PRINT("ERROR: device %s is not in the correct init power state", dma->name);
367 		return TC_FAIL;
368 	}
369 
370 	test_case_id = 0;
371 	TC_PRINT("DMA memory to memory transfer started\n");
372 	TC_PRINT("Preparing DMA Controller\n");
373 
374 	memset(tx_data, 0, sizeof(tx_data));
375 
376 	memset(rx_data, 0, sizeof(rx_data));
377 
378 	if (!device_is_ready(dma)) {
379 		TC_PRINT("dma controller device is not ready\n");
380 		return TC_FAIL;
381 	}
382 
383 	dma_cfg.channel_direction = MEMORY_TO_MEMORY;
384 	dma_cfg.source_data_size = 1U;
385 	dma_cfg.dest_data_size = 1U;
386 	dma_cfg.source_burst_length = 1U;
387 	dma_cfg.dest_burst_length = 1U;
388 #ifdef CONFIG_DMAMUX_STM32
389 	dma_cfg.user_data = (void *)dma;
390 #else
391 	dma_cfg.user_data = NULL;
392 #endif /* CONFIG_DMAMUX_STM32 */
393 	dma_cfg.dma_callback = dma_user_callback;
394 	dma_cfg.block_count = 1U;
395 	dma_cfg.head_block = &dma_block_cfg;
396 
397 #ifdef CONFIG_DMA_MCUX_TEST_SLOT_START
398 	dma_cfg.dma_slot = CONFIG_DMA_MCUX_TEST_SLOT_START;
399 #endif
400 
401 	chan_id = dma_request_channel(dma, NULL);
402 	if (chan_id < 0) {
403 		TC_PRINT("this platform do not support the dma channel\n");
404 		chan_id = CONFIG_DMA_LOOP_TRANSFER_CHANNEL_NR;
405 	}
406 	transfer_count = 0;
407 	done = 0;
408 	TC_PRINT("Starting the transfer on channel %d and waiting for 1 second\n", chan_id);
409 	dma_block_cfg.block_size = sizeof(tx_data);
410 #ifdef CONFIG_DMA_64BIT
411 	dma_block_cfg.source_address = (uint64_t)tx_data;
412 	dma_block_cfg.dest_address = (uint64_t)rx_data[transfer_count];
413 #else
414 	dma_block_cfg.source_address = (uint32_t)tx_data;
415 	dma_block_cfg.dest_address = (uint32_t)rx_data[transfer_count];
416 #endif
417 
418 	if (dma_config(dma, chan_id, &dma_cfg)) {
419 		TC_PRINT("ERROR: transfer config (%d)\n", chan_id);
420 		return TC_FAIL;
421 	}
422 
423 	if (dma_stop(dma, chan_id)) {
424 		TC_PRINT("ERROR: transfer stop on stopped channel (%d)\n", chan_id);
425 		return TC_FAIL;
426 	}
427 
428 	if (!check_dev_power_state(dma, PM_DEVICE_STATE_SUSPENDED) &&
429 	    !check_dev_power_state(dma, PM_DEVICE_STATE_OFF)) {
430 		TC_PRINT("ERROR: device %s is not in the correct power state", dma->name);
431 		return TC_FAIL;
432 	}
433 
434 	if (dma_start(dma, chan_id)) {
435 		TC_PRINT("ERROR: transfer start (%d)\n", chan_id);
436 		return TC_FAIL;
437 	}
438 
439 	if (!check_dev_power_state(dma, PM_DEVICE_STATE_ACTIVE)) {
440 		return TC_FAIL;
441 	}
442 
443 	k_sleep(K_MSEC(SLEEPTIME));
444 
445 	if (transfer_count < TRANSFER_LOOPS) {
446 		transfer_count = TRANSFER_LOOPS;
447 		TC_PRINT("ERROR: unfinished transfer\n");
448 		if (dma_stop(dma, chan_id)) {
449 			TC_PRINT("ERROR: transfer stop\n");
450 		}
451 		return TC_FAIL;
452 	}
453 
454 	TC_PRINT("Each RX buffer should contain the full TX buffer string.\n");
455 
456 	for (int i = 0; i < TRANSFER_LOOPS; i++) {
457 		TC_PRINT("RX data Loop %d\n", i);
458 		if (memcmp(tx_data, rx_data[i], CONFIG_DMA_LOOP_TRANSFER_SIZE)) {
459 			return TC_FAIL;
460 		}
461 	}
462 
463 	TC_PRINT("Finished: DMA\n");
464 
465 	if (dma_stop(dma, chan_id)) {
466 		TC_PRINT("ERROR: transfer stop (%d)\n", chan_id);
467 		return TC_FAIL;
468 	}
469 
470 	if (!check_dev_power_state(dma, PM_DEVICE_STATE_SUSPENDED) &&
471 	    !check_dev_power_state(dma, PM_DEVICE_STATE_OFF)) {
472 		TC_PRINT("ERROR: device %s is not in the correct power state", dma->name);
473 		return TC_FAIL;
474 	}
475 
476 	if (dma_stop(dma, chan_id)) {
477 		TC_PRINT("ERROR: repeated transfer stop (%d)\n", chan_id);
478 		return TC_FAIL;
479 	}
480 
481 	dma_release_channel(dma, chan_id);
482 
483 	return TC_PASS;
484 }
485 
/* Expand tst_dma<i> node labels for each DMA under test, e.g. tst_dma0, tst_dma1 */
#define DMA_NAME(i, _)	tst_dma ## i
#define DMA_LIST	LISTIFY(CONFIG_DMA_LOOP_TRANSFER_NUMBER_OF_DMAS, DMA_NAME, (,))

/* Generate one ZTEST per DMA controller running the basic loop test */
#define TEST_LOOP(dma_name)                                                                        \
	ZTEST(dma_m2m_loop, test_ ## dma_name ## _m2m_loop)                                        \
	{                                                                                          \
		const struct device *dma = DEVICE_DT_GET(DT_NODELABEL(dma_name));                  \
		zassert_true((test_loop(dma) == TC_PASS));                                         \
	}

FOR_EACH(TEST_LOOP, (), DMA_LIST);

/* Generate one ZTEST per DMA controller running the suspend/resume test */
#define TEST_LOOP_SUSPEND_RESUME(dma_name)                                                         \
	ZTEST(dma_m2m_loop, test_ ## dma_name ## _m2m_loop_suspend_resume)                         \
	{                                                                                          \
		const struct device *dma = DEVICE_DT_GET(DT_NODELABEL(dma_name));                  \
		zassert_true((test_loop_suspend_resume(dma) == TC_PASS));                          \
	}

FOR_EACH(TEST_LOOP_SUSPEND_RESUME, (), DMA_LIST);

/* Generate one ZTEST per DMA controller running the repeated start/stop test */
#define TEST_LOOP_REPEATED_START_STOP(dma_name)                                                    \
	ZTEST(dma_m2m_loop, test_ ## dma_name ## _m2m_loop_repeated_start_stop)                    \
	{                                                                                          \
		const struct device *dma = DEVICE_DT_GET(DT_NODELABEL(dma_name));                  \
		zassert_true((test_loop_repeated_start_stop(dma) == TC_PASS));                     \
	}

FOR_EACH(TEST_LOOP_REPEATED_START_STOP, (), DMA_LIST);
515