/* dma.c - DMA test source file */

/*
 * Copyright (c) 2016 Intel Corporation.
 * Copyright (c) 2021 Linaro Limited.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Verify Zephyr DMA memory-to-memory transfer loops
 * @details
 * - Test Steps
 * -# Set DMA channel configuration including source/dest addresses and burst length
 * -# Set direction memory-to-memory
 * -# Start transfer
 * -# Move to the next dest address
 * -# Go back to the first step
 * - Expected Results
 * -# Data is transferred correctly from src to dest for each loop
 */

#include <zephyr/kernel.h>

#include <zephyr/device.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/pm/device.h>
#include <zephyr/ztest.h>

/* in milliseconds */
#define SLEEPTIME 250

#define TRANSFER_LOOPS (4)

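/*
 * 32-byte alignment keeps the buffers cache-line and burst aligned for DMA
 * engines and cache-maintenance code that require it.
 */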
static __aligned(32) uint8_t tx_data[CONFIG_DMA_LOOP_TRANSFER_SIZE];
static __aligned(32) uint8_t rx_data[TRANSFER_LOOPS][CONFIG_DMA_LOOP_TRANSFER_SIZE] = { { 0 } };

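/* Shared between the DMA completion callback (interrupt context) and the test thread */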
volatile uint32_t transfer_count;
volatile uint32_t done;
static struct dma_config dma_cfg = {0};
static struct dma_block_config dma_block_cfg = {0};
static int test_case_id;

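/**
 * @brief Chain the next memory-to-memory transfer from the completion path.
 *
 * Re-points the block config at the next rx_data buffer, then reconfigures
 * and restarts the channel until TRANSFER_LOOPS transfers have been issued.
 */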
static void test_transfer(const struct device *dev, uint32_t id)
{
	transfer_count++;
	if (transfer_count < TRANSFER_LOOPS) {
		dma_block_cfg.block_size = sizeof(tx_data);
#ifdef CONFIG_DMA_64BIT
		dma_block_cfg.source_address = (uint64_t)tx_data;
		dma_block_cfg.dest_address = (uint64_t)rx_data[transfer_count];
#else
		dma_block_cfg.source_address = (uint32_t)tx_data;
		dma_block_cfg.dest_address = (uint32_t)rx_data[transfer_count];
#endif

		zassert_ok(dma_config(dev, id, &dma_cfg), "Not able to config transfer %d",
			   transfer_count + 1);
		zassert_ok(dma_start(dev, id), "Not able to start next transfer %d",
			   transfer_count + 1);
	}
}

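/**
 * @brief User callback invoked by the DMA driver when a transfer completes.
 *
 * Asserts the transfer finished without error and chains the next transfer,
 * unless the test case has already finished.
 */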
static void dma_user_callback(const struct device *dma_dev, void *arg,
			      uint32_t id, int status)
{
	/* test case is done so ignore the interrupt */
	if (done) {
		return;
	}

	zassert_false(status < 0, "DMA could not proceed, an error occurred\n");

#ifdef CONFIG_DMAMUX_STM32
	/* the channel is the DMAMUX's one
	 * the device is the DMAMUX, given through
	 * the stream->user_data by the dma_stm32_irq_handler
	 */
	test_transfer((const struct device *)arg, id);
#else
	test_transfer(dma_dev, id);
#endif /* CONFIG_DMAMUX_STM32 */
}

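/**
 * @brief Run TRANSFER_LOOPS chained memory-to-memory transfers.
 *
 * Fills tx_data with a ramp pattern, starts the first transfer (subsequent
 * ones are chained from the completion callback), then verifies that every
 * rx_data buffer matches tx_data.
 */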
static int test_loop(const struct device *dma)
{
	static int chan_id;

	test_case_id = 0;
	TC_PRINT("DMA memory to memory transfer started\n");

	memset(tx_data, 0, sizeof(tx_data));

	for (int i = 0; i < CONFIG_DMA_LOOP_TRANSFER_SIZE; i++) {
		tx_data[i] = i;
	}

	memset(rx_data, 0, sizeof(rx_data));

	if (!device_is_ready(dma)) {
		TC_PRINT("dma controller device is not ready\n");
		return TC_FAIL;
	}

	TC_PRINT("Preparing DMA Controller: %s\n", dma->name);
	dma_cfg.channel_direction = MEMORY_TO_MEMORY;
	dma_cfg.source_data_size = 1U;
	dma_cfg.dest_data_size = 1U;
	dma_cfg.source_burst_length = 1U;
	dma_cfg.dest_burst_length = 1U;
#ifdef CONFIG_DMAMUX_STM32
	dma_cfg.user_data = (void *)dma;
#else
	dma_cfg.user_data = NULL;
#endif /* CONFIG_DMAMUX_STM32 */
	dma_cfg.dma_callback = dma_user_callback;
	dma_cfg.block_count = 1U;
	dma_cfg.head_block = &dma_block_cfg;

#ifdef CONFIG_DMA_MCUX_TEST_SLOT_START
	dma_cfg.dma_slot = CONFIG_DMA_MCUX_TEST_SLOT_START;
#endif

	chan_id = dma_request_channel(dma, NULL);
	if (chan_id < 0) {
		TC_PRINT("this platform does not support the dma channel\n");
		chan_id = CONFIG_DMA_LOOP_TRANSFER_CHANNEL_NR;
	}
	transfer_count = 0;
	done = 0;
	TC_PRINT("Starting the transfer on channel %d and waiting %d ms\n", chan_id, SLEEPTIME);
	dma_block_cfg.block_size = sizeof(tx_data);
#ifdef CONFIG_DMA_64BIT
	dma_block_cfg.source_address = (uint64_t)tx_data;
	dma_block_cfg.dest_address = (uint64_t)rx_data[transfer_count];
#else
	dma_block_cfg.source_address = (uint32_t)tx_data;
	dma_block_cfg.dest_address = (uint32_t)rx_data[transfer_count];
#endif

	if (dma_config(dma, chan_id, &dma_cfg)) {
		TC_PRINT("ERROR: transfer config (%d)\n", chan_id);
		return TC_FAIL;
	}

	if (dma_start(dma, chan_id)) {
		TC_PRINT("ERROR: transfer start (%d)\n", chan_id);
		return TC_FAIL;
	}

	k_sleep(K_MSEC(SLEEPTIME));

	if (transfer_count < TRANSFER_LOOPS) {
		transfer_count = TRANSFER_LOOPS;
		TC_PRINT("ERROR: unfinished transfer\n");
		if (dma_stop(dma, chan_id)) {
			TC_PRINT("ERROR: transfer stop\n");
		}
		return TC_FAIL;
	}

	TC_PRINT("Each RX buffer should contain the full TX buffer string.\n");

	for (int i = 0; i < TRANSFER_LOOPS; i++) {
		TC_PRINT("RX data Loop %d\n", i);
		if (memcmp(tx_data, rx_data[i], CONFIG_DMA_LOOP_TRANSFER_SIZE)) {
			return TC_FAIL;
		}
	}

	TC_PRINT("Finished DMA: %s\n", dma->name);
	return TC_PASS;
}

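/**
 * @brief Same scenario as test_loop(), but suspends and resumes the channel.
 *
 * Skips the test when the driver does not implement dma_suspend() (-ENOSYS),
 * verifies that no transfers complete while suspended, then resumes and
 * checks the full data set.
 */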
static int test_loop_suspend_resume(const struct device *dma)
{
	static int chan_id;
	int res = 0;

	test_case_id = 1;
	TC_PRINT("DMA memory to memory transfer started\n");

	memset(tx_data, 0, sizeof(tx_data));

	for (int i = 0; i < CONFIG_DMA_LOOP_TRANSFER_SIZE; i++) {
		tx_data[i] = i;
	}

	memset(rx_data, 0, sizeof(rx_data));

	if (!device_is_ready(dma)) {
		TC_PRINT("dma controller device is not ready\n");
		return TC_FAIL;
	}

	TC_PRINT("Preparing DMA Controller: %s\n", dma->name);
	dma_cfg.channel_direction = MEMORY_TO_MEMORY;
	dma_cfg.source_data_size = 1U;
	dma_cfg.dest_data_size = 1U;
	dma_cfg.source_burst_length = 1U;
	dma_cfg.dest_burst_length = 1U;
#ifdef CONFIG_DMAMUX_STM32
	dma_cfg.user_data = (void *)dma;
#else
	dma_cfg.user_data = NULL;
#endif /* CONFIG_DMAMUX_STM32 */
	dma_cfg.dma_callback = dma_user_callback;
	dma_cfg.block_count = 1U;
	dma_cfg.head_block = &dma_block_cfg;

#ifdef CONFIG_DMA_MCUX_TEST_SLOT_START
	dma_cfg.dma_slot = CONFIG_DMA_MCUX_TEST_SLOT_START;
#endif

	chan_id = dma_request_channel(dma, NULL);
	if (chan_id < 0) {
		TC_PRINT("this platform does not support the dma channel\n");
		chan_id = CONFIG_DMA_LOOP_TRANSFER_CHANNEL_NR;
	}
	transfer_count = 0;
	done = 0;
	TC_PRINT("Starting the transfer on channel %d and waiting %d ms\n", chan_id, SLEEPTIME);
	dma_block_cfg.block_size = sizeof(tx_data);
#ifdef CONFIG_DMA_64BIT
	dma_block_cfg.source_address = (uint64_t)tx_data;
	dma_block_cfg.dest_address = (uint64_t)rx_data[transfer_count];
#else
	dma_block_cfg.source_address = (uint32_t)tx_data;
	dma_block_cfg.dest_address = (uint32_t)rx_data[transfer_count];
#endif

	unsigned int irq_key;

	if (dma_config(dma, chan_id, &dma_cfg)) {
		TC_PRINT("ERROR: transfer config (%d)\n", chan_id);
		return TC_FAIL;
	}

	if (dma_start(dma, chan_id)) {
		TC_PRINT("ERROR: transfer start (%d)\n", chan_id);
		return TC_FAIL;
	}

	/* Try multiple times to suspend the transfers */
	uint32_t tc = transfer_count;

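	/*
	 * Suspend with interrupts locked and snapshot transfer_count; retry
	 * until the count stops changing, i.e. no further completion
	 * callbacks fire and the channel is actually quiescent.
	 */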
	do {
		irq_key = irq_lock();
		res = dma_suspend(dma, chan_id);
		if (res == -ENOSYS) {
			done = 1;
			TC_PRINT("suspend not supported\n");
			dma_stop(dma, chan_id);
			ztest_test_skip();
			return TC_SKIP;
		}
		tc = transfer_count;
		irq_unlock(irq_key);
		k_busy_wait(100);
	} while (tc != transfer_count);

	/* If all transfers already completed, we failed to suspend in time */
	if (transfer_count == TRANSFER_LOOPS) {
		TC_PRINT("ERROR: failed to suspend transfers\n");
		if (dma_stop(dma, chan_id)) {
			TC_PRINT("ERROR: transfer stop\n");
		}
		return TC_FAIL;
	}
	TC_PRINT("suspended after %d transfers occurred\n", transfer_count);

	/* Now sleep */
	k_sleep(K_MSEC(SLEEPTIME));

	/* If transfers ran to completion during the sleep, the suspend did not hold */
	if (transfer_count == TRANSFER_LOOPS) {
		TC_PRINT("ERROR: failed to suspend transfers\n");
		if (dma_stop(dma, chan_id)) {
			TC_PRINT("ERROR: transfer stop\n");
		}
		return TC_FAIL;
	}
	TC_PRINT("resuming after %d transfers occurred\n", transfer_count);

	res = dma_resume(dma, chan_id);
	TC_PRINT("Resumed transfers\n");
	if (res != 0) {
		TC_PRINT("ERROR: resume failed, channel %d, result %d\n", chan_id, res);
		if (dma_stop(dma, chan_id)) {
			TC_PRINT("ERROR: transfer stop\n");
		}
		return TC_FAIL;
	}

	k_sleep(K_MSEC(SLEEPTIME));

	TC_PRINT("Transfer count %d\n", transfer_count);
	if (transfer_count < TRANSFER_LOOPS) {
		transfer_count = TRANSFER_LOOPS;
		TC_PRINT("ERROR: unfinished transfer\n");
		if (dma_stop(dma, chan_id)) {
			TC_PRINT("ERROR: transfer stop\n");
		}
		return TC_FAIL;
	}

	TC_PRINT("Each RX buffer should contain the full TX buffer string.\n");

	for (int i = 0; i < TRANSFER_LOOPS; i++) {
		TC_PRINT("RX data Loop %d\n", i);
		if (memcmp(tx_data, rx_data[i], CONFIG_DMA_LOOP_TRANSFER_SIZE)) {
			return TC_FAIL;
		}
	}

	TC_PRINT("Finished DMA: %s\n", dma->name);
	return TC_PASS;
}

/**
 * @brief Check if the device is in the expected power state.
 *
 * @param dev Device instance.
 * @param expected Expected device power state.
 *
 * @retval true If the device is in the expected power state.
 * @retval false If the device is not in the expected power state.
 */
static bool check_dev_power_state(const struct device *dev, enum pm_device_state expected)
{
#if CONFIG_PM_DEVICE_RUNTIME
	enum pm_device_state state;

	if (pm_device_state_get(dev, &state) == 0) {
		if (expected != state) {
			TC_PRINT("ERROR: device %s is in an incorrect power state"
				 " (current state = %s, expected = %s)\n",
				 dev->name, pm_device_state_str(state),
				 pm_device_state_str(expected));
			return false;
		}

		return true;
	}

	TC_PRINT("ERROR: unable to get power state of %s\n", dev->name);
	return false;
#else
	return true;
#endif /* CONFIG_PM_DEVICE_RUNTIME */
}

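/**
 * @brief Exercise dma_stop()/dma_start() sequencing alongside device PM.
 *
 * Stops a configured but not yet started channel, runs the transfer loop,
 * then stops the channel twice in a row, checking the controller's power
 * state (when CONFIG_PM_DEVICE_RUNTIME is enabled) at each step.
 */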
static int test_loop_repeated_start_stop(const struct device *dma)
{
	static int chan_id;
	enum pm_device_state init_state = pm_device_on_power_domain(dma) ?
		PM_DEVICE_STATE_OFF : PM_DEVICE_STATE_SUSPENDED;

	test_case_id = 0;
	TC_PRINT("DMA memory to memory transfer started\n");
	TC_PRINT("Preparing DMA Controller\n");

	memset(tx_data, 0, sizeof(tx_data));

	for (int i = 0; i < CONFIG_DMA_LOOP_TRANSFER_SIZE; i++) {
		tx_data[i] = i;
	}

	memset(rx_data, 0, sizeof(rx_data));

	if (!device_is_ready(dma)) {
		TC_PRINT("dma controller device is not ready\n");
		return TC_FAIL;
	}

	dma_cfg.channel_direction = MEMORY_TO_MEMORY;
	dma_cfg.source_data_size = 1U;
	dma_cfg.dest_data_size = 1U;
	dma_cfg.source_burst_length = 1U;
	dma_cfg.dest_burst_length = 1U;
#ifdef CONFIG_DMAMUX_STM32
	dma_cfg.user_data = (void *)dma;
#else
	dma_cfg.user_data = NULL;
#endif /* CONFIG_DMAMUX_STM32 */
	dma_cfg.dma_callback = dma_user_callback;
	dma_cfg.block_count = 1U;
	dma_cfg.head_block = &dma_block_cfg;

#ifdef CONFIG_DMA_MCUX_TEST_SLOT_START
	dma_cfg.dma_slot = CONFIG_DMA_MCUX_TEST_SLOT_START;
#endif

	if (!check_dev_power_state(dma, PM_DEVICE_STATE_OFF)) {
		return TC_FAIL;
	}

	chan_id = dma_request_channel(dma, NULL);
	if (chan_id < 0) {
		TC_PRINT("this platform does not support the dma channel\n");
		chan_id = CONFIG_DMA_LOOP_TRANSFER_CHANNEL_NR;
	}
	transfer_count = 0;
	done = 0;
	TC_PRINT("Starting the transfer on channel %d and waiting %d ms\n", chan_id, SLEEPTIME);
	dma_block_cfg.block_size = sizeof(tx_data);
#ifdef CONFIG_DMA_64BIT
	dma_block_cfg.source_address = (uint64_t)tx_data;
	dma_block_cfg.dest_address = (uint64_t)rx_data[transfer_count];
#else
	dma_block_cfg.source_address = (uint32_t)tx_data;
	dma_block_cfg.dest_address = (uint32_t)rx_data[transfer_count];
#endif

	if (dma_config(dma, chan_id, &dma_cfg)) {
		TC_PRINT("ERROR: transfer config (%d)\n", chan_id);
		return TC_FAIL;
	}

	if (dma_stop(dma, chan_id)) {
		TC_PRINT("ERROR: transfer stop on stopped channel (%d)\n", chan_id);
		return TC_FAIL;
	}

	if (!check_dev_power_state(dma, init_state)) {
		return TC_FAIL;
	}

	if (dma_start(dma, chan_id)) {
		TC_PRINT("ERROR: transfer start (%d)\n", chan_id);
		return TC_FAIL;
	}

	if (!check_dev_power_state(dma, PM_DEVICE_STATE_ACTIVE)) {
		return TC_FAIL;
	}

	k_sleep(K_MSEC(SLEEPTIME));

	if (transfer_count < TRANSFER_LOOPS) {
		transfer_count = TRANSFER_LOOPS;
		TC_PRINT("ERROR: unfinished transfer\n");
		if (dma_stop(dma, chan_id)) {
			TC_PRINT("ERROR: transfer stop\n");
		}
		return TC_FAIL;
	}

	TC_PRINT("Each RX buffer should contain the full TX buffer string.\n");

	for (int i = 0; i < TRANSFER_LOOPS; i++) {
		TC_PRINT("RX data Loop %d\n", i);
		if (memcmp(tx_data, rx_data[i], CONFIG_DMA_LOOP_TRANSFER_SIZE)) {
			return TC_FAIL;
		}
	}

	TC_PRINT("Finished DMA: %s\n", dma->name);

	if (dma_stop(dma, chan_id)) {
		TC_PRINT("ERROR: transfer stop (%d)\n", chan_id);
		return TC_FAIL;
	}

	if (!check_dev_power_state(dma, init_state)) {
		return TC_FAIL;
	}

	if (dma_stop(dma, chan_id)) {
		TC_PRINT("ERROR: repeated transfer stop (%d)\n", chan_id);
		return TC_FAIL;
	}

	return TC_PASS;
}

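/*
 * Build the list of DMA controllers under test from the devicetree node
 * labels tst_dma0 .. tst_dma<N-1>, then instantiate one ZTEST per controller
 * for each scenario above.
 */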
#define DMA_NAME(i, _) tst_dma ## i
#define DMA_LIST LISTIFY(CONFIG_DMA_LOOP_TRANSFER_NUMBER_OF_DMAS, DMA_NAME, (,))

#define TEST_LOOP(dma_name)                                                        \
	ZTEST(dma_m2m_loop, test_ ## dma_name ## _m2m_loop)                        \
	{                                                                          \
		const struct device *dma = DEVICE_DT_GET(DT_NODELABEL(dma_name));  \
		zassert_true((test_loop(dma) == TC_PASS));                         \
	}

FOR_EACH(TEST_LOOP, (), DMA_LIST);

#define TEST_LOOP_SUSPEND_RESUME(dma_name)                                         \
	ZTEST(dma_m2m_loop, test_ ## dma_name ## _m2m_loop_suspend_resume)         \
	{                                                                          \
		const struct device *dma = DEVICE_DT_GET(DT_NODELABEL(dma_name));  \
		zassert_true((test_loop_suspend_resume(dma) == TC_PASS));          \
	}

FOR_EACH(TEST_LOOP_SUSPEND_RESUME, (), DMA_LIST);

#define TEST_LOOP_REPEATED_START_STOP(dma_name)                                    \
	ZTEST(dma_m2m_loop, test_ ## dma_name ## _m2m_loop_repeated_start_stop)    \
	{                                                                          \
		const struct device *dma = DEVICE_DT_GET(DT_NODELABEL(dma_name));  \
		zassert_true((test_loop_repeated_start_stop(dma) == TC_PASS));     \
	}

FOR_EACH(TEST_LOOP_REPEATED_START_STOP, (), DMA_LIST);