/*
 * Copyright (c) 2022 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Verify Zephyr DMA memory-to-memory transfers with scatter-gather
 * @details
 * - Test Steps
 *   -# Set the DMA configuration with scatter-gather enabled
 *   -# Set direction memory-to-memory with XFERS chained block transfers
 *   -# Start the transfer tx -> rx
 * - Expected Results
 *   -# Data is transferred correctly from the source buffer to the
 *      destination buffers without software intervention.
 */
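
/*
 * Transfer layout: one source buffer (tx_data) is copied into XFERS
 * destination buffers (rx_data[i]) by chaining dma_block_config entries
 * through their next_block pointers, so the controller walks the whole
 * list without CPU intervention.
 */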

#include <zephyr/kernel.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/ztest.h>
#include <inttypes.h> /* PRIx64, used in the CONFIG_DMA_64BIT logging path */

#define XFERS 4

#if CONFIG_NOCACHE_MEMORY
static __aligned(32) uint8_t tx_data[CONFIG_DMA_SG_XFER_SIZE] __used
	__attribute__((__section__(".nocache")));
static __aligned(32) uint8_t rx_data[XFERS][CONFIG_DMA_SG_XFER_SIZE] __used
	__attribute__((__section__(".nocache.dma")));
#else
/* The source buffer must reside in RAM so it can be used as a DMA source address. */
static __aligned(32) uint8_t tx_data[CONFIG_DMA_SG_XFER_SIZE];
static __aligned(32) uint8_t rx_data[XFERS][CONFIG_DMA_SG_XFER_SIZE] = { { 0 } };
#endif

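/* Given by the DMA callback on successful completion; taken by the test thread. */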
K_SEM_DEFINE(xfer_sem, 0, 1);

static struct dma_config dma_cfg = {0};
static struct dma_block_config dma_block_cfgs[XFERS];

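/*
 * Transfer callback: a negative status reports a DMA error; otherwise
 * give xfer_sem so the waiting test thread can verify the buffers.
 */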
static void dma_sg_callback(const struct device *dma_dev, void *user_data,
			    uint32_t channel, int status)
{
	if (status < 0) {
		TC_PRINT("callback status %d\n", status);
	} else {
		TC_PRINT("giving xfer_sem\n");
		k_sem_give(&xfer_sem);
	}
}

static int test_sg(void)
{
	const struct device *dma;
	static int chan_id;

	TC_PRINT("DMA memory to memory transfer started\n");
	TC_PRINT("Preparing DMA Controller\n");

	memset(tx_data, 0, sizeof(tx_data));

	for (int i = 0; i < CONFIG_DMA_SG_XFER_SIZE; i++) {
		tx_data[i] = i;
	}

	memset(rx_data, 0, sizeof(rx_data));

	dma = DEVICE_DT_GET(DT_ALIAS(dma0));
	if (!device_is_ready(dma)) {
		TC_PRINT("dma controller device is not ready\n");
		return TC_FAIL;
	}

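	/* 4-byte data units with a burst length of four units on both sides. */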
	dma_cfg.channel_direction = MEMORY_TO_MEMORY;
	dma_cfg.source_data_size = 4U;
	dma_cfg.dest_data_size = 4U;
	dma_cfg.source_burst_length = 4U;
	dma_cfg.dest_burst_length = 4U;
#ifdef CONFIG_DMAMUX_STM32
	dma_cfg.user_data = (void *)dma;
#else
	dma_cfg.user_data = NULL;
#endif /* CONFIG_DMAMUX_STM32 */
	dma_cfg.dma_callback = dma_sg_callback;
	dma_cfg.block_count = XFERS;
	dma_cfg.head_block = dma_block_cfgs;
	dma_cfg.complete_callback_en = false; /* callback only after the whole block list completes */

#ifdef CONFIG_DMA_MCUX_TEST_SLOT_START
	dma_cfg.dma_slot = CONFIG_DMA_MCUX_TEST_SLOT_START;
#endif

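	/* Prefer a runtime-allocated channel; fall back to the Kconfig-selected one. */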
	chan_id = dma_request_channel(dma, NULL);
	if (chan_id < 0) {
		TC_PRINT("Platform does not support dma_request_channel,"
			 " using Kconfig DMA_SG_CHANNEL_NR instead\n");
		chan_id = CONFIG_DMA_SG_CHANNEL_NR;
	}

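	/*
	 * Build the block list: each block copies the same tx_data buffer
	 * into its own rx_data[i] row; next_block chains the entries so the
	 * controller processes all XFERS blocks as one scatter-gather job.
	 */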
	memset(dma_block_cfgs, 0, sizeof(dma_block_cfgs));
	for (int i = 0; i < XFERS; i++) {
		dma_block_cfgs[i].source_gather_en = 1U;
		dma_block_cfgs[i].block_size = CONFIG_DMA_SG_XFER_SIZE;
#ifdef CONFIG_DMA_64BIT
		dma_block_cfgs[i].source_address = (uint64_t)(tx_data);
		dma_block_cfgs[i].dest_address = (uint64_t)(rx_data[i]);
		TC_PRINT("dma block %d block_size %d, source addr %" PRIx64 ", dest addr %"
			 PRIx64 "\n", i, CONFIG_DMA_SG_XFER_SIZE, dma_block_cfgs[i].source_address,
			 dma_block_cfgs[i].dest_address);
#else
		dma_block_cfgs[i].source_address = (uint32_t)(tx_data);
		dma_block_cfgs[i].dest_address = (uint32_t)(rx_data[i]);
		TC_PRINT("dma block %d block_size %d, source addr %x, dest addr %x\n",
			 i, CONFIG_DMA_SG_XFER_SIZE, dma_block_cfgs[i].source_address,
			 dma_block_cfgs[i].dest_address);
#endif
		if (i < XFERS - 1) {
			dma_block_cfgs[i].next_block = &dma_block_cfgs[i + 1];
			TC_PRINT("set next block pointer to %p\n", dma_block_cfgs[i].next_block);
		}
	}

	TC_PRINT("Configuring the scatter-gather transfer on channel %d\n", chan_id);

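	/* Program the whole chained configuration in a single dma_config() call. */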
	if (dma_config(dma, chan_id, &dma_cfg)) {
		TC_PRINT("ERROR: transfer config (%d)\n", chan_id);
		return TC_FAIL;
	}

	TC_PRINT("Starting the transfer on channel %d and waiting for completion\n", chan_id);

	if (dma_start(dma, chan_id)) {
		TC_PRINT("ERROR: transfer start (%d)\n", chan_id);
		return TC_FAIL;
	}

	if (k_sem_take(&xfer_sem, K_MSEC(1000)) != 0) {
		TC_PRINT("Timed out waiting for xfers\n");
		return TC_FAIL;
	}

	TC_PRINT("Verifying that each RX buffer contains the full TX buffer contents\n");

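	/*
	 * Comparing the full CONFIG_DMA_SG_XFER_SIZE catches both data
	 * corruption and destination blocks that never ran.
	 */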
	for (int i = 0; i < XFERS; i++) {
		TC_PRINT("rx_data[%d]\n", i);
		if (memcmp(tx_data, rx_data[i], CONFIG_DMA_SG_XFER_SIZE)) {
			return TC_FAIL;
		}
	}

	TC_PRINT("Finished: DMA Scatter-Gather\n");
	return TC_PASS;
}

/* export test cases */
ZTEST(dma_m2m_sg, test_dma_m2m_sg)
{
	zassert_true(test_sg() == TC_PASS);
}