1 /* Copyright (c) 2022 Intel Corporation
2 * SPDX-License-Identifier: Apache-2.0
3 */
4
5 #include <zephyr/cache.h>
6 #include <zephyr/kernel.h>
7 #include <zephyr/ztest.h>
8 #include <intel_adsp_ipc.h>
9 #include <zephyr/drivers/dma.h>
10 #include "tests.h"
11
/* Timeout for each IPC round-trip to the host test driver. */
#define IPC_TIMEOUT K_MSEC(1500)
/* Size in bytes of the single DMA buffer shared by both tests. */
#define DMA_BUF_SIZE 256
/* Bytes moved per transfer (one full buffer per iteration). */
#define TRANSFER_SIZE 256
/* Number of back-to-back transfers each test performs. */
#define TRANSFER_COUNT 8

/* The HDA hardware requires the DMA buffer to meet the stream's address
 * alignment, taken from the devicetree node for the host-in stream.
 */
#define ALIGNMENT DMA_BUF_ADDR_ALIGNMENT(DT_NODELABEL(hda_host_in))
static __aligned(ALIGNMENT) uint8_t dma_buf[DMA_BUF_SIZE];

/* Register block geometry pulled from devicetree. These must be defined
 * before including intel_adsp_hda.h below, which uses them.
 */
#define HDA_HOST_IN_BASE DT_PROP_BY_IDX(DT_NODELABEL(hda_host_in), reg, 0)
#define HDA_HOST_OUT_BASE DT_PROP_BY_IDX(DT_NODELABEL(hda_host_out), reg, 0)
#define HDA_STREAM_COUNT DT_PROP(DT_NODELABEL(hda_host_out), dma_channels)
#define HDA_REGBLOCK_SIZE DT_PROP_BY_IDX(DT_NODELABEL(hda_host_out), reg, 1)
#include <intel_adsp_hda.h>

/* Updated by ipc_message() (IPC context) and polled by the test thread;
 * msg_res is written before msg_cnt is bumped so a counter change implies
 * a valid result.
 */
static volatile int msg_cnt;
static volatile int msg_res;
28
ipc_message(const struct device * dev,void * arg,uint32_t data,uint32_t ext_data)29 static bool ipc_message(const struct device *dev, void *arg,
30 uint32_t data, uint32_t ext_data)
31 {
32 printk("HDA message received, data %u, ext_data %u\n", data, ext_data);
33 msg_res = data;
34 msg_cnt++;
35 return true;
36 }
37
/*
 * Tests host input streams with the DMA API
 *
 * Writes a ramp pattern into dma_buf, streams it to the host through the
 * hda_host_in device, and asks the host over IPC to validate each of the
 * TRANSFER_COUNT transfers.
 *
 * Note that the order of operations in this test are important and things potentially will not
 * work in horrible and unexpected ways if not done as they are here.
 */
ZTEST(intel_adsp_hda_dma, test_hda_host_in_dma)
{
	const struct device *dma;
	int res, channel;
	uint32_t last_msg_cnt;

	printk("smoke testing hda with fifo buffer at address %p, size %d\n",
		dma_buf, DMA_BUF_SIZE);

	/* Route host IPC replies to ipc_message(), which records them in
	 * msg_res/msg_cnt for the polling loop below.
	 */
	intel_adsp_ipc_set_message_handler(INTEL_ADSP_IPC_HOST_DEV, ipc_message, NULL);

	printk("Using buffer of size %d at addr %p\n", DMA_BUF_SIZE, dma_buf);

	/* setup a ramp in the buffer */
	for (uint32_t i = 0; i < DMA_BUF_SIZE; i++) {
		dma_buf[i] = i & 0xff;
	}

#if (IS_ENABLED(CONFIG_KERNEL_COHERENCE))
	zassert_true(arch_mem_coherent(dma_buf), "Buffer is unexpectedly incoherent!");
#else
	/* The buffer is in the cached address range and must be flushed */
	zassert_false(arch_mem_coherent(dma_buf), "Buffer is unexpectedly coherent!");
	sys_cache_data_flush_range(dma_buf, DMA_BUF_SIZE);
#endif

	dma = DEVICE_DT_GET(DT_NODELABEL(hda_host_in));
	zassert_true(device_is_ready(dma), "DMA device is not ready");

	channel = dma_request_channel(dma, NULL);
	zassert_true(channel >= 0, "Expected a valid DMA channel");
	hda_dump_regs(HOST_IN, HDA_REGBLOCK_SIZE, channel, "dma channel");

	/* Host side resets and configures its end of the stream before the
	 * DSP-side DMA channel is configured and started.
	 */
	hda_ipc_msg(INTEL_ADSP_IPC_HOST_DEV, IPCCMD_HDA_RESET, channel, IPC_TIMEOUT);
	hda_dump_regs(HOST_IN, HDA_REGBLOCK_SIZE, channel, "host reset");

	/* IPC payload packs the channel in the low byte and the buffer size
	 * in the upper bits -- assumed to match the host-side decoder in
	 * the host test driver (TODO confirm against tests.h counterpart).
	 */
	hda_ipc_msg(INTEL_ADSP_IPC_HOST_DEV, IPCCMD_HDA_CONFIG,
		    channel | (DMA_BUF_SIZE << 8), IPC_TIMEOUT);
	hda_dump_regs(HOST_IN, HDA_REGBLOCK_SIZE, channel, "host config");


	struct dma_block_config block_cfg = {
		.block_size = DMA_BUF_SIZE,
		.source_address = (uint32_t)(&dma_buf[0]),
	};

	struct dma_config dma_cfg = {
		.block_count = 1,
		.channel_direction = MEMORY_TO_HOST,
		.head_block = &block_cfg,
	};

	res = dma_config(dma, channel, &dma_cfg);
	hda_dump_regs(HOST_IN, HDA_REGBLOCK_SIZE, channel, "dsp dma config");
	zassert_ok(res, "Expected dma config to succeed");

	res = dma_start(dma, channel);
	hda_dump_regs(HOST_IN, HDA_REGBLOCK_SIZE, channel, "dsp dma start");
	zassert_ok(res, "Expected dma start to succeed");

	hda_ipc_msg(INTEL_ADSP_IPC_HOST_DEV, IPCCMD_HDA_START, channel, IPC_TIMEOUT);
	hda_dump_regs(HOST_IN, HDA_REGBLOCK_SIZE, channel, "host start");

	for (uint32_t i = 0; i < TRANSFER_COUNT; i++) {
		/* Mark another DMA_BUF_SIZE bytes as ready to stream out. */
		res = dma_reload(dma, channel, 0, 0, DMA_BUF_SIZE);
		zassert_ok(res, "Expected dma reload to succeed");
		hda_dump_regs(HOST_IN, HDA_REGBLOCK_SIZE, channel, "dsp dma reload");

		struct dma_status status;
		int j;
		/* up to 10mS wait time */
		for (j = 0; j < 100; j++) {
			res = dma_get_status(dma, channel, &status);
			zassert_ok(res, "Expected dma status to succeed");
			/* Equal read/write positions indicate the data queued
			 * by the reload above has drained.
			 */
			if (status.read_position == status.write_position) {
				break;
			}
			k_busy_wait(100);
		}
		hda_dump_regs(HOST_IN, HDA_REGBLOCK_SIZE, channel,
			      "dsp read write equal after %d uS", j*100);

		/* Ask the host to validate the transfer and wait for its
		 * reply, delivered via ipc_message().
		 */
		last_msg_cnt = msg_cnt;
		hda_ipc_msg(INTEL_ADSP_IPC_HOST_DEV, IPCCMD_HDA_VALIDATE, channel,
			    IPC_TIMEOUT);

		WAIT_FOR(msg_cnt > last_msg_cnt, 10000, k_msleep(1));
		zassert_true(msg_res == 1,
			     "Expected data validation to be true from Host");
	}

	/* Tear down: host side first, then the DSP-side channel. */
	hda_ipc_msg(INTEL_ADSP_IPC_HOST_DEV, IPCCMD_HDA_RESET,
		    channel, IPC_TIMEOUT);

	res = dma_stop(dma, channel);
	zassert_ok(res, "Expected dma stop to succeed");
}
141
/*
 * Tests host output streams with the DMA API
 *
 * The host is asked (over IPC) to send a ramp pattern into dma_buf through
 * the hda_host_out device TRANSFER_COUNT times; each transfer is checked
 * on the DSP side for the expected 0..DMA_BUF_SIZE-1 ramp.
 */
void test_hda_host_out_dma(void)
{
	const struct device *dma;
	int res, channel;
	bool is_ramp;


	printk("smoke testing hda with fifo buffer at address %p, size %d\n",
		dma_buf, DMA_BUF_SIZE);

	/* Route host IPC replies to ipc_message(). */
	intel_adsp_ipc_set_message_handler(INTEL_ADSP_IPC_HOST_DEV, ipc_message, NULL);

	printk("Using buffer of size %d at addr %p\n", DMA_BUF_SIZE, dma_buf);

	dma = DEVICE_DT_GET(DT_NODELABEL(hda_host_out));
	zassert_true(device_is_ready(dma), "DMA device is not ready");

	channel = dma_request_channel(dma, NULL);
	zassert_true(channel >= 0, "Expected a valid DMA channel");
	hda_dump_regs(HOST_OUT, HDA_REGBLOCK_SIZE, channel, "dma request channel");

	/* NOTE(review): host-side output streams appear to be indexed with a
	 * +7 offset from the local DMA channel number -- confirm against the
	 * host test driver's stream numbering.
	 */
	hda_ipc_msg(INTEL_ADSP_IPC_HOST_DEV, IPCCMD_HDA_RESET,
		    (channel + 7), IPC_TIMEOUT);
	hda_dump_regs(HOST_OUT, HDA_REGBLOCK_SIZE, channel, "host reset");

	/* Payload packs the host stream index in the low byte and the buffer
	 * size in the upper bits.
	 */
	hda_ipc_msg(INTEL_ADSP_IPC_HOST_DEV, IPCCMD_HDA_CONFIG,
		    (channel + 7) | (DMA_BUF_SIZE << 8), IPC_TIMEOUT);
	hda_dump_regs(HOST_OUT, HDA_REGBLOCK_SIZE, channel, "host config");

	struct dma_block_config block_cfg = {
		.block_size = DMA_BUF_SIZE,
		.source_address = (uint32_t)(&dma_buf[0]),
	};

	struct dma_config dma_cfg = {
		.block_count = 1,
		.channel_direction = HOST_TO_MEMORY,
		.head_block = &block_cfg,
	};

	res = dma_config(dma, channel, &dma_cfg);
	hda_dump_regs(HOST_OUT, HDA_REGBLOCK_SIZE, channel, "dsp dma config");
	zassert_ok(res, "Expected dma config to succeed");

	res = dma_start(dma, channel);
	hda_dump_regs(HOST_OUT, HDA_REGBLOCK_SIZE, channel, "dsp dma start");
	zassert_ok(res, "Expected dma start to succeed");

	hda_ipc_msg(INTEL_ADSP_IPC_HOST_DEV, IPCCMD_HDA_START, (channel + 7), IPC_TIMEOUT);
	hda_dump_regs(HOST_OUT, HDA_REGBLOCK_SIZE, channel, "host start");

	for (uint32_t i = 0; i < TRANSFER_COUNT; i++) {
		/* Ask the host to push one buffer's worth of ramp data. */
		hda_ipc_msg(INTEL_ADSP_IPC_HOST_DEV, IPCCMD_HDA_SEND,
			    (channel + 7) | (DMA_BUF_SIZE << 8), IPC_TIMEOUT);
		hda_dump_regs(HOST_OUT, HDA_REGBLOCK_SIZE, channel, "host send");

		/* TODO add a dma_poll() style call for xfer ready/complete maybe? */
		WAIT_FOR(intel_adsp_hda_buf_full(HDA_HOST_OUT_BASE, HDA_REGBLOCK_SIZE, channel),
			 10000, k_msleep(1));
		hda_dump_regs(HOST_OUT, HDA_REGBLOCK_SIZE, channel, "dsp wait for full");

#if (IS_ENABLED(CONFIG_KERNEL_COHERENCE))
		zassert_true(arch_mem_coherent(dma_buf), "Buffer is unexpectedly incoherent!");
#else
		/* The buffer is in the cached address range and must be invalidated
		 * prior to reading.
		 */
		zassert_false(arch_mem_coherent(dma_buf), "Buffer is unexpectedly coherent!");
		sys_cache_data_invd_range(dma_buf, DMA_BUF_SIZE);
#endif

		/* Verify the received pattern is the expected ramp. */
		is_ramp = true;
		for (int j = 0; j < DMA_BUF_SIZE; j++) {
			/* printk("dma_buf[%d] = %d\n", j, dma_buf[j]); */ /* DEBUG HELPER */
			if (dma_buf[j] != j) {
				is_ramp = false;
			}
		}
		zassert_true(is_ramp, "Expected data to be a ramp");

		/* Free the buffer space for the next transfer. */
		res = dma_reload(dma, channel, 0, 0, DMA_BUF_SIZE);
		zassert_ok(res, "Expected dma reload to succeed");
		hda_dump_regs(HOST_OUT, HDA_REGBLOCK_SIZE, channel, "dsp dma reload");
	}

	/* Tear down: host side first, then the DSP-side channel. */
	hda_ipc_msg(INTEL_ADSP_IPC_HOST_DEV, IPCCMD_HDA_RESET, (channel + 7), IPC_TIMEOUT);

	hda_dump_regs(HOST_OUT, HDA_REGBLOCK_SIZE, channel, "host reset");

	res = dma_stop(dma, channel);
	zassert_ok(res, "Expected dma stop to succeed");
	hda_dump_regs(HOST_OUT, HDA_REGBLOCK_SIZE, channel, "dsp dma stop");
}
238
/* Register the suite; no setup/teardown hooks -- each test does its own
 * device and host-side configuration.
 */
ZTEST_SUITE(intel_adsp_hda_dma, NULL, NULL, NULL, NULL, NULL);
240