/*
 * Copyright (c) 2019 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @addtogroup t_driver_uart
 * @{
 * @defgroup t_uart_mix_fifo_poll test_uart_mix_fifo_poll
 * @}
 */

#include <zephyr/drivers/uart.h>
#include <zephyr/ztest.h>
#include <zephyr/drivers/counter.h>
#include <zephyr/random/random.h>
#include <zephyr/pm/device_runtime.h>
/* RX and TX pins have to be connected together. */

#if DT_NODE_EXISTS(DT_NODELABEL(dut))
#define UART_NODE DT_NODELABEL(dut)
#elif defined(CONFIG_BOARD_SAMD21_XPRO)
#define UART_NODE DT_NODELABEL(sercom1)
#elif defined(CONFIG_BOARD_SAMR21_XPRO)
#define UART_NODE DT_NODELABEL(sercom3)
#elif defined(CONFIG_BOARD_SAME54_XPRO)
#define UART_NODE DT_NODELABEL(sercom1)
#else
#define UART_NODE DT_CHOSEN(zephyr_console)
#endif
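
/* The UART under test is preferably selected by adding a "dut" nodelabel in a
 * board overlay. An illustrative (board specific, not part of this test)
 * overlay entry could look like:
 *
 *   dut: &uart1 {
 *           status = "okay";
 *   };
 */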

#if DT_NODE_EXISTS(DT_NODELABEL(counter_dev))
#define COUNTER_NODE DT_NODELABEL(counter_dev)
#else
#define COUNTER_NODE DT_NODELABEL(timer0)
#endif

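/* Per-context RX bookkeeping: number of bytes received and the last payload
 * nibble seen, used to verify that bytes arrive in order.
 */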
struct rx_source {
	int cnt;
	uint8_t prev;
};

struct dut_data {
	const struct device *dev;
	const char *name;
};

static struct dut_data duts[] = {
	{
		.dev = DEVICE_DT_GET(UART_NODE),
		.name = DT_NODE_FULL_NAME(UART_NODE),
	},
#if DT_NODE_EXISTS(DT_NODELABEL(dut2)) && DT_NODE_HAS_STATUS(DT_NODELABEL(dut2), okay)
	{
		.dev = DEVICE_DT_GET(DT_NODELABEL(dut2)),
		.name = DT_NODE_FULL_NAME(DT_NODELABEL(dut2)),
	},
#endif
};

#define BUF_SIZE 16

/* Buffer used for polling. */
static uint8_t txbuf[3][BUF_SIZE];

/* Buffer used for async or interrupt driven APIs.
 * One of the test configurations checks that a read-only buffer works with
 * the driver.
 */
static IF_ENABLED(TEST_CONST_BUFFER, (const)) uint8_t txbuf3[16] = {
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f
};

struct test_data {
	const uint8_t *buf;
	volatile int cnt;
	int max;
	struct k_sem sem;
};

static struct rx_source source[4];
static struct test_data test_data[3];
static struct test_data int_async_data;

static const struct device *const counter_dev =
	DEVICE_DT_GET(COUNTER_NODE);
static const struct device *uart_dev;

static bool async;
static bool int_driven;
static volatile bool async_rx_enabled;
static struct k_sem async_tx_sem;

static void int_driven_callback(const struct device *dev, void *user_data);
static void async_callback(const struct device *dev,
			   struct uart_event *evt, void *user_data);

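/* Each received byte encodes the sending context in its upper nibble and a
 * rolling counter in its lower nibble; verify that the counter increments by
 * one (mod 16) within each context.
 */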
static void process_byte(uint8_t b)
{
	int base = b >> 4;
	struct rx_source *src = &source[base];
	bool ok;

	b &= 0x0F;
	src->cnt++;

	if (src->cnt == 1) {
		src->prev = b;
		return;
	}

	ok = ((b - src->prev) == 1) || (!b && (src->prev == 0x0F));

	zassert_true(ok, "Unexpected byte received:0x%02x, prev:0x%02x",
		     (base << 4) | b, (base << 4) | src->prev);
	src->prev = b;
}

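/* Periodic counter callback: (re)enables async RX, toggles interrupt driven
 * RX on and off, or drains incoming data with uart_poll_in() when only the
 * polling API is in use.
 */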
static void counter_top_handler(const struct device *dev, void *user_data)
{
	static bool enable = true;
	static uint8_t async_rx_buf[4];

	if (async && !async_rx_enabled) {
		int err;

		err = uart_rx_enable(uart_dev, async_rx_buf,
				     sizeof(async_rx_buf), 1 * USEC_PER_MSEC);
		zassert_true(err >= 0);
		async_rx_enabled = true;
	} else if (int_driven) {
		if (enable) {
			uart_irq_rx_enable(uart_dev);
		} else {
			uart_irq_rx_disable(uart_dev);
		}

		enable = !enable;
	} else if (!async && !int_driven) {
		uint8_t c;

		while (uart_poll_in(uart_dev, &c) >= 0) {
			process_byte(c);
		}
	}
}

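/* Pick the UART under test and detect which API it supports: asynchronous if
 * a callback can be set, otherwise interrupt driven if available, otherwise
 * plain polling.
 */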
static void init_test(int idx)
{
	memset(source, 0, sizeof(source));
	async_rx_enabled = false;
	uart_dev = duts[idx].dev;
	TC_PRINT("UART instance:%s\n", duts[idx].name);

	zassert_true(device_is_ready(uart_dev), "uart device is not ready");

	if (uart_callback_set(uart_dev, async_callback, NULL) == 0) {
		async = true;
	} else {
		async = false;
		int_driven = uart_irq_tx_complete(uart_dev) >= 0;
		if (int_driven) {
			uart_irq_callback_set(uart_dev, int_driven_callback);
		}
	}
}

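/* Interrupt driven RX path: read whatever is in the FIFO and validate it. */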
static void rx_isr(void)
{
	uint8_t buf[64];
	int len;

	do {
		len = uart_fifo_read(uart_dev, buf, BUF_SIZE);
		for (int i = 0; i < len; i++) {
			process_byte(buf[i]);
		}
	} while (len);
}

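/* Interrupt driven TX path: push a single byte and disable the TX interrupt
 * again; the worker thread re-enables it for the next byte.
 */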
static void tx_isr(void)
{
	const uint8_t *buf = &int_async_data.buf[int_async_data.cnt & 0xF];
	int len = uart_fifo_fill(uart_dev, buf, 1);

	int_async_data.cnt += len;

	k_busy_wait(len ? 4 : 2);
	uart_irq_tx_disable(uart_dev);
}

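/* UART interrupt handler dispatching to the RX and TX service routines. */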
static void int_driven_callback(const struct device *dev, void *user_data)
{
	while (uart_irq_is_pending(uart_dev)) {
		if (uart_irq_rx_ready(uart_dev)) {
			rx_isr();
		}
		if (uart_irq_tx_ready(uart_dev)) {
			tx_isr();
		}
	}
}

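/* Asynchronous API event handler: release the TX semaphore on completion,
 * validate received chunks and note when RX gets disabled.
 */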
static void async_callback(const struct device *dev,
			   struct uart_event *evt, void *user_data)
{
	switch (evt->type) {
	case UART_TX_DONE:
		k_sem_give(&async_tx_sem);
		break;
	case UART_RX_RDY:
		for (int i = 0; i < evt->data.rx.len; i++) {
			process_byte(evt->data.rx.buf[evt->data.rx.offset + i]);
		}
		break;
	case UART_RX_DISABLED:
		async_rx_enabled = false;
		break;
	default:
		break;
	}
}

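/* Transmit the whole pattern with uart_poll_out(), optionally sleeping a
 * randomized interval between bytes, and signal completion on the semaphore.
 */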
static void bulk_poll_out(struct test_data *data, int wait_base, int wait_range)
{
	for (int i = 0; i < data->max; i++) {
		data->cnt++;
		uart_poll_out(uart_dev, data->buf[i % BUF_SIZE]);
		if (wait_base) {
			int r = sys_rand32_get();

			k_sleep(K_USEC(wait_base + (r % wait_range)));
		}
	}

	k_sem_give(&data->sem);
}

static void poll_out_thread(void *data, void *unused0, void *unused1)
{
	bulk_poll_out((struct test_data *)data, 200, 600);
}

K_THREAD_STACK_DEFINE(high_poll_out_thread_stack, 1024);
static struct k_thread high_poll_out_thread;

K_THREAD_STACK_DEFINE(int_async_thread_stack, 1024);
static struct k_thread int_async_thread;

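/* Worker thread feeding the interrupt driven or asynchronous TX path with
 * randomized pauses between transfers.
 */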
static void int_async_thread_func(void *p_data, void *base, void *range)
{
	struct test_data *data = p_data;
	int wait_base = (int)base;
	int wait_range = (int)range;

	k_sem_init(&async_tx_sem, 1, 1);

	while (data->cnt < data->max) {
		if (async) {
			int err;

			err = k_sem_take(&async_tx_sem, K_MSEC(1000));
			zassert_true(err >= 0);

			int idx = data->cnt & 0xF;
			size_t len = (idx < BUF_SIZE / 2) ? 5 : 1; /* Try various lengths */
			len = MIN(len, data->max - data->cnt);

			data->cnt += len;
			err = uart_tx(uart_dev, &int_async_data.buf[idx],
				      len, 1000 * USEC_PER_MSEC);
			zassert_true(err >= 0,
				     "Unexpected err:%d", err);
		} else {
			uart_irq_tx_enable(uart_dev);
		}

		int r = sys_rand32_get();

		k_sleep(K_USEC(wait_base + (r % wait_range)));
	}

	k_sem_give(&data->sem);
}

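/* Timer expiry handler: transmit one byte with uart_poll_out() and reschedule
 * the timer with a randomized period until the requested number of bytes has
 * been sent.
 */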
static void poll_out_timer_handler(struct k_timer *timer)
{
	struct test_data *data = k_timer_user_data_get(timer);

	uart_poll_out(uart_dev, data->buf[data->cnt % BUF_SIZE]);

	data->cnt++;
	if (data->cnt == data->max) {
		k_timer_stop(timer);
		k_sem_give(&data->sem);
	} else {
		k_timer_start(timer, K_USEC(250 + (sys_rand16_get() % 800)),
			      K_NO_WAIT);
	}
}

K_TIMER_DEFINE(poll_out_timer, poll_out_timer_handler, NULL);

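/* Fill a pattern buffer: upper nibble identifies the context (idx), lower
 * nibble is an incrementing counter.
 */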
static void init_buf(uint8_t *buf, int len, int idx)
{
	for (int i = 0; i < len; i++) {
		buf[i] = i | (idx << 4);
	}
}

static void init_test_data(struct test_data *data, const uint8_t *buf, int repeat)
{
	k_sem_init(&data->sem, 0, 1);
	data->buf = buf;
	data->cnt = 0;
	data->max = repeat;
}

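/* Stress test: transmit concurrently from multiple contexts (threads, a timer
 * and, if supported, the interrupt driven or asynchronous API) over
 * looped-back TX/RX and verify that every context's byte stream is received
 * intact.
 */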
ZTEST(uart_mix_fifo_poll, test_mixed_uart_access)
{
	int repeat = CONFIG_STRESS_TEST_REPS;
	int err;
	int num_of_contexts = ARRAY_SIZE(test_data);
	struct counter_top_cfg top_cfg = {
		.callback = counter_top_handler,
		.user_data = NULL,
		.flags = 0
	};

	if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
		if (async) {
#if DT_NODE_EXISTS(DT_NODELABEL(uart120)) && DT_NODE_HAS_STATUS(DT_NODELABEL(uart120), okay)
			if (uart_dev == DEVICE_DT_GET(DT_NODELABEL(uart120))) {
				ztest_test_skip();
			}
#endif
		} else {
			/* If only the polling API is available then the UART device is
			 * initially suspended, which means that RX is disabled and
			 * poll_in won't work. The device must be explicitly enabled.
			 */
			pm_device_runtime_get(uart_dev);
		}
	}

	/* Set up a counter which will periodically enable/disable UART RX.
	 * Disabling RX should lead to flow control being activated.
	 */
	zassert_true(device_is_ready(counter_dev));

	top_cfg.ticks = counter_us_to_ticks(counter_dev, 1000);

	err = counter_set_top_value(counter_dev, &top_cfg);
	zassert_true(err >= 0);

	err = counter_start(counter_dev);
	zassert_true(err >= 0);

	for (int i = 0; i < ARRAY_SIZE(test_data); i++) {
		init_buf(txbuf[i], sizeof(txbuf[i]), i);
		init_test_data(&test_data[i], txbuf[i], repeat);
	}

	(void)k_thread_create(&high_poll_out_thread,
			      high_poll_out_thread_stack, 1024,
			      poll_out_thread, &test_data[0], NULL, NULL,
			      1, 0, K_NO_WAIT);

	if (async || int_driven) {
		init_test_data(&int_async_data, txbuf3, repeat);
		(void)k_thread_create(&int_async_thread,
				      int_async_thread_stack, 1024,
				      int_async_thread_func,
				      &int_async_data, (void *)300, (void *)400,
				      2, 0, K_NO_WAIT);
	}

	k_timer_user_data_set(&poll_out_timer, &test_data[1]);
	k_timer_start(&poll_out_timer, K_USEC(250), K_NO_WAIT);

	bulk_poll_out(&test_data[2], 300, 500);

	k_msleep(1);

	for (int i = 0; i < num_of_contexts; i++) {
		err = k_sem_take(&test_data[i].sem, K_MSEC(10000));
		zassert_equal(err, 0);
	}

	if (async || int_driven) {
		err = k_sem_take(&int_async_data.sem, K_MSEC(10000));
		zassert_equal(err, 0);
	}

	k_msleep(10);

	for (int i = 0; i < (num_of_contexts + (async || int_driven ? 1 : 0)); i++) {
		zassert_equal(source[i].cnt, repeat,
			      "%d: Unexpected rx bytes count (%d/%d)",
			      i, source[i].cnt, repeat);
	}

	err = counter_stop(counter_dev);
	zassert_true(err >= 0);

	if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME) && !async) {
		pm_device_runtime_put(uart_dev);
	}
}

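/* Suite setup: each run picks the next UART instance from the duts[] array. */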
void *uart_mix_setup(void)
{
	static int idx;

	init_test(idx++);

	return NULL;
}

ZTEST_SUITE(uart_mix_fifo_poll, NULL, uart_mix_setup,
	    NULL, NULL, NULL);

void test_main(void)
{
	/* Run all suites for each DUT UART. The setup function for each suite
	 * picks the next UART from the array.
	 */
	ztest_run_all(NULL, false, ARRAY_SIZE(duts), 1);
	ztest_verify_all_test_suites_ran();
}