/*
 * Copyright (c) 2019 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @addtogroup t_driver_uart
 * @{
 * @defgroup t_uart_mix_fifo_poll test_uart_mix_fifo_poll
 * @}
 */

#include <zephyr/drivers/uart.h>
#include <zephyr/ztest.h>
#include <zephyr/drivers/counter.h>
#include <zephyr/random/random.h>
/* RX and TX pins have to be connected together. */

#if DT_NODE_EXISTS(DT_NODELABEL(dut))
#define UART_NODE DT_NODELABEL(dut)
#elif defined(CONFIG_BOARD_SAMD21_XPRO)
#define UART_NODE DT_NODELABEL(sercom1)
#elif defined(CONFIG_BOARD_SAMR21_XPRO)
#define UART_NODE DT_NODELABEL(sercom3)
#elif defined(CONFIG_BOARD_SAME54_XPRO)
#define UART_NODE DT_NODELABEL(sercom1)
#else
#define UART_NODE DT_CHOSEN(zephyr_console)
#endif

#if DT_NODE_EXISTS(DT_NODELABEL(counter_dev))
#define COUNTER_NODE DT_NODELABEL(counter_dev)
#else
#define COUNTER_NODE DT_NODELABEL(timer0)
#endif

struct rx_source {
	int cnt;
	uint8_t prev;
};

#define BUF_SIZE 16

/* Buffers used for transmissions with the polling API. */
static uint8_t txbuf[3][BUF_SIZE];

/* Buffer used for the asynchronous or interrupt driven API.
 * One of the test configurations checks that a read-only (RO) buffer works
 * with the driver.
 */
static IF_ENABLED(TEST_CONST_BUFFER, (const)) uint8_t txbuf3[16] = {
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f
};

struct test_data {
	const uint8_t *buf;
	volatile int cnt;
	int max;
	struct k_sem sem;
};

static struct rx_source source[4];
static struct test_data test_data[3];
static struct test_data int_async_data;

static const struct device *const counter_dev =
	DEVICE_DT_GET(COUNTER_NODE);
static const struct device *const uart_dev =
	DEVICE_DT_GET(UART_NODE);

static bool async;
static bool int_driven;
static volatile bool async_rx_enabled;
static struct k_sem async_tx_sem;

static void int_driven_callback(const struct device *dev, void *user_data);
static void async_callback(const struct device *dev,
			   struct uart_event *evt, void *user_data);

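/* Each TX context tags its bytes with a unique identifier in the upper
 * nibble and an incrementing sequence number in the lower nibble. Check
 * that a received byte directly follows the previous byte from the same
 * source (lower nibble incrementing modulo 16).
 */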
static void process_byte(uint8_t b)
{
	int base = b >> 4;
	struct rx_source *src = &source[base];
	bool ok;

	b &= 0x0F;
	src->cnt++;

	if (src->cnt == 1) {
		src->prev = b;
		return;
	}

	ok = ((b - src->prev) == 1) || (!b && (src->prev == 0x0F));

	zassert_true(ok, "Unexpected byte received:0x%02x, prev:0x%02x",
			(base << 4) | b, (base << 4) | src->prev);
	src->prev = b;
}

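/* Periodic counter callback. Depending on the API under test it re-enables
 * asynchronous RX after it got disabled, toggles interrupt driven RX on and
 * off (which should exercise flow control), or drains the receiver using
 * the polling API.
 */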
static void counter_top_handler(const struct device *dev, void *user_data)
{
	static bool enable = true;
	static uint8_t async_rx_buf[4];

	if (async && !async_rx_enabled) {
		int err;

		err = uart_rx_enable(uart_dev, async_rx_buf,
				     sizeof(async_rx_buf), 1 * USEC_PER_MSEC);
		zassert_true(err >= 0);
		async_rx_enabled = true;
	} else if (int_driven) {
		if (enable) {
			uart_irq_rx_enable(uart_dev);
		} else {
			uart_irq_rx_disable(uart_dev);
		}

		enable = !enable;
	} else if (!async && !int_driven) {
		uint8_t c;

		while (uart_poll_in(uart_dev, &c) >= 0) {
			process_byte(c);
		}
	}
}

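/* Detect which UART API the driver supports (asynchronous, interrupt driven
 * or polling only) and start a counter which periodically interferes with
 * the receiver.
 */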
static void init_test(void)
{
	int err;
	struct counter_top_cfg top_cfg = {
		.callback = counter_top_handler,
		.user_data = NULL,
		.flags = 0
	};

	zassert_true(device_is_ready(uart_dev), "uart device is not ready");

	if (uart_callback_set(uart_dev, async_callback, NULL) == 0) {
		async = true;
	} else {
		async = false;
		int_driven = uart_irq_tx_complete(uart_dev) >= 0;
		if (int_driven) {
			uart_irq_callback_set(uart_dev, int_driven_callback);
		}
	}

	/* Set up a counter which will periodically enable/disable UART RX.
	 * Disabling RX should lead to flow control being activated.
	 */
	zassert_true(device_is_ready(counter_dev));

	top_cfg.ticks = counter_us_to_ticks(counter_dev, 1000);

	err = counter_set_top_value(counter_dev, &top_cfg);
	zassert_true(err >= 0);

	err = counter_start(counter_dev);
	zassert_true(err >= 0);
}

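/* Read all pending bytes from the RX FIFO and validate them. */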
static void rx_isr(void)
{
	uint8_t buf[64];
	int len;

	do {
		len = uart_fifo_read(uart_dev, buf, BUF_SIZE);
		for (int i = 0; i < len; i++) {
			process_byte(buf[i]);
		}
	} while (len);
}

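/* Transmit a single byte from the shared buffer, then disable the TX
 * interrupt; the feeding thread re-enables it for the next byte.
 */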
static void tx_isr(void)
{
	const uint8_t *buf = &int_async_data.buf[int_async_data.cnt & 0xF];
	int len = uart_fifo_fill(uart_dev, buf, 1);

	int_async_data.cnt += len;

	k_busy_wait(len ? 4 : 2);
	uart_irq_tx_disable(uart_dev);
}

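/* UART interrupt handler dispatching to the RX and TX ISRs. */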
static void int_driven_callback(const struct device *dev, void *user_data)
{
	while (uart_irq_is_pending(uart_dev)) {
		if (uart_irq_rx_ready(uart_dev)) {
			rx_isr();
		}
		if (uart_irq_tx_ready(uart_dev)) {
			tx_isr();
		}
	}
}

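/* Asynchronous API event handler: validates received bytes and signals
 * TX completion.
 */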
static void async_callback(const struct device *dev,
			   struct uart_event *evt, void *user_data)
{
	switch (evt->type) {
	case UART_TX_DONE:
		k_sem_give(&async_tx_sem);
		break;
	case UART_RX_RDY:
		for (int i = 0; i < evt->data.rx.len; i++) {
			process_byte(evt->data.rx.buf[evt->data.rx.offset + i]);
		}
		break;
	case UART_RX_DISABLED:
		async_rx_enabled = false;
		break;
	default:
		break;
	}
}

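/* Transmit the whole payload with uart_poll_out(), optionally sleeping for
 * a random period of wait_base..(wait_base + wait_range - 1) microseconds
 * between bytes. Gives the semaphore when done.
 */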
static void bulk_poll_out(struct test_data *data, int wait_base, int wait_range)
{
	for (int i = 0; i < data->max; i++) {
		data->cnt++;
		uart_poll_out(uart_dev, data->buf[i % BUF_SIZE]);
		if (wait_base) {
			int r = sys_rand32_get();

			k_sleep(K_USEC(wait_base + (r % wait_range)));
		}
	}

	k_sem_give(&data->sem);
}

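/* Thread entry which transmits its payload with uart_poll_out(), using a
 * random 200-799 microsecond delay between bytes.
 */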
static void poll_out_thread(void *data, void *unused0, void *unused1)
{
	bulk_poll_out((struct test_data *)data, 200, 600);
}

K_THREAD_STACK_DEFINE(high_poll_out_thread_stack, 1024);
static struct k_thread high_poll_out_thread;

K_THREAD_STACK_DEFINE(int_async_thread_stack, 1024);
static struct k_thread int_async_thread;

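/* Feed the asynchronous or interrupt driven API from a thread. With the
 * asynchronous API, chunks of varying length are sent with uart_tx(); with
 * the interrupt driven API, the TX interrupt is enabled and tx_isr() sends
 * the data. A random sleep is injected between transfers.
 */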
static void int_async_thread_func(void *p_data, void *base, void *range)
{
	struct test_data *data = p_data;
	int wait_base = (int)base;
	int wait_range = (int)range;

	k_sem_init(&async_tx_sem, 1, 1);

	while (data->cnt < data->max) {
		if (async) {
			int err;

			err = k_sem_take(&async_tx_sem, K_MSEC(1000));
			zassert_true(err >= 0);

			int idx = data->cnt & 0xF;
			size_t len = (idx < BUF_SIZE / 2) ? 5 : 1; /* Try various lengths */
			len = MIN(len, data->max - data->cnt);

			data->cnt += len;
			err = uart_tx(uart_dev, &int_async_data.buf[idx],
				      len, 1000 * USEC_PER_MSEC);
			zassert_true(err >= 0,
					"Unexpected err:%d", err);
		} else {
			uart_irq_tx_enable(uart_dev);
		}

		int r = sys_rand32_get();

		k_sleep(K_USEC(wait_base + (r % wait_range)));
	}

	k_sem_give(&data->sem);
}

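/* Timer handler which transmits one byte with uart_poll_out() and
 * re-schedules itself after a random delay until the whole payload has
 * been sent.
 */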
static void poll_out_timer_handler(struct k_timer *timer)
{
	struct test_data *data = k_timer_user_data_get(timer);

	uart_poll_out(uart_dev, data->buf[data->cnt % BUF_SIZE]);

	data->cnt++;
	if (data->cnt == data->max) {
		k_timer_stop(timer);
		k_sem_give(&data->sem);
	} else {
		k_timer_start(timer, K_USEC(250 + (sys_rand16_get() % 800)),
				K_NO_WAIT);
	}
}

K_TIMER_DEFINE(poll_out_timer, poll_out_timer_handler, NULL);

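/* Fill the buffer with an incrementing pattern, tagging each byte with the
 * source index in the upper nibble.
 */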
static void init_buf(uint8_t *buf, int len, int idx)
{
	for (int i = 0; i < len; i++) {
		buf[i] = i | (idx << 4);
	}
}

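/* Bind a payload buffer to a test context and reset its counters. */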
static void init_test_data(struct test_data *data, const uint8_t *buf, int repeat)
{
	k_sem_init(&data->sem, 0, 1);
	data->buf = buf;
	data->cnt = 0;
	data->max = repeat;
}

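/* Stress test which transmits concurrently from multiple contexts (threads,
 * a timer handler and the test thread itself), mixing the polling API with
 * the interrupt driven or asynchronous API, and then verifies that the
 * payload of every context was received intact.
 */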
ZTEST(uart_mix_fifo_poll, test_mixed_uart_access)
{
	int repeat = CONFIG_STRESS_TEST_REPS;
	int err;
	int num_of_contexts = ARRAY_SIZE(test_data);

	for (int i = 0; i < ARRAY_SIZE(test_data); i++) {
		init_buf(txbuf[i], sizeof(txbuf[i]), i);
		init_test_data(&test_data[i], txbuf[i], repeat);
	}

	(void)k_thread_create(&high_poll_out_thread,
			      high_poll_out_thread_stack, 1024,
			      poll_out_thread, &test_data[0], NULL, NULL,
			      1, 0, K_NO_WAIT);

	if (async || int_driven) {
		init_test_data(&int_async_data, txbuf3, repeat);
		(void)k_thread_create(&int_async_thread,
				int_async_thread_stack, 1024,
				int_async_thread_func,
				&int_async_data, (void *)300, (void *)400,
				2, 0, K_NO_WAIT);
	}

	k_timer_user_data_set(&poll_out_timer, &test_data[1]);
	k_timer_start(&poll_out_timer, K_USEC(250), K_NO_WAIT);

	bulk_poll_out(&test_data[2], 300, 500);

	k_msleep(1);

	for (int i = 0; i < num_of_contexts; i++) {
		err = k_sem_take(&test_data[i].sem, K_MSEC(10000));
		zassert_equal(err, 0);
	}

	if (async || int_driven) {
		err = k_sem_take(&int_async_data.sem, K_MSEC(10000));
		zassert_equal(err, 0);
	}

	k_msleep(10);

	for (int i = 0; i < (num_of_contexts + (async || int_driven ? 1 : 0)); i++) {
		zassert_equal(source[i].cnt, repeat,
				"%d: Unexpected rx bytes count (%d/%d)",
				i, source[i].cnt, repeat);
	}
}

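/* Suite setup: initialize the UART, the counter and the test state. */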
void *uart_mix_setup(void)
{
	init_test();

	return NULL;
}

ZTEST_SUITE(uart_mix_fifo_poll, NULL, uart_mix_setup,
		NULL, NULL, NULL);