1 /*
2  * Copyright (c) 2024 Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 /**
8  * @addtogroup t_driver_uart
9  * @{
10  * @defgroup t_uart_errors test_uart_errors
11  * @}
12  */
13 
14 #include <zephyr/drivers/uart.h>
15 #include <zephyr/pm/device.h>
16 #include <zephyr/ztest.h>
17 #include <zephyr/logging/log.h>
18 LOG_MODULE_REGISTER(test, LOG_LEVEL_NONE);
19 
20 #if DT_NODE_EXISTS(DT_NODELABEL(dut))
21 #define UART_NODE DT_NODELABEL(dut)
22 #else
23 #error "No dut device in the test"
24 #endif
25 
26 #if DT_NODE_EXISTS(DT_NODELABEL(dut_aux))
27 #define UART_NODE_AUX DT_NODELABEL(dut_aux)
28 #else
29 #error "No dut_aux device in the test"
30 #endif
31 
32 static const struct device *const uart_dev = DEVICE_DT_GET(UART_NODE);
33 static const struct device *const uart_dev_aux = DEVICE_DT_GET(UART_NODE_AUX);
34 
35 #define RX_CHUNK_CNT 2
36 #define RX_CHUNK_LEN 16
37 #define RX_TIMEOUT (1 * USEC_PER_MSEC)
38 
39 static uint8_t rx_chunks[RX_CHUNK_CNT][16];
40 static uint32_t rx_chunks_mask = BIT_MASK(RX_CHUNK_CNT);
41 static uint8_t rx_buffer[256];
42 static uint32_t rx_buffer_cnt;
43 static volatile uint32_t rx_stopped_cnt;
44 static volatile bool rx_active;
45 
/* State for the auxiliary UART's interrupt-driven TX with optional error injection. */
struct aux_dut_data {
	const uint8_t *buf;  /* Data to transmit. */
	size_t len;          /* Total length of buf. */
	size_t curr;         /* Number of bytes pushed to the FIFO so far. */
	int err_byte;        /* Index of the byte to corrupt; -1 for no corruption. */
	struct k_sem *sem;   /* Posted on completion or when reconfiguration is needed. */
	bool cfg_ok;         /* True when the UART currently has the correct configuration. */
};
54 
55 /* Simple buffer allocator. If allocation fails then test fails inside that function. */
alloc_rx_chunk(void)56 static uint8_t *alloc_rx_chunk(void)
57 {
58 	uint32_t idx;
59 
60 	zassert_true(rx_chunks_mask > 0);
61 
62 	idx = __builtin_ctz(rx_chunks_mask);
63 	rx_chunks_mask &= ~BIT(idx);
64 
65 	return rx_chunks[idx];
66 }
67 
/* Return a chunk to the pool, clearing its contents first. */
static void free_rx_chunk(uint8_t *buf)
{
	memset(buf, 0, RX_CHUNK_LEN);

	for (size_t i = 0; i < ARRAY_SIZE(rx_chunks); i++) {
		if (rx_chunks[i] != buf) {
			continue;
		}
		rx_chunks_mask |= BIT(i);
		break;
	}
}
78 
/* DUT callback for the asynchronous API. Aggregates received data into
 * rx_buffer, services RX buffer requests/releases from the pool, counts
 * RX errors and re-enables the receiver after it gets disabled.
 */
static void dut_async_callback(const struct device *dev, struct uart_event *evt, void *user_data)
{
	switch (evt->type) {
	case UART_TX_DONE:
		/* The DUT never transmits in this test. */
		zassert_true(false);
		break;
	case UART_RX_RDY:
		LOG_INF("RX:%p len:%d off:%d",
			(void *)evt->data.rx.buf, evt->data.rx.len, evt->data.rx.offset);
		/* Aggregate all received data into a single buffer. */
		memcpy(&rx_buffer[rx_buffer_cnt], &evt->data.rx.buf[evt->data.rx.offset],
			evt->data.rx.len);
		rx_buffer_cnt += evt->data.rx.len;
		break;
	case UART_RX_BUF_REQUEST:
	{
		uint8_t *buf = alloc_rx_chunk();

		LOG_INF("buf request: %p", (void *)buf);
		zassert_equal(uart_rx_buf_rsp(dev, buf, RX_CHUNK_LEN), 0);
		break;
	}
	case UART_RX_BUF_RELEASED:
		LOG_INF("buf release: %p", (void *)evt->data.rx_buf.buf);
		free_rx_chunk(evt->data.rx_buf.buf);
		break;
	case UART_RX_DISABLED:
		/* At this point all chunks must have been released back to the pool. */
		zassert_true(rx_chunks_mask == BIT_MASK(RX_CHUNK_CNT));
		/* If test continues re-enable the receiver. Disabling may happen
		 * during the test after error is detected.
		 */
		if (rx_active) {
			uint8_t *buf = alloc_rx_chunk();
			int err;

			LOG_INF("RX disabled, re-enabling:%p", (void *)buf);
			err = uart_rx_enable(dev, buf, RX_CHUNK_LEN, RX_TIMEOUT);
			zassert_equal(err, 0);
		} else {
			LOG_WRN("RX disabled");
		}
		break;
	case UART_RX_STOPPED:
		LOG_WRN("RX error");
		rx_stopped_cnt++;
		break;
	default:
		zassert_true(false);
		break;
	}
}
131 
dut_int_callback(const struct device * dev,void * user_data)132 static void dut_int_callback(const struct device *dev, void *user_data)
133 {
134 	while (uart_irq_update(dev) && uart_irq_is_pending(dev)) {
135 		zassert_false(uart_irq_tx_ready(dev));
136 		if (uart_err_check(dev) != 0) {
137 			rx_stopped_cnt++;
138 		}
139 		if (uart_irq_rx_ready(dev)) {
140 			size_t rem = sizeof(rx_buffer) - rx_buffer_cnt;
141 			int len = uart_fifo_read(dev, &rx_buffer[rx_buffer_cnt], rem);
142 
143 			zassert_true(len >= 0);
144 			rx_buffer_cnt += len;
145 		}
146 	}
147 }
148 
aux_async_callback(const struct device * dev,struct uart_event * evt,void * user_data)149 static void aux_async_callback(const struct device *dev, struct uart_event *evt, void *user_data)
150 {
151 	struct k_sem *sem = user_data;
152 
153 	switch (evt->type) {
154 	case UART_TX_DONE:
155 		k_sem_give(sem);
156 		break;
157 	default:
158 		zassert_true(false);
159 		break;
160 	}
161 }
162 
/* Callback handling the injection of one corrupted byte. In order to corrupt
 * that byte the UART must be reconfigured, so when it is time to reconfigure,
 * the TX interrupt is disabled and a semaphore is posted so that the UART can
 * be reconfigured in thread context.
 */
aux_int_callback(const struct device * dev,void * user_data)167 static void aux_int_callback(const struct device *dev, void *user_data)
168 {
169 	struct aux_dut_data *data = user_data;
170 	size_t req_len;
171 	size_t tx_len;
172 	bool completed = data->curr == data->len;
173 	bool inject_err = data->err_byte >= 0;
174 	bool pre_err = inject_err && (data->curr == data->err_byte);
175 	bool post_err = inject_err && ((data->curr + 1) == data->err_byte);
176 	bool trig_reconfig = ((pre_err && data->cfg_ok) || (post_err && !data->cfg_ok));
177 
178 	while (uart_irq_tx_ready(dev)) {
179 		if (completed || trig_reconfig) {
180 			/* Transmission completed or not configured correctly. */
181 			uart_irq_tx_disable(dev);
182 			k_sem_give(data->sem);
183 		} else {
184 			if (pre_err) {
185 				req_len = 1;
186 			} else if (inject_err && (data->curr < data->err_byte)) {
187 				req_len = data->err_byte - data->curr;
188 			} else {
189 				req_len = data->len - data->curr;
190 			}
191 
192 			tx_len = uart_fifo_fill(dev, &data->buf[data->curr], req_len);
193 			data->curr += tx_len;
194 		}
195 	}
196 }
197 
/** @brief Reconfigure a UART device, optionally with an intentionally bad setting.
 *
 * @param dev Device to reconfigure.
 * @param cfg_ok If true use the correct parity (none); otherwise use even
 *		 parity which corrupts bytes as seen by a peer using no parity.
 * @param hwfc If not NULL, also set hardware flow control to *hwfc.
 */
static void reconfigure(const struct device *dev, bool cfg_ok, bool *hwfc)
{
	struct uart_config config;

	/* Read the current configuration of the device being reconfigured.
	 * Bug fix: previously this always queried uart_dev, even when @p dev
	 * was uart_dev_aux.
	 */
	zassert_equal(uart_config_get(dev, &config), 0);

	if (hwfc) {
		if (IS_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN)) {
			/* Reconfiguration may happen on disabled device. In the
			 * interrupt driven mode receiver is always on so we need
			 * to suspend the device to disable the receiver and
			 * reconfigure it.
			 */
			pm_device_action_run(dev, PM_DEVICE_ACTION_SUSPEND);
		}
		config.flow_ctrl = *hwfc ? UART_CFG_FLOW_CTRL_RTS_CTS : UART_CFG_FLOW_CTRL_NONE;
	}

	config.parity = cfg_ok ? UART_CFG_PARITY_NONE : UART_CFG_PARITY_EVEN;

	zassert_equal(uart_configure(dev, &config), 0);

	if (hwfc && IS_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN)) {
		pm_device_action_run(dev, PM_DEVICE_ACTION_RESUME);
	}
}
226 
227 /** @brief Transmit a buffer with optional one byte corrupted.
228  *
229  * Function supports asynchronous and interrupt driven APIs.
230  *
231  * @param dev Device.
232  * @param buf Buffer.
233  * @param len Buffer length.
234  * @param err_byte Index of byte which is sent with parity enabled. -1 to send without error.
235  */
static void aux_tx(const struct device *dev, const uint8_t *buf, size_t len, int err_byte)
{
	int err;
	struct k_sem sem;

	k_sem_init(&sem, 0, 1);

	if (IS_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN)) {
		struct aux_dut_data data = {
			.buf = buf,
			.len = len,
			.err_byte = err_byte,
			.cfg_ok = true,
			.sem = &sem
		};

		err = uart_irq_callback_user_data_set(dev, aux_int_callback, &data);
		zassert_equal(err, 0);

		uart_irq_tx_enable(dev);

		if (err_byte >= 0) {
			/* ISR stops just before the corrupted byte and posts the
			 * semaphore. Reconfigure to unaligned configuration.
			 */
			err = k_sem_take(&sem, K_MSEC(100));
			zassert_equal(err, 0);
			data.cfg_ok = false;
			reconfigure(dev, false, NULL);
			uart_irq_tx_enable(dev);

			/* ISR sends the corrupted byte and stops again.
			 * Reconfigure back to correct configuration.
			 */
			err = k_sem_take(&sem, K_MSEC(100));
			zassert_equal(err, 0);
			data.cfg_ok = true;
			reconfigure(dev, true, NULL);
			uart_irq_tx_enable(dev);
		}

		/* Wait for completion. */
		err = k_sem_take(&sem, K_MSEC(100));
		zassert_equal(err, 0);
		return;
	}

	/* Asynchronous API path below. */
	err = uart_callback_set(dev, aux_async_callback, &sem);
	zassert_equal(err, 0);

	if (err_byte < 0) {
		/* No corruption: send the whole buffer in one transfer. */
		err = uart_tx(dev, buf, len, 100 * USEC_PER_MSEC);
		zassert_equal(err, 0);

		err = k_sem_take(&sem, K_MSEC(100));
		zassert_equal(err, 0);
		return;
	} else if (err_byte > 0) {
		/* Send the bytes preceding the corrupted one with good config. */
		err = uart_tx(dev, buf, err_byte, 100 * USEC_PER_MSEC);
		zassert_equal(err, 0);

		err = k_sem_take(&sem, K_MSEC(100));
		zassert_equal(err, 0);
	}
	/* Reconfigure to unaligned configuration that will lead to error. */
	reconfigure(dev, false, NULL);

	/* Send the single corrupted byte. */
	err = uart_tx(dev, &buf[err_byte], 1, 100 * USEC_PER_MSEC);
	zassert_equal(err, 0);

	err = k_sem_take(&sem, K_MSEC(100));
	zassert_equal(err, 0);
	/* Reconfigure back to the correct configuration. */
	reconfigure(dev, true, NULL);

	/* Send the remainder of the buffer with the good configuration. */
	err = uart_tx(dev, &buf[err_byte + 1], len - err_byte - 1, 100 * USEC_PER_MSEC);
	zassert_equal(err, 0);

	err = k_sem_take(&sem, K_MSEC(100));
	zassert_equal(err, 0);
}
313 
314 /** @brief Test function.
315  *
316  * Test starts by sending 10 bytes without error then 10 bytes with an error on
317  * @p err_byte and then again 10 bytes without error. It is expected that driver
318  * will receive correctly first 10 bytes then detect error and recover to
319  * receive correctly last 10 bytes.
320  *
321  * @param hwfc Use hardware flow control.
322  * @param err_byte Index of corrupted byte in the second 10 byte sequence.
323  */
static void test_detect_error(bool hwfc, int err_byte)
{
	uint8_t buf[10];
	int err;

	/* Start both sides from a known-good, matching configuration. */
	reconfigure(uart_dev, true, &hwfc);
	reconfigure(uart_dev_aux, true, &hwfc);

	for (size_t i = 0; i < sizeof(buf); i++) {
		buf[i] = i;
	}

	if (IS_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN)) {
		uart_irq_err_enable(uart_dev);
		uart_irq_rx_enable(uart_dev);
	} else {
		uint8_t *b = alloc_rx_chunk();

		LOG_INF("dut rx enable buf:%p", (void *)b);
		err = uart_rx_enable(uart_dev, b, RX_CHUNK_LEN, RX_TIMEOUT);
		zassert_equal(err, 0);
	}

	/* Send TX without error */
	aux_tx(uart_dev_aux, buf, sizeof(buf), -1);

	/* Wait for the data to arrive and verify it was received intact. */
	k_msleep(10);
	zassert_equal(sizeof(buf), rx_buffer_cnt, "Expected %d got %d", sizeof(buf), rx_buffer_cnt);
	zassert_equal(memcmp(buf, rx_buffer, rx_buffer_cnt), 0);

	/* Send TX with error on nth byte. */
	aux_tx(uart_dev_aux, buf, sizeof(buf), err_byte);

	/* At this point when error is detected receiver will be restarted and it may
	 * be started when there is a transmission on the line if HWFC is disabled
	 * which will trigger next error so until there is a gap on the line there
	 * might be multiple errors detected. However, when HWFC is enabled then there
	 * should be only one error.
	 */
	k_msleep(100);
	zassert_true(rx_stopped_cnt > 0);

	/* Send TX without error. Receiver is settled so it should be correctly received. */
	aux_tx(uart_dev_aux, buf, sizeof(buf), -1);

	k_msleep(100);
	TC_PRINT("RX bytes:%d/%d err_cnt:%d\n", rx_buffer_cnt, 3 * sizeof(buf), rx_stopped_cnt);

	LOG_HEXDUMP_INF(rx_buffer, rx_buffer_cnt, "Received data:");

	/* Last received chunk should be correct. */
	zassert_equal(memcmp(buf, &rx_buffer[rx_buffer_cnt - sizeof(buf)], sizeof(buf)), 0);

	if (IS_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN)) {
		uart_irq_err_disable(uart_dev);
		uart_irq_rx_disable(uart_dev);
	} else {
		/* Tell the async callback not to re-enable RX, then disable it. */
		rx_active = false;
		err = uart_rx_disable(uart_dev);
		/* -EFAULT means the receiver was already disabled. */
		zassert_true((err == 0) || (err == -EFAULT));

		k_msleep(10);
	}
}
389 
/* Corrupt the very first byte of the sequence, no hardware flow control. */
ZTEST(uart_errors, test_detect_error_first_byte)
{
	test_detect_error(false, 0);
}
394 
/* Corrupt a byte in the middle of the sequence, no hardware flow control. */
ZTEST(uart_errors, test_detect_error_in_the_middle)
{
	test_detect_error(false, 5);
}
399 
/* Corrupt the very first byte of the sequence, with hardware flow control. */
ZTEST(uart_errors, test_detect_error_first_byte_hwfc)
{
	test_detect_error(true, 0);
}
404 
/* Corrupt a byte in the middle of the sequence, with hardware flow control. */
ZTEST(uart_errors, test_detect_error_in_the_middle_hwfc)
{
	test_detect_error(true, 5);
}
409 
410 /*
411  * Test setup
412  */
test_setup(void)413 static void *test_setup(void)
414 {
415 	zassert_true(device_is_ready(uart_dev), "DUT UART device is not ready");
416 	zassert_true(device_is_ready(uart_dev_aux), "DUT_AUX UART device is not ready");
417 
418 	if (IS_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN)) {
419 		zassert_equal(uart_irq_callback_set(uart_dev, dut_int_callback), 0);
420 	} else {
421 		zassert_equal(uart_callback_set(uart_dev, dut_async_callback, NULL), 0);
422 	}
423 
424 	return NULL;
425 }
426 
before(void * unused)427 static void before(void *unused)
428 {
429 	ARG_UNUSED(unused);
430 	rx_buffer_cnt = 0;
431 	rx_stopped_cnt = 0;
432 	rx_active = true;
433 }
434 
435 ZTEST_SUITE(uart_errors, NULL, test_setup, before, NULL, NULL);
436