/*
 * Copyright (c) 2024 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @addtogroup t_driver_uart
 * @{
 * @defgroup t_uart_errors test_uart_errors
 * @}
 */

#include <zephyr/drivers/uart.h>
#include <zephyr/pm/device.h>
#include <zephyr/pm/device_runtime.h>
#include <zephyr/ztest.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(test, LOG_LEVEL_NONE);

#if DT_NODE_EXISTS(DT_NODELABEL(dut))
#define UART_NODE DT_NODELABEL(dut)
#else
#error "No dut device in the test"
#endif

#if DT_NODE_EXISTS(DT_NODELABEL(dut_aux))
#define UART_NODE_AUX DT_NODELABEL(dut_aux)
#else
#error "No dut_aux device in the test"
#endif

static const struct device *const uart_dev = DEVICE_DT_GET(UART_NODE);
static const struct device *const uart_dev_aux = DEVICE_DT_GET(UART_NODE_AUX);

#define RX_CHUNK_CNT 2
#define RX_CHUNK_LEN 16
#define RX_TIMEOUT (1 * USEC_PER_MSEC)

static uint8_t rx_chunks[RX_CHUNK_CNT][RX_CHUNK_LEN];
static uint32_t rx_chunks_mask = BIT_MASK(RX_CHUNK_CNT);
static uint8_t rx_buffer[256];
static uint32_t rx_buffer_cnt;
static volatile uint32_t rx_stopped_cnt;
static volatile bool rx_active;
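
/* Data for the auxiliary (transmitting) UART used in the interrupt driven mode.
 * It tracks transmission progress and the index of the byte which is sent with
 * the mismatched configuration.
 */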
struct aux_dut_data {
	const uint8_t *buf;
	size_t len;
	size_t curr;
	int err_byte;
	struct k_sem *sem;
	bool cfg_ok;
};

/* Simple buffer allocator. If allocation fails, the test fails inside this function. */
static uint8_t *alloc_rx_chunk(void)
{
	uint32_t idx;

	zassert_true(rx_chunks_mask > 0);

	idx = __builtin_ctz(rx_chunks_mask);
	rx_chunks_mask &= ~BIT(idx);

	return rx_chunks[idx];
}

static void free_rx_chunk(uint8_t *buf)
{
	memset(buf, 0, RX_CHUNK_LEN);
	for (size_t i = 0; i < ARRAY_SIZE(rx_chunks); i++) {
		if (rx_chunks[i] == buf) {
			rx_chunks_mask |= BIT(i);
			break;
		}
	}
}
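
/* DUT callback for the asynchronous API. Received data is aggregated into
 * rx_buffer, buffer requests are served from the chunk allocator and the
 * receiver is re-enabled when it gets disabled while the test is still active.
 */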
static void dut_async_callback(const struct device *dev, struct uart_event *evt, void *user_data)
{
	switch (evt->type) {
	case UART_TX_DONE:
		zassert_true(false);
		break;
	case UART_RX_RDY:
		LOG_INF("RX:%p len:%d off:%d",
			(void *)evt->data.rx.buf, evt->data.rx.len, evt->data.rx.offset);
		/* Aggregate all received data into a single buffer. */
		memcpy(&rx_buffer[rx_buffer_cnt], &evt->data.rx.buf[evt->data.rx.offset],
			evt->data.rx.len);
		rx_buffer_cnt += evt->data.rx.len;
		break;
	case UART_RX_BUF_REQUEST:
	{
		uint8_t *buf = alloc_rx_chunk();

		LOG_INF("buf request: %p", (void *)buf);
		zassert_equal(uart_rx_buf_rsp(dev, buf, RX_CHUNK_LEN), 0);
		break;
	}
	case UART_RX_BUF_RELEASED:
		LOG_INF("buf release: %p", (void *)evt->data.rx_buf.buf);
		free_rx_chunk(evt->data.rx_buf.buf);
		break;
	case UART_RX_DISABLED:
		zassert_true(rx_chunks_mask == BIT_MASK(RX_CHUNK_CNT));
		/* If the test is still running, re-enable the receiver. The receiver
		 * may get disabled during the test after an error is detected.
		 */
		if (rx_active) {
			uint8_t *buf = alloc_rx_chunk();
			int err;

			LOG_INF("RX disabled, re-enabling:%p", (void *)buf);
			err = uart_rx_enable(dev, buf, RX_CHUNK_LEN, RX_TIMEOUT);
			zassert_equal(err, 0);
		} else {
			LOG_WRN("RX disabled");
		}
		break;
	case UART_RX_STOPPED:
		LOG_WRN("RX error");
		rx_stopped_cnt++;
		break;
	default:
		zassert_true(false);
		break;
	}
}
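
/* DUT callback for the interrupt driven API. Counts detected errors and drains
 * the RX FIFO into rx_buffer.
 */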
static void dut_int_callback(const struct device *dev, void *user_data)
{
	while (uart_irq_update(dev) && uart_irq_is_pending(dev)) {
		zassert_false(uart_irq_tx_ready(dev));
		if (uart_err_check(dev) != 0) {
			rx_stopped_cnt++;
		}
		if (uart_irq_rx_ready(dev)) {
			size_t rem = sizeof(rx_buffer) - rx_buffer_cnt;
			int len = uart_fifo_read(dev, &rx_buffer[rx_buffer_cnt], rem);

			zassert_true(len >= 0);
			rx_buffer_cnt += len;
		}
	}
}
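
/* Callback for the auxiliary UART when the asynchronous API is used. Only TX done
 * events are expected and each one releases the semaphore the sender waits on.
 */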
static void aux_async_callback(const struct device *dev, struct uart_event *evt, void *user_data)
{
	struct k_sem *sem = user_data;

	switch (evt->type) {
	case UART_TX_DONE:
		k_sem_give(sem);
		break;
	default:
		zassert_true(false);
		break;
	}
}

/* Callback handling the injection of one corrupted byte. Corrupting that byte
 * requires reconfiguring the UART, so when it is time to reconfigure, the TX
 * interrupt is disabled and the semaphore is posted so that the UART can be
 * reconfigured in thread context.
 */
static void aux_int_callback(const struct device *dev, void *user_data)
{
	struct aux_dut_data *data = user_data;
	size_t req_len;
	size_t tx_len;
	bool completed = data->curr == data->len;
	bool inject_err = data->err_byte >= 0;
	bool pre_err = inject_err && (data->curr == data->err_byte);
	bool post_err = inject_err && (data->curr == (data->err_byte + 1));
	bool trig_reconfig = ((pre_err && data->cfg_ok) || (post_err && !data->cfg_ok));

	while (uart_irq_tx_ready(dev)) {
		if (completed || trig_reconfig) {
			/* Transmission completed or a reconfiguration is needed. */
			uart_irq_tx_disable(dev);
			k_sem_give(data->sem);
		} else {
			if (pre_err) {
				req_len = 1;
			} else if (inject_err && (data->curr < data->err_byte)) {
				req_len = data->err_byte - data->curr;
			} else {
				req_len = data->len - data->curr;
			}

			tx_len = uart_fifo_fill(dev, &data->buf[data->curr], req_len);
			data->curr += tx_len;
		}
	}
}
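
/* Reconfigure a device. When cfg_ok is false, parity is set to a value which
 * does not match the peer so that transmitted bytes are seen as corrupted.
 * When hwfc is not NULL, hardware flow control is updated as well.
 */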
static void reconfigure(const struct device *dev, bool cfg_ok, bool *hwfc)
{
	struct uart_config config;

	zassert_equal(uart_config_get(uart_dev, &config), 0);

	if (hwfc) {
		if (IS_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN)) {
			/* Reconfiguration may require the device to be disabled. In the
			 * interrupt driven mode the receiver is always on, so the device
			 * is suspended to disable the receiver before it is reconfigured.
			 */
			pm_device_action_run(dev, PM_DEVICE_ACTION_SUSPEND);
		}
		config.flow_ctrl = *hwfc ? UART_CFG_FLOW_CTRL_RTS_CTS : UART_CFG_FLOW_CTRL_NONE;
	}

	config.parity = cfg_ok ? UART_CFG_PARITY_NONE : UART_CFG_PARITY_EVEN;

	zassert_equal(uart_configure(dev, &config), 0);

	if (hwfc) {
		if (IS_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN)) {
			pm_device_action_run(dev, PM_DEVICE_ACTION_RESUME);
		}
	}
}

/** @brief Transmit a buffer, optionally with one corrupted byte.
 *
 * The function supports both the asynchronous and the interrupt driven API.
 *
 * @param dev Device.
 * @param buf Buffer.
 * @param len Buffer length.
 * @param err_byte Index of the byte which is sent with parity enabled. Use -1 to
 *		   send without error.
 */
static void aux_tx(const struct device *dev, const uint8_t *buf, size_t len, int err_byte)
{
	int err;
	struct k_sem sem;

	k_sem_init(&sem, 0, 1);

	if (IS_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN)) {
		struct aux_dut_data data = {
			.buf = buf,
			.len = len,
			.err_byte = err_byte,
			.cfg_ok = true,
			.sem = &sem
		};

		err = uart_irq_callback_user_data_set(dev, aux_int_callback, &data);
		zassert_equal(err, 0);

		uart_irq_tx_enable(dev);

		if (err_byte >= 0) {
			/* Reconfigure to a mismatched configuration. */
			err = k_sem_take(&sem, K_MSEC(100));
			zassert_equal(err, 0);
			data.cfg_ok = false;
			reconfigure(dev, false, NULL);
			uart_irq_tx_enable(dev);

			/* Reconfigure back to the correct configuration. */
			err = k_sem_take(&sem, K_MSEC(100));
			zassert_equal(err, 0);
			data.cfg_ok = true;
			reconfigure(dev, true, NULL);
			uart_irq_tx_enable(dev);
		}

		/* Wait for completion. */
		err = k_sem_take(&sem, K_MSEC(100));
		zassert_equal(err, 0);
		return;
	}

	err = uart_callback_set(dev, aux_async_callback, &sem);
	zassert_equal(err, 0);

	if (err_byte < 0) {
		err = uart_tx(dev, buf, len, 100 * USEC_PER_MSEC);
		zassert_equal(err, 0);

		err = k_sem_take(&sem, K_MSEC(100));
		zassert_equal(err, 0);
		return;
	} else if (err_byte > 0) {
		err = uart_tx(dev, buf, err_byte, 100 * USEC_PER_MSEC);
		zassert_equal(err, 0);

		err = k_sem_take(&sem, K_MSEC(100));
		zassert_equal(err, 0);
	}
	/* Reconfigure to a mismatched configuration that will lead to an error. */
	reconfigure(dev, false, NULL);

	err = uart_tx(dev, &buf[err_byte], 1, 100 * USEC_PER_MSEC);
	zassert_equal(err, 0);

	err = k_sem_take(&sem, K_MSEC(100));
	zassert_equal(err, 0);
	/* Reconfigure back to the correct configuration. */
	reconfigure(dev, true, NULL);

	err = uart_tx(dev, &buf[err_byte + 1], len - err_byte - 1, 100 * USEC_PER_MSEC);
	zassert_equal(err, 0);

	err = k_sem_take(&sem, K_MSEC(100));
	zassert_equal(err, 0);
}

/** @brief Test function.
 *
 * The test starts by sending 10 bytes without error, then 10 bytes with an error
 * injected at @p err_byte, and then again 10 bytes without error. The driver is
 * expected to receive the first 10 bytes correctly, then detect the error and
 * recover so that the last 10 bytes are also received correctly.
 *
 * @param hwfc Use hardware flow control.
 * @param err_byte Index of the corrupted byte in the second 10 byte sequence.
 */
static void test_detect_error(bool hwfc, int err_byte)
{
	uint8_t buf[10];
	int err;

	reconfigure(uart_dev, true, &hwfc);
	reconfigure(uart_dev_aux, true, &hwfc);

	for (size_t i = 0; i < sizeof(buf); i++) {
		buf[i] = i;
	}

	if (IS_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN)) {
		if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
			pm_device_runtime_get(uart_dev);
		}
		uart_irq_err_enable(uart_dev);
		uart_irq_rx_enable(uart_dev);
	} else {
		uint8_t *b = alloc_rx_chunk();

		LOG_INF("dut rx enable buf:%p", (void *)b);
		err = uart_rx_enable(uart_dev, b, RX_CHUNK_LEN, RX_TIMEOUT);
		zassert_equal(err, 0);
	}

	/* Send TX without error. */
	aux_tx(uart_dev_aux, buf, sizeof(buf), -1);

	k_msleep(10);
	zassert_equal(sizeof(buf), rx_buffer_cnt, "Expected %d got %d", sizeof(buf), rx_buffer_cnt);
	zassert_equal(memcmp(buf, rx_buffer, rx_buffer_cnt), 0);

	/* Send TX with an error on the nth byte. */
	aux_tx(uart_dev_aux, buf, sizeof(buf), err_byte);

	/* At this point an error has been detected and the receiver is restarted.
	 * If HWFC is disabled, the receiver may be re-enabled while there is still
	 * a transmission on the line, which triggers the next error, so multiple
	 * errors may be detected until there is a gap on the line. When HWFC is
	 * enabled there should be only one error.
	 */
	k_msleep(100);
	zassert_true(rx_stopped_cnt > 0);

	/* Send TX without error. The receiver has settled, so it should be received correctly. */
	aux_tx(uart_dev_aux, buf, sizeof(buf), -1);

	k_msleep(100);
	TC_PRINT("RX bytes:%d/%d err_cnt:%d\n", rx_buffer_cnt, 3 * sizeof(buf), rx_stopped_cnt);

	LOG_HEXDUMP_INF(rx_buffer, rx_buffer_cnt, "Received data:");

	/* The last received chunk should be correct. */
	zassert_equal(memcmp(buf, &rx_buffer[rx_buffer_cnt - sizeof(buf)], sizeof(buf)), 0);

	if (IS_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN)) {
		uart_irq_err_disable(uart_dev);
		uart_irq_rx_disable(uart_dev);
		if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
			pm_device_runtime_put(uart_dev);
		}
	} else {
		rx_active = false;
		err = uart_rx_disable(uart_dev);
		zassert_true((err == 0) || (err == -EFAULT));

		k_msleep(10);
	}
}
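
/* Test cases: error injected in the first byte or in the middle of the second
 * transfer, with and without hardware flow control.
 */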
ZTEST(uart_errors, test_detect_error_first_byte)
{
	test_detect_error(false, 0);
}

ZTEST(uart_errors, test_detect_error_in_the_middle)
{
	test_detect_error(false, 5);
}

ZTEST(uart_errors, test_detect_error_first_byte_hwfc)
{
	test_detect_error(true, 0);
}

ZTEST(uart_errors, test_detect_error_in_the_middle_hwfc)
{
	test_detect_error(true, 5);
}

/*
 * Test setup
 */
static void *test_setup(void)
{
	zassert_true(device_is_ready(uart_dev), "DUT UART device is not ready");
	zassert_true(device_is_ready(uart_dev_aux), "DUT_AUX UART device is not ready");

	if (IS_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN)) {
		zassert_equal(uart_irq_callback_set(uart_dev, dut_int_callback), 0);
	} else {
		zassert_equal(uart_callback_set(uart_dev, dut_async_callback, NULL), 0);
	}

	return NULL;
}
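
/* Reset receive bookkeeping before each test case. */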
static void before(void *unused)
{
	ARG_UNUSED(unused);
	rx_buffer_cnt = 0;
	rx_stopped_cnt = 0;
	rx_active = true;
}

ZTEST_SUITE(uart_errors, NULL, test_setup, before, NULL, NULL);