/*
 * Copyright (c) 2019 Nordic Semiconductor ASA
 * Copyright (c) 2024 STMicroelectronics
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "test_uart.h"

#if defined(CONFIG_DCACHE) && defined(CONFIG_DT_DEFINED_NOCACHE)
#define __NOCACHE	__attribute__ ((__section__(CONFIG_DT_DEFINED_NOCACHE_NAME)))
#define NOCACHE_MEM 1
#elif defined(CONFIG_DCACHE) && defined(CONFIG_NOCACHE_MEMORY)
#define __NOCACHE	__nocache
#define NOCACHE_MEM 1
#else
#define NOCACHE_MEM 0
#endif /* CONFIG_NOCACHE_MEMORY */

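/* Semaphores used to synchronize the test threads with the UART async callbacks. */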
K_SEM_DEFINE(tx_done, 0, 2);
K_SEM_DEFINE(tx_aborted, 0, 1);
K_SEM_DEFINE(rx_rdy, 0, 1);
K_SEM_DEFINE(rx_buf_coherency, 0, 255);
K_SEM_DEFINE(rx_buf_released, 0, 1);
K_SEM_DEFINE(rx_disabled, 0, 1);

static ZTEST_BMEM volatile bool failed_in_isr;

struct dut_data {
	const struct device *dev;
	const char *name;
};

ZTEST_DMEM struct dut_data duts[] = {
	{
		.dev = DEVICE_DT_GET(UART_NODE),
		.name = DT_NODE_FULL_NAME(UART_NODE),
	},
#if DT_NODE_EXISTS(DT_NODELABEL(dut2)) && DT_NODE_HAS_STATUS(DT_NODELABEL(dut2), okay)
	{
		.dev = DEVICE_DT_GET(DT_NODELABEL(dut2)),
		.name = DT_NODE_FULL_NAME(DT_NODELABEL(dut2)),
	},
#endif
};

static ZTEST_BMEM const struct device *uart_dev;
static ZTEST_BMEM const char *uart_name;

static void read_abort_timeout(struct k_timer *timer);
static K_TIMER_DEFINE(read_abort_timer, read_abort_timeout, NULL);

#ifdef CONFIG_USERSPACE
static void set_permissions(void)
{
	k_thread_access_grant(k_current_get(), &tx_done, &tx_aborted,
			      &rx_rdy, &rx_buf_coherency, &rx_buf_released,
			      &rx_disabled, uart_dev, &read_abort_timer);

	for (size_t i = 0; i < ARRAY_SIZE(duts); i++) {
		k_thread_access_grant(k_current_get(), duts[i].dev);
	}
}
#endif

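/*
 * Select the DUT for the given index, reset all semaphores and bring the UART
 * into a known configuration before a suite runs.
 */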
static void uart_async_test_init(int idx)
{
	static bool initialized;

	uart_dev = duts[idx].dev;
	uart_name = duts[idx].name;

	__ASSERT_NO_MSG(device_is_ready(uart_dev));
	TC_PRINT("UART instance:%s\n", uart_name);
	uart_rx_disable(uart_dev);
	uart_tx_abort(uart_dev);
	k_sem_reset(&tx_done);
	k_sem_reset(&tx_aborted);
	k_sem_reset(&rx_rdy);
	k_sem_reset(&rx_buf_coherency);
	k_sem_reset(&rx_buf_released);
	k_sem_reset(&rx_disabled);

	struct uart_config uart_cfg;

	zassert_equal(uart_config_get(uart_dev, &uart_cfg), 0);

	if (IS_ENABLED(CONFIG_COVERAGE)) {
		/* When coverage is enabled, performance is degraded, so avoid
		 * using higher baudrates.
		 */
		uart_cfg.baudrate = MIN(uart_cfg.baudrate, 115200);
	} else if (IS_ENABLED(CONFIG_UART_WIDE_DATA)) {
		uart_cfg.baudrate = 115200;
		uart_cfg.parity = UART_CFG_PARITY_NONE;
		uart_cfg.stop_bits = UART_CFG_STOP_BITS_1;
		uart_cfg.data_bits = UART_CFG_DATA_BITS_9;
		uart_cfg.flow_ctrl = UART_CFG_FLOW_CTRL_NONE;
	}
	zassert_equal(uart_configure(uart_dev, &uart_cfg), 0);

	if (!initialized) {
		initialized = true;
#ifdef CONFIG_USERSPACE
		set_permissions();
#endif
	}
}

struct test_data {
	volatile uint32_t tx_aborted_count;
	__aligned(sizeof(void *)) uint8_t rx_first_buffer[10];
	uint32_t recv_bytes_first_buffer;
	__aligned(sizeof(void *)) uint8_t rx_second_buffer[5];
	uint32_t recv_bytes_second_buffer;
	bool supply_second_buffer;
};

#if NOCACHE_MEM
static struct test_data tdata __used __NOCACHE;
#else
static ZTEST_BMEM struct test_data tdata;
#endif /* NOCACHE_MEM */

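/* Callback for the single_read suite: counts received bytes per buffer and
 * signals the corresponding semaphores.
 */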
static void test_single_read_callback(const struct device *dev,
			       struct uart_event *evt, void *user_data)
{
	ARG_UNUSED(dev);
	struct test_data *data = (struct test_data *)user_data;

	switch (evt->type) {
	case UART_TX_DONE:
		k_sem_give(&tx_done);
		break;
	case UART_TX_ABORTED:
		data->tx_aborted_count++;
		break;
	case UART_RX_RDY:
		if ((uintptr_t)evt->data.rx.buf < (uintptr_t)tdata.rx_second_buffer) {
			data->recv_bytes_first_buffer += evt->data.rx.len;
		} else {
			data->recv_bytes_second_buffer += evt->data.rx.len;
		}
		k_sem_give(&rx_rdy);
		break;
	case UART_RX_BUF_RELEASED:
		k_sem_give(&rx_buf_released);
		break;
	case UART_RX_BUF_REQUEST:
		if (data->supply_second_buffer) {
			/* Reply to one buffer request. */
			uart_rx_buf_rsp(dev, data->rx_second_buffer,
					sizeof(data->rx_second_buffer));
			data->supply_second_buffer = false;
		}
		break;
	case UART_RX_DISABLED:
		k_sem_give(&rx_disabled);
		break;
	default:
		break;
	}
}

static ZTEST_BMEM volatile uint32_t tx_aborted_count;

static void *single_read_setup(void)
{
	static int idx;

	uart_async_test_init(idx++);

	memset(&tdata, 0, sizeof(tdata));
	tdata.supply_second_buffer = true;
	uart_callback_set(uart_dev,
			  test_single_read_callback,
			  (void *) &tdata);

	return NULL;
}

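/* Verify that the received data matches what was sent and that no extra bytes
 * were written past the reported lengths.
 */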
static void tdata_check_recv_buffers(const uint8_t *tx_buf, uint32_t sent_bytes)
{
	uint32_t recv_bytes_total;

	recv_bytes_total = tdata.recv_bytes_first_buffer + tdata.recv_bytes_second_buffer;
	zassert_equal(recv_bytes_total, sent_bytes, "Incorrect number of bytes received");

	zassert_equal(memcmp(tx_buf, tdata.rx_first_buffer, tdata.recv_bytes_first_buffer), 0,
		      "Invalid data received in first buffer");
	zassert_equal(memcmp(tx_buf + tdata.recv_bytes_first_buffer, tdata.rx_second_buffer,
			     tdata.recv_bytes_second_buffer),
		      0, "Invalid data received in second buffer");

	/* Check that the remaining bytes in the buffers are zero. */
	for (int i = tdata.recv_bytes_first_buffer; i < sizeof(tdata.rx_first_buffer); i++) {
		zassert_equal(tdata.rx_first_buffer[i], 0,
			      "Received extra data to the first buffer");
	}

	for (int i = tdata.recv_bytes_second_buffer; i < sizeof(tdata.rx_second_buffer); i++) {
		zassert_equal(tdata.rx_second_buffer[i], 0,
			      "Received extra data to the second buffer");
	}
}

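/* Receive into a 10-byte buffer using two 5-byte transmissions and verify the
 * received contents as well as the buffer release and RX disable events.
 */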
ZTEST_USER(uart_async_single_read, test_single_read)
{
	/* Also check that sending from read-only memory (e.g. flash) works. */
	static const uint8_t tx_buf[] = "0123456789";
	uint32_t sent_bytes = 0;

	zassert_not_equal(memcmp(tx_buf, tdata.rx_first_buffer, 5), 0,
			  "Initial buffer check failed");

	uart_rx_enable(uart_dev, tdata.rx_first_buffer, 10, 50 * USEC_PER_MSEC);
	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(100)), -EAGAIN,
		      "RX_RDY not expected at this point");

	uart_tx(uart_dev, tx_buf, 5, 100 * USEC_PER_MSEC);
	sent_bytes += 5;

	zassert_equal(k_sem_take(&tx_done, K_MSEC(100)), 0, "TX_DONE timeout");
	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(105)), 0, "RX_RDY timeout");
	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(100)), -EAGAIN,
		      "Extra RX_RDY received");

	tdata_check_recv_buffers(tx_buf, sent_bytes);

	uart_tx(uart_dev, tx_buf + sent_bytes, 5, 100 * USEC_PER_MSEC);
	sent_bytes += 5;

	zassert_equal(k_sem_take(&tx_done, K_MSEC(100)), 0, "TX_DONE timeout");
	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(100)), 0, "RX_RDY timeout");
	zassert_equal(k_sem_take(&rx_buf_released, K_MSEC(100)),
		      0,
		      "RX_BUF_RELEASED timeout");
	uart_rx_disable(uart_dev);

	zassert_equal(k_sem_take(&rx_disabled, K_MSEC(1000)), 0,
		      "RX_DISABLED timeout");
	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(100)), -EAGAIN,
		      "Extra RX_RDY received");

	tdata_check_recv_buffers(tx_buf, sent_bytes);

	zassert_equal(tdata.tx_aborted_count, 0, "TX aborted triggered");
}

static void *multiple_rx_enable_setup(void)
{
	static int idx;

	uart_async_test_init(idx++);

	memset(&tdata, 0, sizeof(tdata));
	/* Reuse the callback from the single_read test case, as this test case
	 * does not need anything extra in this regard.
	 */
	uart_callback_set(uart_dev,
			  test_single_read_callback,
			  (void *)&tdata);

	return NULL;
}

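/* Verify that RX can be disabled and re-enabled repeatedly, both after a
 * manual uart_rx_disable() and after RX stops on its own once the buffer
 * is full.
 */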
ZTEST_USER(uart_async_multi_rx, test_multiple_rx_enable)
{
	/* Also check that sending from read-only memory (e.g. flash) works. */
	static const uint8_t tx_buf[] = "test";
	const uint32_t rx_buf_size = sizeof(tx_buf);
	int ret;

	BUILD_ASSERT(sizeof(tx_buf) <= sizeof(tdata.rx_first_buffer), "Invalid buf size");

	/* Enable RX without a timeout. */
	ret = uart_rx_enable(uart_dev, tdata.rx_first_buffer, rx_buf_size, SYS_FOREVER_US);
	zassert_equal(ret, 0, "uart_rx_enable failed");
	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(100)), -EAGAIN,
		      "RX_RDY not expected at this point");
	zassert_equal(k_sem_take(&rx_disabled, K_MSEC(100)), -EAGAIN,
		      "RX_DISABLED not expected at this point");

	/* Disable RX before any data has been received. */
	ret = uart_rx_disable(uart_dev);
	zassert_equal(ret, 0, "uart_rx_disable failed");
	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(100)), -EAGAIN,
		      "RX_RDY not expected at this point");
	zassert_equal(k_sem_take(&rx_buf_released, K_MSEC(100)), 0,
		      "RX_BUF_RELEASED timeout");
	zassert_equal(k_sem_take(&rx_disabled, K_MSEC(100)), 0,
		      "RX_DISABLED timeout");

	k_sem_reset(&rx_buf_released);
	k_sem_reset(&rx_disabled);

	/* Check that RX can be re-enabled after "manual" disabling. */
	ret = uart_rx_enable(uart_dev, tdata.rx_first_buffer, rx_buf_size,
			     50 * USEC_PER_MSEC);
	zassert_equal(ret, 0, "uart_rx_enable failed");
	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(100)), -EAGAIN,
		      "RX_RDY not expected at this point");

	/* Send enough data to completely fill the RX buffer, so that RX ends. */
	ret = uart_tx(uart_dev, tx_buf, sizeof(tx_buf), 100 * USEC_PER_MSEC);
	zassert_equal(ret, 0, "uart_tx failed");
	zassert_equal(k_sem_take(&tx_done, K_MSEC(100)), 0, "TX_DONE timeout");
	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(100)), 0, "RX_RDY timeout");
	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(100)), -EAGAIN,
		      "Extra RX_RDY received");
	zassert_equal(k_sem_take(&rx_buf_released, K_MSEC(100)), 0,
		      "RX_BUF_RELEASED timeout");
	zassert_equal(k_sem_take(&rx_disabled, K_MSEC(100)), 0,
		      "RX_DISABLED timeout");
	zassert_equal(tx_aborted_count, 0, "Unexpected TX abort");

	tdata_check_recv_buffers(tx_buf, sizeof(tx_buf));

	k_sem_reset(&rx_rdy);
	k_sem_reset(&rx_buf_released);
	k_sem_reset(&rx_disabled);
	k_sem_reset(&tx_done);

	memset(&tdata, 0, sizeof(tdata));

	/* Check that RX can be re-enabled after automatic disabling. */
	ret = uart_rx_enable(uart_dev, tdata.rx_first_buffer, rx_buf_size,
			     50 * USEC_PER_MSEC);
	zassert_equal(ret, 0, "uart_rx_enable failed");
	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(100)), -EAGAIN,
		      "RX_RDY not expected at this point");

	/* Fill the RX buffer again to confirm that RX still works properly. */
	ret = uart_tx(uart_dev, tx_buf, sizeof(tx_buf), 100 * USEC_PER_MSEC);
	zassert_equal(ret, 0, "uart_tx failed");
	zassert_equal(k_sem_take(&tx_done, K_MSEC(100)), 0, "TX_DONE timeout");
	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(100)), 0, "RX_RDY timeout");
	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(100)), -EAGAIN,
		      "Extra RX_RDY received");
	zassert_equal(k_sem_take(&rx_buf_released, K_MSEC(100)), 0,
		      "RX_BUF_RELEASED timeout");
	zassert_equal(k_sem_take(&rx_disabled, K_MSEC(100)), 0,
		      "RX_DISABLED timeout");
	zassert_equal(tx_aborted_count, 0, "Unexpected TX abort");

	tdata_check_recv_buffers(tx_buf, sizeof(tx_buf));
}

#if NOCACHE_MEM
static __aligned(sizeof(void *)) uint8_t chained_read_buf_0[10] __used __NOCACHE;
static __aligned(sizeof(void *)) uint8_t chained_read_buf_1[10] __used __NOCACHE;
static __aligned(sizeof(void *)) uint8_t chained_cpy_buf[10] __used __NOCACHE;
#else
ZTEST_BMEM uint8_t chained_read_buf_0[10];
ZTEST_BMEM uint8_t chained_read_buf_1[10];
ZTEST_BMEM uint8_t chained_cpy_buf[10];
#endif /* NOCACHE_MEM */
static ZTEST_BMEM volatile uint8_t rx_data_idx;
static ZTEST_BMEM uint8_t rx_buf_idx;

static ZTEST_BMEM uint8_t *read_ptr;

static uint8_t *chained_read_buf[2] = {chained_read_buf_0, chained_read_buf_1};

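/* Callback for the chained read suite: copies every RX_RDY chunk into
 * chained_cpy_buf and supplies the alternate receive buffer on each
 * buffer request.
 */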
static void test_chained_read_callback(const struct device *dev,
				struct uart_event *evt, void *user_data)
{
	int err;

	switch (evt->type) {
	case UART_TX_DONE:
		k_sem_give(&tx_done);
		break;
	case UART_RX_RDY:
		zassert_true(rx_data_idx + evt->data.rx.len <= sizeof(chained_cpy_buf));
		memcpy(&chained_cpy_buf[rx_data_idx],
		       &evt->data.rx.buf[evt->data.rx.offset],
		       evt->data.rx.len);
		rx_data_idx += evt->data.rx.len;
		break;
	case UART_RX_BUF_REQUEST:
		err = uart_rx_buf_rsp(dev, chained_read_buf[rx_buf_idx],
				      sizeof(chained_read_buf_0));
		zassert_equal(err, 0);
		rx_buf_idx = !rx_buf_idx ? 1 : 0;
		break;
	case UART_RX_DISABLED:
		k_sem_give(&rx_disabled);
		break;
	default:
		break;
	}
}

static void *chained_read_setup(void)
{
	static int idx;

	uart_async_test_init(idx++);

	uart_callback_set(uart_dev, test_chained_read_callback, NULL);

	return NULL;
}

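/* Send several messages while continuously supplying chained receive buffers
 * and verify that each message is received intact.
 */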
ZTEST_USER(uart_async_chain_read, test_chained_read)
{
#if NOCACHE_MEM
	static __aligned(sizeof(void *)) uint8_t tx_buf[10] __used __NOCACHE;
#else
	__aligned(sizeof(void *)) uint8_t tx_buf[10];
#endif /* NOCACHE_MEM */
	int iter = 6;
	uint32_t rx_timeout_ms = 50;
	int err;

	err = uart_rx_enable(uart_dev, chained_read_buf[rx_buf_idx++], sizeof(chained_read_buf_0),
			     rx_timeout_ms * USEC_PER_MSEC);
	zassert_equal(err, 0);
	rx_data_idx = 0;

	for (int i = 0; i < iter; i++) {
		zassert_not_equal(k_sem_take(&rx_disabled, K_MSEC(10)),
				  0,
				  "RX_DISABLED occurred");
		snprintf(tx_buf, sizeof(tx_buf), "Message %d", i);
		uart_tx(uart_dev, tx_buf, sizeof(tx_buf), 100 * USEC_PER_MSEC);
		zassert_equal(k_sem_take(&tx_done, K_MSEC(100)), 0,
			      "TX_DONE timeout");
		k_msleep(rx_timeout_ms + 10);
		zassert_equal(rx_data_idx, sizeof(tx_buf),
				"Unexpected amount of data received %d exp:%zu",
				rx_data_idx, sizeof(tx_buf));
		zassert_equal(memcmp(tx_buf, chained_cpy_buf, sizeof(tx_buf)), 0,
			      "Buffers not equal exp %s, real %s", tx_buf, chained_cpy_buf);
		rx_data_idx = 0;
	}
	uart_rx_disable(uart_dev);
	zassert_equal(k_sem_take(&rx_disabled, K_MSEC(100)), 0,
		      "RX_DISABLED timeout");
}

#if NOCACHE_MEM
static __aligned(sizeof(void *)) uint8_t double_buffer[2][12] __used __NOCACHE;
#else
static ZTEST_BMEM uint8_t double_buffer[2][12];
#endif /* NOCACHE_MEM */
static ZTEST_DMEM uint8_t *next_buf = double_buffer[1];

static void test_double_buffer_callback(const struct device *dev,
				 struct uart_event *evt, void *user_data)
{
	switch (evt->type) {
	case UART_TX_DONE:
		k_sem_give(&tx_done);
		break;
	case UART_RX_RDY:
		read_ptr = evt->data.rx.buf + evt->data.rx.offset;
		k_sem_give(&rx_rdy);
		break;
	case UART_RX_BUF_REQUEST:
		uart_rx_buf_rsp(dev, next_buf, sizeof(double_buffer[0]));
		break;
	case UART_RX_BUF_RELEASED:
		next_buf = evt->data.rx_buf.buf;
		k_sem_give(&rx_buf_released);
		break;
	case UART_RX_DISABLED:
		k_sem_give(&rx_disabled);
		break;
	default:
		break;
	}
}

static void *double_buffer_setup(void)
{
	static int idx;

	uart_async_test_init(idx++);

	uart_callback_set(uart_dev, test_double_buffer_callback, NULL);

	return NULL;
}

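/* Alternate between the two halves of double_buffer while transmitting 100
 * short messages and verify each one as it is received.
 */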
ZTEST_USER(uart_async_double_buf, test_double_buffer)
{
#if NOCACHE_MEM
	static __aligned(sizeof(void *)) uint8_t tx_buf[4] __used __NOCACHE;
#else
	__aligned(sizeof(void *)) uint8_t tx_buf[4];
#endif /* NOCACHE_MEM */

	zassert_equal(uart_rx_enable(uart_dev, double_buffer[0], sizeof(double_buffer[0]),
				     25 * USEC_PER_MSEC),
		      0, "Failed to enable receiving");

	for (int i = 0; i < 100; i++) {
		snprintf(tx_buf, sizeof(tx_buf), "%03d", i);
		uart_tx(uart_dev, tx_buf, sizeof(tx_buf), 100 * USEC_PER_MSEC);
		zassert_equal(k_sem_take(&tx_done, K_MSEC(100)), 0,
			      "TX_DONE timeout");
		zassert_equal(k_sem_take(&rx_rdy, K_MSEC(100)), 0,
			      "RX_RDY timeout");
		if (read_ptr) {
			zassert_equal(memcmp(tx_buf, read_ptr, sizeof(tx_buf)),
					0,
					"Buffers not equal");
		}
	}
	uart_rx_disable(uart_dev);
	zassert_equal(k_sem_take(&rx_disabled, K_MSEC(100)), 0,
		      "RX_DISABLED timeout");
}

#if NOCACHE_MEM
static __aligned(sizeof(void *)) uint8_t test_read_abort_rx_buf[2][100] __used __NOCACHE;
static __aligned(sizeof(void *)) uint8_t test_read_abort_read_buf[100] __used __NOCACHE;
#else
static ZTEST_BMEM uint8_t test_read_abort_rx_buf[2][100];
static ZTEST_BMEM uint8_t test_read_abort_read_buf[100];
#endif /* NOCACHE_MEM */
static ZTEST_BMEM int test_read_abort_rx_cnt;
static ZTEST_BMEM bool test_read_abort_rx_buf_req_once;

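/* Callback for the read abort suite: accumulates received data and uses the
 * rx_buf_coherency/rx_buf_released semaphores to check that the events arrive
 * in the expected order; failures are latched in failed_in_isr.
 */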
static void test_read_abort_callback(const struct device *dev,
			      struct uart_event *evt, void *user_data)
{
	int err;

	ARG_UNUSED(dev);

	switch (evt->type) {
	case UART_TX_DONE:
		k_sem_give(&tx_done);
		break;
	case UART_RX_BUF_REQUEST:
	{
		if (!test_read_abort_rx_buf_req_once) {
			k_sem_give(&rx_buf_coherency);
			uart_rx_buf_rsp(dev,
					test_read_abort_rx_buf[1],
					sizeof(test_read_abort_rx_buf[1]));
			test_read_abort_rx_buf_req_once = true;
		}
		break;
	}
	case UART_RX_RDY:
		memcpy(&test_read_abort_read_buf[test_read_abort_rx_cnt],
		       &evt->data.rx.buf[evt->data.rx.offset],
		       evt->data.rx.len);
		test_read_abort_rx_cnt += evt->data.rx.len;
		k_sem_give(&rx_rdy);
		break;
	case UART_RX_BUF_RELEASED:
		k_sem_give(&rx_buf_released);
		err = k_sem_take(&rx_buf_coherency, K_NO_WAIT);
		failed_in_isr |= (err < 0);
		break;
	case UART_RX_DISABLED:
		err = k_sem_take(&rx_buf_released, K_NO_WAIT);
		failed_in_isr |= (err < 0);
		k_sem_give(&rx_disabled);
		break;
	default:
		break;
	}
}

static void read_abort_timeout(struct k_timer *timer)
{
	int err;

	err = uart_rx_disable(uart_dev);
	zassert_equal(err, 0, "Unexpected err:%d", err);
}

static void *read_abort_setup(void)
{
	static int idx;

	uart_async_test_init(idx++);

	test_read_abort_rx_buf_req_once = false;
	failed_in_isr = false;
	uart_callback_set(uart_dev, test_read_abort_callback, NULL);

	return NULL;
}

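/* Start a long transmission and abort reception from a timer handler roughly
 * 20 bytes in, then verify that the UART events arrived in a consistent order
 * and that all provided buffers were released.
 */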
ZTEST_USER(uart_async_read_abort, test_read_abort)
{
	struct uart_config cfg;
	int err;
	uint32_t t_us;
#if NOCACHE_MEM
	static __aligned(sizeof(void *)) uint8_t rx_buf[100] __used __NOCACHE;
	static __aligned(sizeof(void *)) uint8_t tx_buf[100] __used __NOCACHE;
#else
	__aligned(sizeof(void *)) uint8_t rx_buf[100];
	__aligned(sizeof(void *)) uint8_t tx_buf[100];
#endif /* NOCACHE_MEM */

	memset(rx_buf, 0, sizeof(rx_buf));
	memset(tx_buf, 1, sizeof(tx_buf));

	err = uart_config_get(uart_dev, &cfg);
	zassert_equal(err, 0);

	/* Aim to abort after ~20 bytes (200 bit periods) have been transmitted. */
	t_us = (20 * 10 * 1000000) / cfg.baudrate;

	err = uart_rx_enable(uart_dev, rx_buf, sizeof(rx_buf), 50 * USEC_PER_MSEC);
	zassert_equal(err, 0);
	k_sem_give(&rx_buf_coherency);

	err = uart_tx(uart_dev, tx_buf, 5, 100 * USEC_PER_MSEC);
	zassert_equal(err, 0);
	zassert_equal(k_sem_take(&tx_done, K_MSEC(100)), 0, "TX_DONE timeout");
	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(100)), 0, "RX_RDY timeout");
	zassert_equal(memcmp(tx_buf, rx_buf, 5), 0, "Buffers not equal");

	err = uart_tx(uart_dev, tx_buf, 95, 100 * USEC_PER_MSEC);
	zassert_equal(err, 0);

	k_timer_start(&read_abort_timer, K_USEC(t_us), K_NO_WAIT);

	/* RX will be aborted from the k_timer timeout handler. */

	zassert_equal(k_sem_take(&tx_done, K_MSEC(100)), 0, "TX_DONE timeout");
	zassert_equal(k_sem_take(&rx_disabled, K_MSEC(100)), 0,
		      "RX_DISABLED timeout");
	zassert_false(failed_in_isr, "Unexpected order of uart events");
	zassert_not_equal(memcmp(tx_buf, test_read_abort_read_buf, 100), 0, "Buffers equal");

	/* Read out any remaining RX bytes that could affect the following RX test. */
	uart_rx_enable(uart_dev, rx_buf, sizeof(rx_buf), 50 * USEC_PER_MSEC);
	while (k_sem_take(&rx_rdy, K_MSEC(1000)) != -EAGAIN) {
		;
	}
	uart_rx_disable(uart_dev);
	k_msleep(10);
	zassert_not_equal(k_sem_take(&rx_buf_coherency, K_NO_WAIT), 0,
			"All provided buffers are released");
}

static ZTEST_BMEM volatile size_t sent;
static ZTEST_BMEM volatile size_t received;
#if NOCACHE_MEM
static __aligned(sizeof(void *)) uint8_t test_rx_buf[2][100] __used __NOCACHE;
#else
static ZTEST_BMEM uint8_t test_rx_buf[2][100];
#endif /* NOCACHE_MEM */

static void test_write_abort_callback(const struct device *dev,
			       struct uart_event *evt, void *user_data)
{
	ARG_UNUSED(dev);

	switch (evt->type) {
	case UART_TX_DONE:
		k_sem_give(&tx_done);
		break;
	case UART_TX_ABORTED:
		sent = evt->data.tx.len;
		k_sem_give(&tx_aborted);
		break;
	case UART_RX_RDY:
		received = evt->data.rx.len;
		k_sem_give(&rx_rdy);
		break;
	case UART_RX_BUF_REQUEST:
		uart_rx_buf_rsp(dev, test_rx_buf[1], sizeof(test_rx_buf[1]));
		break;
	case UART_RX_BUF_RELEASED:
		k_sem_give(&rx_buf_released);
		break;
	case UART_RX_DISABLED:
		k_sem_give(&rx_disabled);
		break;
	default:
		break;
	}
}

static void *write_abort_setup(void)
{
	static int idx;

	uart_async_test_init(idx++);

	uart_callback_set(uart_dev, test_write_abort_callback, NULL);

	return NULL;
}

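/* Abort an ongoing transmission and verify that the number of bytes reported
 * as sent matches the number of bytes received.
 */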
ZTEST_USER(uart_async_write_abort, test_write_abort)
{
#if NOCACHE_MEM
	static __aligned(sizeof(void *)) uint8_t tx_buf[100] __used __NOCACHE;
#else
	__aligned(sizeof(void *)) uint8_t tx_buf[100];
#endif /* NOCACHE_MEM */

	memset(test_rx_buf, 0, sizeof(test_rx_buf));
	memset(tx_buf, 1, sizeof(tx_buf));

	uart_rx_enable(uart_dev, test_rx_buf[0], sizeof(test_rx_buf[0]), 50 * USEC_PER_MSEC);

	uart_tx(uart_dev, tx_buf, 5, 100 * USEC_PER_MSEC);
	zassert_equal(k_sem_take(&tx_done, K_MSEC(100)), 0, "TX_DONE timeout");
	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(100)), 0, "RX_RDY timeout");
	zassert_equal(memcmp(tx_buf, test_rx_buf, 5), 0, "Buffers not equal");

	uart_tx(uart_dev, tx_buf, 95, 100 * USEC_PER_MSEC);
	uart_tx_abort(uart_dev);
	zassert_equal(k_sem_take(&tx_aborted, K_MSEC(100)), 0,
		      "TX_ABORTED timeout");
	if (sent != 0) {
		zassert_equal(k_sem_take(&rx_rdy, K_MSEC(100)), 0,
			      "RX_RDY timeout");
		k_sleep(K_MSEC(30));
		zassert_equal(sent, received, "Sent is not equal to received.");
	}
	uart_rx_disable(uart_dev);
	zassert_equal(k_sem_take(&rx_buf_released, K_MSEC(100)),
		      0,
		      "RX_BUF_RELEASED timeout");
	zassert_equal(k_sem_take(&rx_disabled, K_MSEC(100)), 0,
		      "RX_DISABLED timeout");
}

static void test_forever_timeout_callback(const struct device *dev,
				   struct uart_event *evt, void *user_data)
{
	ARG_UNUSED(dev);

	switch (evt->type) {
	case UART_TX_DONE:
		k_sem_give(&tx_done);
		break;
	case UART_TX_ABORTED:
		sent = evt->data.tx.len;
		k_sem_give(&tx_aborted);
		break;
	case UART_RX_RDY:
		received = evt->data.rx.len;
		k_sem_give(&rx_rdy);
		break;
	case UART_RX_BUF_RELEASED:
		k_sem_give(&rx_buf_released);
		break;
	case UART_RX_DISABLED:
		k_sem_give(&rx_disabled);
		break;
	default:
		break;
	}
}

static void *forever_timeout_setup(void)
{
	static int idx;

	uart_async_test_init(idx++);

	uart_callback_set(uart_dev, test_forever_timeout_callback, NULL);

	return NULL;
}

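/* Use SYS_FOREVER_US for both TX and RX timeouts and verify that received data
 * is only reported once the receive buffer has been completely filled.
 */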
ZTEST_USER(uart_async_timeout, test_forever_timeout)
{
#if NOCACHE_MEM
	static __aligned(sizeof(void *)) uint8_t rx_buf[100] __used __NOCACHE;
	static __aligned(sizeof(void *)) uint8_t tx_buf[100] __used __NOCACHE;
#else
	__aligned(sizeof(void *)) uint8_t rx_buf[100];
	__aligned(sizeof(void *)) uint8_t tx_buf[100];
#endif /* NOCACHE_MEM */

	memset(rx_buf, 0, sizeof(rx_buf));
	memset(tx_buf, 1, sizeof(tx_buf));

	uart_rx_enable(uart_dev, rx_buf, sizeof(rx_buf), SYS_FOREVER_US);

	uart_tx(uart_dev, tx_buf, 5, SYS_FOREVER_US);
	zassert_not_equal(k_sem_take(&tx_aborted, K_MSEC(1000)), 0,
			  "TX_ABORTED timeout");
	zassert_equal(k_sem_take(&tx_done, K_MSEC(100)), 0, "TX_DONE timeout");
	zassert_not_equal(k_sem_take(&rx_rdy, K_MSEC(1000)), 0,
			  "RX_RDY timeout");

	uart_tx(uart_dev, tx_buf, 95, SYS_FOREVER_US);

	zassert_not_equal(k_sem_take(&tx_aborted, K_MSEC(1000)), 0,
			  "TX_ABORTED timeout");
	zassert_equal(k_sem_take(&tx_done, K_MSEC(100)), 0, "TX_DONE timeout");
	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(100)), 0, "RX_RDY timeout");

	zassert_equal(memcmp(tx_buf, rx_buf, 100), 0, "Buffers not equal");

	uart_rx_disable(uart_dev);
	zassert_equal(k_sem_take(&rx_buf_released, K_MSEC(100)),
		      0,
		      "RX_BUF_RELEASED timeout");
	zassert_equal(k_sem_take(&rx_disabled, K_MSEC(100)), 0,
		      "RX_DISABLED timeout");
}

#if NOCACHE_MEM
static const uint8_t chained_write_tx_bufs[2][10] = {"Message 1", "Message 2"};
#else
static ZTEST_DMEM uint8_t chained_write_tx_bufs[2][10] = {"Message 1", "Message 2"};
#endif /* NOCACHE_MEM */
static ZTEST_DMEM bool chained_write_next_buf = true;
static ZTEST_BMEM volatile uint8_t tx_sent;

static void test_chained_write_callback(const struct device *dev,
				 struct uart_event *evt, void *user_data)
{
	switch (evt->type) {
	case UART_TX_DONE:
		if (chained_write_next_buf) {
			chained_write_next_buf = false;
			uart_tx(dev, chained_write_tx_bufs[1], 10, 100 * USEC_PER_MSEC);
		}
		tx_sent = 1;
		k_sem_give(&tx_done);
		break;
	case UART_TX_ABORTED:
		sent = evt->data.tx.len;
		k_sem_give(&tx_aborted);
		break;
	case UART_RX_RDY:
		received = evt->data.rx.len;
		k_sem_give(&rx_rdy);
		break;
	case UART_RX_BUF_RELEASED:
		k_sem_give(&rx_buf_released);
		break;
	case UART_RX_DISABLED:
		k_sem_give(&rx_disabled);
		break;
	default:
		break;
	}
}

static void *chained_write_setup(void)
{
	static int idx;

	uart_async_test_init(idx++);

	tx_sent = 0;
	chained_write_next_buf = true;
	uart_callback_set(uart_dev, test_chained_write_callback, NULL);

	return NULL;
}

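/* Start the second transmission from the TX_DONE handler of the first one and
 * verify that both messages are received back to back.
 */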
ZTEST_USER(uart_async_chain_write, test_chained_write)
{
#if NOCACHE_MEM
	static __aligned(sizeof(void *)) uint8_t rx_buf[20] __used __NOCACHE;
#else
	__aligned(sizeof(void *)) uint8_t rx_buf[20];
#endif /* NOCACHE_MEM */

	memset(rx_buf, 0, sizeof(rx_buf));

	uart_rx_enable(uart_dev, rx_buf, sizeof(rx_buf), 50 * USEC_PER_MSEC);

	uart_tx(uart_dev, chained_write_tx_bufs[0], 10, 100 * USEC_PER_MSEC);
	zassert_equal(k_sem_take(&tx_done, K_MSEC(100)), 0, "TX_DONE timeout");
	zassert_equal(k_sem_take(&tx_done, K_MSEC(100)), 0, "TX_DONE timeout");
	zassert_equal(chained_write_next_buf, false, "Second message was not sent");
	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(100)), 0, "RX_RDY timeout");
	zassert_equal(memcmp(chained_write_tx_bufs[0], rx_buf, 10),
		      0,
		      "Buffers not equal");
	zassert_equal(memcmp(chained_write_tx_bufs[1], rx_buf + 10, 10),
		      0,
		      "Buffers not equal");

	uart_rx_disable(uart_dev);
	zassert_equal(k_sem_take(&rx_buf_released, K_MSEC(100)),
		      0,
		      "RX_BUF_RELEASED timeout");
	zassert_equal(k_sem_take(&rx_disabled, K_MSEC(100)), 0,
		      "RX_DISABLED timeout");
}

#define RX_LONG_BUFFER CONFIG_TEST_LONG_BUFFER_SIZE
#define TX_LONG_BUFFER (CONFIG_TEST_LONG_BUFFER_SIZE - 8)

#if NOCACHE_MEM
static __aligned(sizeof(void *)) uint8_t long_rx_buf[RX_LONG_BUFFER] __used __NOCACHE;
static __aligned(sizeof(void *)) uint8_t long_rx_buf2[RX_LONG_BUFFER] __used __NOCACHE;
static __aligned(sizeof(void *)) uint8_t long_tx_buf[TX_LONG_BUFFER] __used __NOCACHE;
#else
static ZTEST_BMEM uint8_t long_rx_buf[RX_LONG_BUFFER];
static ZTEST_BMEM uint8_t long_rx_buf2[RX_LONG_BUFFER];
static ZTEST_BMEM uint8_t long_tx_buf[TX_LONG_BUFFER];
#endif /* NOCACHE_MEM */
static ZTEST_BMEM volatile uint8_t evt_num;
static ZTEST_BMEM size_t long_received[2];
static ZTEST_BMEM uint8_t *long_next_buffer;

static void test_long_buffers_callback(const struct device *dev,
				struct uart_event *evt, void *user_data)
{
	switch (evt->type) {
	case UART_TX_DONE:
		k_sem_give(&tx_done);
		break;
	case UART_TX_ABORTED:
		sent = evt->data.tx.len;
		k_sem_give(&tx_aborted);
		break;
	case UART_RX_RDY:
		long_received[evt_num] = evt->data.rx.len;
		evt_num++;
		k_sem_give(&rx_rdy);
		break;
	case UART_RX_BUF_RELEASED:
		k_sem_give(&rx_buf_released);
		break;
	case UART_RX_DISABLED:
		k_sem_give(&rx_disabled);
		break;
	case UART_RX_BUF_REQUEST:
		uart_rx_buf_rsp(dev, long_next_buffer, RX_LONG_BUFFER);
		long_next_buffer = (long_next_buffer == long_rx_buf2) ? long_rx_buf : long_rx_buf2;
		break;
	default:
		break;
	}
}

static void *long_buffers_setup(void)
{
	static int idx;

	uart_async_test_init(idx++);

	evt_num = 0;
	long_next_buffer = long_rx_buf2;
	uart_callback_set(uart_dev, test_long_buffers_callback, NULL);

	return NULL;
}

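/* Exercise receive buffers larger than a single transmission; the expected
 * event sequence depends on whether the driver releases the buffer on RX
 * timeout, which is detected at run time.
 */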
ZTEST_USER(uart_async_long_buf, test_long_buffers)
{
	size_t tx_len1 = TX_LONG_BUFFER / 2;
	size_t tx_len2 = TX_LONG_BUFFER;

	memset(long_rx_buf, 0, sizeof(long_rx_buf));
	memset(long_tx_buf, 1, sizeof(long_tx_buf));

	uart_rx_enable(uart_dev, long_rx_buf, sizeof(long_rx_buf), 10 * USEC_PER_MSEC);

	uart_tx(uart_dev, long_tx_buf, tx_len1, 200 * USEC_PER_MSEC);
	zassert_equal(k_sem_take(&tx_done, K_MSEC(200)), 0, "TX_DONE timeout");
	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(200)), 0, "RX_RDY timeout");
	zassert_equal(long_received[0], tx_len1, "Wrong number of bytes received.");
	zassert_equal(memcmp(long_tx_buf, long_rx_buf, tx_len1),
		      0,
		      "Buffers not equal");
	k_msleep(10);
	/* Check whether the instance releases the buffer after the RX timeout. */
	bool release_on_timeout = k_sem_take(&rx_buf_released, K_NO_WAIT) == 0;

	evt_num = 0;
	uart_tx(uart_dev, long_tx_buf, tx_len2, 200 * USEC_PER_MSEC);
	zassert_equal(k_sem_take(&tx_done, K_MSEC(200)), 0, "TX_DONE timeout");
	zassert_equal(k_sem_take(&rx_rdy, K_MSEC(200)), 0, "RX_RDY timeout");

	if (release_on_timeout) {
		zassert_equal(long_received[0], tx_len2, "Wrong number of bytes received.");
		zassert_equal(memcmp(long_tx_buf, long_rx_buf2, long_received[0]), 0,
			      "Buffers not equal");
	} else {
		zassert_equal(k_sem_take(&rx_rdy, K_MSEC(200)), 0, "RX_RDY timeout");
		zassert_equal(long_received[0], RX_LONG_BUFFER - tx_len1,
				"Wrong number of bytes received.");
		zassert_equal(long_received[1], tx_len2 - (RX_LONG_BUFFER - tx_len1),
				"Wrong number of bytes received.");
		zassert_equal(memcmp(long_tx_buf, long_rx_buf + tx_len1, long_received[0]), 0,
			      "Buffers not equal");
		zassert_equal(memcmp(long_tx_buf, long_rx_buf2, long_received[1]), 0,
			      "Buffers not equal");
	}

	uart_rx_disable(uart_dev);
	zassert_equal(k_sem_take(&rx_buf_released, K_MSEC(100)),
		      0,
		      "RX_BUF_RELEASED timeout");
	zassert_equal(k_sem_take(&rx_disabled, K_MSEC(100)), 0,
		      "RX_DISABLED timeout");
}

#define VAR_LENGTH_TX_BUF_SIZE 12
#define VAR_LENGTH_RX_BUF_SIZE (VAR_LENGTH_TX_BUF_SIZE * 4)

#if NOCACHE_MEM
static volatile uint8_t __aligned(sizeof(void *))
	var_length_rx_buf[VAR_LENGTH_RX_BUF_SIZE] __used __NOCACHE;
static volatile uint8_t __aligned(sizeof(void *))
	var_length_rx_buf_pool[VAR_LENGTH_RX_BUF_SIZE] __used __NOCACHE;
#else
static volatile ZTEST_BMEM
	uint8_t __aligned(sizeof(void *)) var_length_rx_buf[VAR_LENGTH_RX_BUF_SIZE];
static volatile ZTEST_BMEM
	uint8_t __aligned(sizeof(void *)) var_length_rx_buf_pool[VAR_LENGTH_RX_BUF_SIZE];
#endif /* NOCACHE_MEM */
static volatile ZTEST_BMEM size_t var_length_buf_rx_idx;
static volatile ZTEST_BMEM size_t var_length_buf_rx_pool_idx;
static volatile ZTEST_BMEM size_t var_length_rx_buf_size;

static void test_var_buf_length_callback(const struct device *dev, struct uart_event *evt,
					 void *user_data)
{
	switch (evt->type) {
	case UART_RX_RDY:
		memcpy((void *)&var_length_rx_buf[var_length_buf_rx_idx],
		       &evt->data.rx.buf[evt->data.rx.offset], evt->data.rx.len);
		var_length_buf_rx_idx += evt->data.rx.len;
		break;
	case UART_RX_DISABLED:
		k_sem_give(&rx_disabled);
		break;
	case UART_RX_BUF_REQUEST:
		uart_rx_buf_rsp(dev, (uint8_t *)&var_length_rx_buf_pool[var_length_buf_rx_pool_idx],
				var_length_rx_buf_size);
		var_length_buf_rx_pool_idx += var_length_rx_buf_size;
		break;
	default:
		break;
	}
}

static void *var_buf_length_setup(void)
{
	static int idx;

	uart_async_test_init(idx++);

	uart_callback_set(uart_dev, test_var_buf_length_callback, NULL);

	return NULL;
}

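/* Helper for the variable buffer length suite: receive tx_len bytes using
 * receive buffers of buf_len bytes and verify the reassembled data.
 */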
static void test_uart_async_var_buf(size_t buf_len, size_t tx_len)
{
	int ret;

#if NOCACHE_MEM
	static __aligned(sizeof(void *)) uint8_t tx_buffer[VAR_LENGTH_TX_BUF_SIZE] __used __NOCACHE;
#else
	static ZTEST_BMEM uint8_t tx_buffer[VAR_LENGTH_TX_BUF_SIZE];
#endif /* NOCACHE_MEM */

	for (size_t i = 0; i < VAR_LENGTH_TX_BUF_SIZE; ++i) {
		tx_buffer[i] = tx_len;
	}

	var_length_buf_rx_idx = 0;
	var_length_buf_rx_pool_idx = 0;
	memset((void *)var_length_rx_buf, 0, VAR_LENGTH_RX_BUF_SIZE);
	memset((void *)var_length_rx_buf_pool, 0, VAR_LENGTH_RX_BUF_SIZE);

	var_length_rx_buf_size = buf_len;

	ret = uart_rx_enable(uart_dev,
			     (uint8_t *)&var_length_rx_buf_pool[var_length_buf_rx_pool_idx],
			     buf_len, 2 * USEC_PER_MSEC);
	zassert_true(ret == 0, "[buff=%zu][tx=%zu]Failed to enable RX: %d\n", buf_len, tx_len, ret);
	var_length_buf_rx_pool_idx += buf_len;

	ret = uart_tx(uart_dev, tx_buffer, tx_len, 100 * USEC_PER_MSEC);
	zassert_true(ret == 0, "[buff=%zu][tx=%zu]Failed to TX: %d\n", buf_len, tx_len, ret);
	k_msleep(10);

	uart_rx_disable(uart_dev);
	zassert_equal(k_sem_take(&rx_disabled, K_MSEC(500)), 0,
		      "[buff=%zu][tx=%zu]RX_DISABLED timeout\n", buf_len, tx_len);

	zassert_equal(var_length_buf_rx_idx, tx_len,
		      "[buff=%zu][tx=%zu]Wrong number of bytes received, got: %zu, expected: %zu\n",
		      buf_len, tx_len, var_length_buf_rx_idx, tx_len);
	zassert_equal(memcmp((void *)var_length_rx_buf, tx_buffer, tx_len), 0,
		      "[buff=%zu][tx=%zu]Buffers not equal\n", buf_len, tx_len);
}

ZTEST_USER(uart_async_var_buf_length, test_var_buf_length)
{
	struct uart_config uart_cfg;

	zassert_equal(uart_config_get(uart_dev, &uart_cfg), 0);
	if (uart_cfg.baudrate > CONFIG_VAR_LENGTH_BUFFER_TEST_BUADRATE_LIMIT) {
		ztest_test_skip();
	}

	for (size_t buf_len = 1; buf_len < CONFIG_VAR_LENGTH_BUFFER_MAX_SIZE; ++buf_len) {
		for (size_t tx_len = 1; tx_len < VAR_LENGTH_TX_BUF_SIZE; ++tx_len) {
			test_uart_async_var_buf(buf_len, tx_len);
		}
	}
}

ZTEST_SUITE(uart_async_single_read, NULL, single_read_setup,
		NULL, NULL, NULL);

ZTEST_SUITE(uart_async_multi_rx, NULL, multiple_rx_enable_setup,
		NULL, NULL, NULL);

ZTEST_SUITE(uart_async_chain_read, NULL, chained_read_setup,
		NULL, NULL, NULL);

ZTEST_SUITE(uart_async_double_buf, NULL, double_buffer_setup,
		NULL, NULL, NULL);

ZTEST_SUITE(uart_async_read_abort, NULL, read_abort_setup,
		NULL, NULL, NULL);

ZTEST_SUITE(uart_async_chain_write, NULL, chained_write_setup,
		NULL, NULL, NULL);

ZTEST_SUITE(uart_async_long_buf, NULL, long_buffers_setup,
		NULL, NULL, NULL);

ZTEST_SUITE(uart_async_var_buf_length, NULL, var_buf_length_setup,
		NULL, NULL, NULL);

ZTEST_SUITE(uart_async_write_abort, NULL, write_abort_setup,
		NULL, NULL, NULL);

ZTEST_SUITE(uart_async_timeout, NULL, forever_timeout_setup,
		NULL, NULL, NULL);

void test_main(void)
{
	/* Run all suites for each DUT UART. The setup function of each suite
	 * picks the next UART from the array.
	 */
	ztest_run_all(NULL, false, ARRAY_SIZE(duts), 1);
	ztest_verify_all_test_suites_ran();
}