1 /*
2 * Copyright (c) 2023 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/kernel.h>
8 #include <zephyr/drivers/serial/uart_async_rx.h>
9 #include <zephyr/random/random.h>
10 #include <zephyr/ztest.h>
11 #include <zephyr/ztress.h>
12 #include <zephyr/sys/util.h>
13 #include <zephyr/logging/log.h>
14 LOG_MODULE_REGISTER(test);
15
/* Fill @p len bytes of @p buf with an incrementing pattern starting at
 * @p init. Values wrap naturally at 255 (uint8_t arithmetic).
 */
static void mem_fill(uint8_t *buf, uint8_t init, size_t len)
{
	uint8_t val = init;

	while (len-- > 0) {
		*buf++ = val++;
	}
}
22
/* Check that @p len bytes of @p buf hold the incrementing pattern written by
 * mem_fill() starting at @p init.
 *
 * @return true when the whole pattern matches, false otherwise.
 */
static bool mem_check(uint8_t *buf, uint8_t init, size_t len)
{
	for (size_t i = 0; i < len; i++) {
		/* Truncate the expected value to uint8_t so that buffers filled
		 * by mem_fill() (which wraps at 255) validate correctly even
		 * when init + i exceeds 255. Comparing against the promoted
		 * integer value would spuriously fail in that case.
		 */
		if (buf[i] != (uint8_t)(init + i)) {
			return false;
		}
	}

	return true;
}
33
/* Basic claim/consume flow: allocate one buffer, simulate reception in parts
 * and verify that claimed data matches the pattern written into the buffer.
 */
ZTEST(uart_async_rx, test_rx)
{
	int err;
	uint8_t buf[40];
	static const int buf_cnt = 4;
	size_t aloc_len;
	size_t claim_len;
	uint8_t *claim_buf;
	uint8_t *aloc_buf;
	struct uart_async_rx async_rx;
	bool buf_available;
	const struct uart_async_rx_config config = {
		.buffer = buf,
		.length = sizeof(buf),
		.buf_cnt = buf_cnt
	};

	err = uart_async_rx_init(&async_rx, &config);
	zassert_equal(err, 0);

	aloc_len = uart_async_rx_get_buf_len(&async_rx);
	aloc_buf = uart_async_rx_buf_req(&async_rx);

	/* Pre-fill the buffer with a known incrementing pattern. */
	mem_fill(aloc_buf, 0, aloc_len - 2);

	/* No data to read. */
	claim_len = uart_async_rx_data_claim(&async_rx, &claim_buf, 1);
	zassert_equal(claim_len, 0);

	/* Simulate partial write */
	uart_async_rx_on_rdy(&async_rx, aloc_buf, aloc_len - 4);

	/* There is at least 1 byte available */
	claim_len = uart_async_rx_data_claim(&async_rx, &claim_buf, 1);
	zassert_equal(claim_len, 1);
	zassert_equal(claim_buf, aloc_buf);
	zassert_true(mem_check(claim_buf, 0, 1));

	/* All received data is available */
	claim_len = uart_async_rx_data_claim(&async_rx, &claim_buf, 100);
	zassert_equal(claim_len, aloc_len - 4);
	zassert_equal(claim_buf, aloc_buf);
	zassert_true(mem_check(claim_buf, 0, aloc_len - 4));

	/* Simulate 2 bytes received to the same buffer. */
	uart_async_rx_on_rdy(&async_rx, aloc_buf, 2);

	/* Indicate end of the current buffer. */
	uart_async_rx_on_buf_rel(&async_rx, aloc_buf);

	/* Claim all data received so far */
	claim_len = uart_async_rx_data_claim(&async_rx, &claim_buf, 100);
	zassert_equal(claim_len, aloc_len - 2);
	zassert_equal(claim_buf, aloc_buf);
	zassert_true(mem_check(claim_buf, 0, aloc_len - 2));

	/* Consume first 2 bytes. */
	buf_available = uart_async_rx_data_consume(&async_rx, 2);
	zassert_true(buf_available);

	/* Now claim will return buffer taking into account that first 2 bytes are
	 * consumed.
	 */
	claim_len = uart_async_rx_data_claim(&async_rx, &claim_buf, 100);
	zassert_equal(claim_len, aloc_len - 4);
	zassert_equal(claim_buf, &aloc_buf[2]);
	zassert_true(mem_check(claim_buf, 2, aloc_len - 4));

	/* Consume rest of data. Get indication that it was end of the buffer. */
	buf_available = uart_async_rx_data_consume(&async_rx, aloc_len - 4);
	zassert_true(buf_available);
}
106
/* Verify that data can be claimed and consumed after all buffers have been
 * filled and released: fill each of the buffers with one byte first, then
 * read everything back and check ordering.
 */
ZTEST(uart_async_rx, test_rx_late_consume)
{
	int err;
	uint8_t buf[40] __aligned(4);
	static const int buf_cnt = 4;
	size_t aloc_len;
	size_t claim_len;
	uint8_t *claim_buf;
	uint8_t *aloc_buf;
	struct uart_async_rx async_rx;
	const struct uart_async_rx_config config = {
		.buffer = buf,
		.length = sizeof(buf),
		.buf_cnt = buf_cnt
	};

	err = uart_async_rx_init(&async_rx, &config);
	zassert_equal(err, 0);

	aloc_len = uart_async_rx_get_buf_len(&async_rx);
	/* Write one byte into each buffer and release it without consuming. */
	for (int i = 0; i < buf_cnt; i++) {
		aloc_buf = uart_async_rx_buf_req(&async_rx);

		aloc_buf[0] = (uint8_t)i;
		uart_async_rx_on_rdy(&async_rx, aloc_buf, 1);
		uart_async_rx_on_buf_rel(&async_rx, aloc_buf);
	}

	/* Now read back: one byte per buffer, in production order. */
	for (int i = 0; i < buf_cnt; i++) {
		claim_len = uart_async_rx_data_claim(&async_rx, &claim_buf, 100);
		zassert_equal(claim_len, 1);
		zassert_equal(claim_buf[0], (uint8_t)i);

		(void)uart_async_rx_data_consume(&async_rx, 1);
	}

	/* Everything consumed - nothing left to claim. */
	claim_len = uart_async_rx_data_claim(&async_rx, &claim_buf, 100);
	zassert_equal(claim_len, 0);
}
146
/* Shared state between the ztress producer and consumer contexts. */
struct test_async_rx {
	struct uart_async_rx async_rx;	/* Instance under test. */
	atomic_t pending_req;		/* Failed buffer requests awaiting a free buffer. */
	atomic_t total_pending_req;	/* Running count of failed buffer requests. */
	bool in_chunks;			/* Producer feeds buffers in multiple chunks. */
	uint8_t exp_consume;		/* Next byte value the consumer expects. */
	uint32_t byte_cnt;		/* Bytes produced so far; low byte is the next value. */
	uint8_t curr_len;		/* Bytes already written into curr_buf (chunk mode). */
	uint8_t *curr_buf;		/* Buffer currently filled by the producer. */
	uint8_t *next_buf;		/* Spare buffer used once curr_buf is released. */
	struct k_spinlock lock;		/* Protects curr_buf/next_buf handover. */
};
159
producer_no_chunks(void * user_data,uint32_t cnt,bool last,int prio)160 static bool producer_no_chunks(void *user_data, uint32_t cnt, bool last, int prio)
161 {
162 struct test_async_rx *test_data = (struct test_async_rx *)user_data;
163 struct uart_async_rx *async_rx = &test_data->async_rx;
164 uint32_t r = sys_rand32_get();
165 uint32_t len = MAX(1, MIN(uart_async_rx_get_buf_len(async_rx), r & 0x7));
166
167 if (test_data->curr_buf) {
168
169 for (int i = 0; i < len; i++) {
170 test_data->curr_buf[i] = (uint8_t)test_data->byte_cnt;
171 test_data->byte_cnt++;
172 }
173 uart_async_rx_on_rdy(async_rx, test_data->curr_buf, len);
174 uart_async_rx_on_buf_rel(async_rx, test_data->curr_buf);
175 test_data->curr_buf = test_data->next_buf;
176 test_data->next_buf = NULL;
177
178 uint8_t *buf = uart_async_rx_buf_req(async_rx);
179
180 if (buf) {
181 if (test_data->curr_buf == NULL) {
182 test_data->curr_buf = buf;
183 } else {
184 test_data->next_buf = buf;
185 }
186 } else {
187 atomic_inc(&test_data->pending_req);
188 atomic_inc(&test_data->total_pending_req);
189 }
190 }
191
192 return true;
193 }
194
/* Consumer ztress handler: claim up to 7 bytes a random number of times
 * (1-7 iterations), verify that the data continues the incrementing pattern
 * and consume it. When consuming releases a buffer and the producer has a
 * pending buffer request, request a buffer here and hand it over to the
 * producer under the spinlock.
 *
 * @return true to keep the ztress context running.
 */
static bool consumer(void *user_data, uint32_t cnt, bool last, int prio)
{
	struct test_async_rx *test_data = (struct test_async_rx *)user_data;
	struct uart_async_rx *async_rx = &test_data->async_rx;
	uint32_t r = sys_rand32_get();
	uint32_t rpt = MAX(1, r & 0x7);

	r >>= 3;

	for (uint32_t i = 0; i < rpt; i++) {
		/* Random claim length in 1-7 range, fresh bits each round. */
		size_t claim_len = MAX(1, r & 0x7);
		size_t len;
		uint8_t *buf;

		r >>= 3;
		len = uart_async_rx_data_claim(async_rx, &buf, claim_len);

		if (len == 0) {
			/* Nothing produced yet - try again on the next call. */
			return true;
		}

		/* Data must continue the incrementing pattern. */
		for (int j = 0; j < len; j++) {
			zassert_equal(buf[j], test_data->exp_consume,
					"%02x (exp:%02x) len:%d, total:%d",
					buf[j], test_data->exp_consume, len, test_data->byte_cnt);
			test_data->exp_consume++;
		}

		bool buf_released = uart_async_rx_data_consume(async_rx, len);

		if (buf_released && test_data->pending_req) {
			/* A buffer was just freed, so this request must succeed. */
			buf = uart_async_rx_buf_req(async_rx);
			zassert_true(buf != NULL);

			atomic_dec(&test_data->pending_req);
			k_spinlock_key_t key = k_spin_lock(&test_data->lock);

			/* Hand the buffer to the producer: fill whichever slot
			 * is empty; both occupied would be a logic error.
			 */
			if (test_data->curr_buf == NULL) {
				test_data->curr_buf = buf;
			} else if (test_data->next_buf == NULL) {
				test_data->next_buf = buf;
			} else {
				zassert_true(false);
			}
			k_spin_unlock(&test_data->lock, key);
		}
	}

	return true;
}
245
producer_in_chunks(void * user_data,uint32_t cnt,bool last,int prio)246 static bool producer_in_chunks(void *user_data, uint32_t cnt, bool last, int prio)
247 {
248 struct test_async_rx *test_data = (struct test_async_rx *)user_data;
249 struct uart_async_rx *async_rx = &test_data->async_rx;
250 uint32_t r = sys_rand32_get();
251 uint32_t rem = uart_async_rx_get_buf_len(async_rx) - test_data->curr_len;
252 uint32_t len = MAX(1, MIN(uart_async_rx_get_buf_len(async_rx), r & 0x7));
253
254 len = MIN(rem, len);
255
256 if (test_data->curr_buf) {
257 for (int i = 0; i < len; i++) {
258 test_data->curr_buf[test_data->curr_len + i] = (uint8_t)test_data->byte_cnt;
259 test_data->byte_cnt++;
260 }
261 uart_async_rx_on_rdy(async_rx, test_data->curr_buf, len);
262 test_data->curr_len += len;
263
264 if ((test_data->curr_len == uart_async_rx_get_buf_len(async_rx)) || (r & BIT(31))) {
265 test_data->curr_len = 0;
266 uart_async_rx_on_buf_rel(async_rx, test_data->curr_buf);
267
268 test_data->curr_buf = test_data->next_buf;
269 test_data->next_buf = NULL;
270
271 uint8_t *buf = uart_async_rx_buf_req(async_rx);
272
273 if (buf) {
274 if (test_data->curr_buf == NULL) {
275 test_data->curr_buf = buf;
276 } else {
277 test_data->next_buf = buf;
278 }
279 } else {
280 atomic_inc(&test_data->pending_req);
281 }
282 }
283 }
284
285 return true;
286 }
287
stress_test(bool in_chunks)288 static void stress_test(bool in_chunks)
289 {
290 int err;
291 uint8_t buf[40];
292 static const int buf_cnt = 4;
293 int preempt = 1000;
294 int timeout = 5000;
295 struct test_async_rx test_data;
296 const struct uart_async_rx_config config = {
297 .buffer = buf,
298 .length = sizeof(buf),
299 .buf_cnt = buf_cnt
300 };
301
302 memset(&test_data, 0, sizeof(test_data));
303
304 err = uart_async_rx_init(&test_data.async_rx, &config);
305 zassert_equal(err, 0);
306
307 test_data.in_chunks = in_chunks;
308 test_data.curr_buf = uart_async_rx_buf_req(&test_data.async_rx);
309
310 ztress_set_timeout(K_MSEC(timeout));
311
312 ZTRESS_EXECUTE(ZTRESS_THREAD(in_chunks ? producer_in_chunks : producer_no_chunks,
313 &test_data, 0, 0, Z_TIMEOUT_TICKS(20)),
314 ZTRESS_THREAD(consumer, &test_data, 0, preempt, Z_TIMEOUT_TICKS(20)));
315
316 TC_PRINT("total bytes: %d\n", test_data.byte_cnt);
317 ztress_set_timeout(K_NO_WAIT);
318 }
319
/* Stress test where the producer completes a whole buffer on every write. */
ZTEST(uart_async_rx, test_rx_ztress_no_chunks)
{
	stress_test(false);
}
324
/* Stress test where the producer fills each buffer in multiple chunks. */
ZTEST(uart_async_rx, test_rx_ztress_with_chunks)
{
	stress_test(true);
}
329
330 ZTEST_SUITE(uart_async_rx, NULL, NULL, NULL, NULL, NULL);
331