/*
 * Copyright (c) 2023 Fabian Blatz
 * Copyright (c) 2024 grandcentrix GmbH
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT zephyr_uart_emul

#include <errno.h>

#include <zephyr/drivers/emul.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/drivers/serial/uart_emul.h>
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/ring_buffer.h>
#include <zephyr/sys/util.h>

LOG_MODULE_REGISTER(uart_emul, CONFIG_UART_LOG_LEVEL);

struct uart_emul_config {
	/* emul_list has to be the first member */
	struct emul_list_for_bus emul_list;

	bool loopback;
	size_t latch_buffer_size;
};

BUILD_ASSERT(offsetof(struct uart_emul_config, emul_list) == 0);

/* Device run time data */
struct uart_emul_data {
	/* List of struct uart_emul associated with the device */
	sys_slist_t emuls;

	const struct device *dev;

	struct uart_config cfg;
	int errors;

	struct ring_buf *rx_rb;
	struct k_spinlock rx_lock;

	uart_emul_callback_tx_data_ready_t tx_data_ready_cb;
	void *user_data;

	struct ring_buf *tx_rb;
	struct k_spinlock tx_lock;

#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	bool rx_irq_en;
	bool tx_irq_en;
	struct k_work irq_work;

	uart_irq_callback_user_data_t irq_cb;
	void *irq_cb_udata;
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */

#ifdef CONFIG_UART_ASYNC_API
	bool rx_async_en;
	bool rx_stopping;
	bool rx_release_on_timeout;

	struct k_work tx_work;
	struct k_work rx_work;
	struct k_work rx_disable_work;
	struct k_work_delayable rx_timeout_work;

	uart_callback_t uart_callback;
	void *callback_user_data;

	const uint8_t *tx_buf;
	size_t tx_buf_len;
	size_t tx_buf_offset;

	uint8_t *rx_buf;
	size_t rx_buf_len;
	size_t rx_buf_offset;
	size_t rx_buf_data_len;
	int32_t rx_buf_timeout;

	uint8_t *rx_buf_next;
	size_t rx_buf_next_len;
#endif /* CONFIG_UART_ASYNC_API */
};

/*
 * Define a local work queue thread to emulate different thread priorities.
 *
 * A UART driver may call back from within a thread with a higher or lower priority
 * than the thread calling the UART API. Concurrency issues can stay hidden when
 * the thread priorities are the same, or even more so when the same thread is
 * used, as with the system work queue.
 */
K_THREAD_STACK_DEFINE(uart_emul_stack_area, CONFIG_UART_EMUL_WORK_Q_STACK_SIZE);
struct k_work_q uart_emul_work_q;

int uart_emul_init_work_q(void)
{
	struct k_work_queue_config cfg = {
		.name = "uart_emul_workq",
		.no_yield = false,
	};

	k_work_queue_init(&uart_emul_work_q);
	k_work_queue_start(&uart_emul_work_q, uart_emul_stack_area,
			   K_THREAD_STACK_SIZEOF(uart_emul_stack_area),
			   CONFIG_UART_EMUL_WORK_Q_PRIORITY, &cfg);
	return 0;
}

SYS_INIT(uart_emul_init_work_q, POST_KERNEL, 0);
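
/*
 * The work queue priority and stack size are Kconfig-controlled, so a test can
 * place the emulator callbacks above or below its own thread priority, e.g. in
 * prj.conf (illustrative values, not the defaults):
 *
 *   CONFIG_UART_EMUL_WORK_Q_PRIORITY=-1
 *   CONFIG_UART_EMUL_WORK_Q_STACK_SIZE=2048
 */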

static void uart_emul_tx_data_ready(const struct device *dev)
{
	struct uart_emul_data *data = dev->data;
	sys_snode_t *node;

	if (data->tx_data_ready_cb) {
		(data->tx_data_ready_cb)(dev, ring_buf_size_get(data->tx_rb), data->user_data);
	}
	SYS_SLIST_FOR_EACH_NODE(&data->emuls, node) {
		struct uart_emul *emul = CONTAINER_OF(node, struct uart_emul, node);

		__ASSERT_NO_MSG(emul->api != NULL);
		__ASSERT_NO_MSG(emul->api->tx_data_ready != NULL);

		emul->api->tx_data_ready(dev, ring_buf_size_get(data->tx_rb), emul->target);
	}
}

static int uart_emul_poll_in(const struct device *dev, unsigned char *p_char)
{
	struct uart_emul_data *drv_data = dev->data;
	k_spinlock_key_t key;
	uint32_t read;

	key = k_spin_lock(&drv_data->rx_lock);
	read = ring_buf_get(drv_data->rx_rb, p_char, 1);
	k_spin_unlock(&drv_data->rx_lock, key);

	if (!read) {
		LOG_DBG("Rx buffer is empty");
		return -1;
	}

	return 0;
}

static void uart_emul_poll_out(const struct device *dev, unsigned char out_char)
{
	struct uart_emul_data *drv_data = dev->data;
	const struct uart_emul_config *drv_cfg = dev->config;
	k_spinlock_key_t key;
	uint32_t written;

	key = k_spin_lock(&drv_data->tx_lock);
	written = ring_buf_put(drv_data->tx_rb, &out_char, 1);
	k_spin_unlock(&drv_data->tx_lock, key);

	if (!written) {
		LOG_DBG("Tx buffer is full");
		return;
	}

	if (drv_cfg->loopback) {
		uart_emul_put_rx_data(dev, &out_char, 1);
	}

	uart_emul_tx_data_ready(dev);
}
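
/*
 * Polling I/O works directly against the RX/TX ring buffers above. A test can
 * inject bytes for uart_poll_in() and observe bytes written by uart_poll_out(),
 * e.g. (a minimal sketch; `dev` is assumed to point at a zephyr,uart-emul
 * instance):
 *
 *   unsigned char in_char;
 *   uint8_t out_char;
 *
 *   uart_emul_put_rx_data(dev, (const uint8_t *)"a", 1);
 *   uart_poll_in(dev, &in_char);              // in_char == 'a'
 *
 *   uart_poll_out(dev, 'b');
 *   uart_emul_get_tx_data(dev, &out_char, 1); // out_char == 'b'
 */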

static int uart_emul_err_check(const struct device *dev)
{
	struct uart_emul_data *drv_data = dev->data;
	int errors = drv_data->errors;

	drv_data->errors = 0;
	return errors;
}

#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
static int uart_emul_configure(const struct device *dev, const struct uart_config *cfg)
{
	struct uart_emul_data *drv_data = dev->data;

	memcpy(&drv_data->cfg, cfg, sizeof(struct uart_config));
	return 0;
}

static int uart_emul_config_get(const struct device *dev, struct uart_config *cfg)
{
	const struct uart_emul_data *drv_data = dev->data;

	memcpy(cfg, &drv_data->cfg, sizeof(struct uart_config));
	return 0;
}
#endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */

#ifdef CONFIG_UART_INTERRUPT_DRIVEN
static int uart_emul_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size)
{
	int ret;
	struct uart_emul_data *data = dev->data;
	const struct uart_emul_config *config = dev->config;
	uint32_t put_size = MIN(config->latch_buffer_size, size);

	K_SPINLOCK(&data->tx_lock) {
		ret = ring_buf_put(data->tx_rb, tx_data, put_size);
	}

	if (config->loopback) {
		uart_emul_put_rx_data(dev, (uint8_t *)tx_data, put_size);
	}

	uart_emul_tx_data_ready(dev);

	return ret;
}

static int uart_emul_fifo_read(const struct device *dev, uint8_t *rx_data, int size)
{
	struct uart_emul_data *data = dev->data;
	const struct uart_emul_config *config = dev->config;
	uint32_t bytes_to_read;

	K_SPINLOCK(&data->rx_lock) {
		bytes_to_read = MIN(config->latch_buffer_size, ring_buf_size_get(data->rx_rb));
		bytes_to_read = MIN(bytes_to_read, size);
		ring_buf_get(data->rx_rb, rx_data, bytes_to_read);
	}

	return bytes_to_read;
}

static int uart_emul_irq_tx_ready(const struct device *dev)
{
	int available = 0;
	struct uart_emul_data *data = dev->data;

	K_SPINLOCK(&data->tx_lock) {
		if (!data->tx_irq_en) {
			K_SPINLOCK_BREAK;
		}

		available = ring_buf_space_get(data->tx_rb);
	}

	return available;
}

static int uart_emul_irq_rx_ready(const struct device *dev)
{
	bool ready = false;
	struct uart_emul_data *data = dev->data;

	K_SPINLOCK(&data->rx_lock) {
		if (!data->rx_irq_en) {
			K_SPINLOCK_BREAK;
		}

		ready = !ring_buf_is_empty(data->rx_rb);
	}

	return ready;
}

static void uart_emul_irq_handler(struct k_work *work)
{
	struct uart_emul_data *data = CONTAINER_OF(work, struct uart_emul_data, irq_work);
	const struct device *dev = data->dev;
	uart_irq_callback_user_data_t cb = data->irq_cb;
	void *udata = data->irq_cb_udata;

	if (cb == NULL) {
		LOG_DBG("No IRQ callback configured for uart_emul device %p", dev);
		return;
	}

	while (true) {
		bool have_work = false;

		K_SPINLOCK(&data->tx_lock) {
			if (!data->tx_irq_en) {
				K_SPINLOCK_BREAK;
			}

			have_work = have_work || ring_buf_space_get(data->tx_rb) > 0;
		}

		K_SPINLOCK(&data->rx_lock) {
			if (!data->rx_irq_en) {
				K_SPINLOCK_BREAK;
			}

			have_work = have_work || !ring_buf_is_empty(data->rx_rb);
		}

		if (!have_work) {
			break;
		}

		cb(dev, udata);
	}
}
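
/*
 * The handler above keeps re-invoking the user IRQ callback while there is
 * pending work, mimicking a level-triggered UART interrupt. A typical callback
 * follows the standard Zephyr pattern (a sketch; my_uart_cb and buf are
 * illustrative names):
 *
 *   static void my_uart_cb(const struct device *dev, void *user_data)
 *   {
 *           uint8_t buf[16];
 *
 *           while (uart_irq_update(dev) && uart_irq_is_pending(dev)) {
 *                   if (uart_irq_rx_ready(dev)) {
 *                           uart_fifo_read(dev, buf, sizeof(buf));
 *                   }
 *           }
 *   }
 *
 * registered with uart_irq_callback_user_data_set(dev, my_uart_cb, NULL) and
 * armed via uart_irq_rx_enable(dev).
 */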

static int uart_emul_irq_is_pending(const struct device *dev)
{
	return uart_emul_irq_tx_ready(dev) || uart_emul_irq_rx_ready(dev);
}

static void uart_emul_irq_tx_enable(const struct device *dev)
{
	bool submit_irq_work;
	struct uart_emul_data *const data = dev->data;

	K_SPINLOCK(&data->tx_lock) {
		data->tx_irq_en = true;
		submit_irq_work = ring_buf_space_get(data->tx_rb) > 0;
	}

	if (submit_irq_work) {
		(void)k_work_submit_to_queue(&uart_emul_work_q, &data->irq_work);
	}
}

static void uart_emul_irq_rx_enable(const struct device *dev)
{
	bool submit_irq_work;
	struct uart_emul_data *const data = dev->data;

	K_SPINLOCK(&data->rx_lock) {
		data->rx_irq_en = true;
		submit_irq_work = !ring_buf_is_empty(data->rx_rb);
	}

	if (submit_irq_work) {
		(void)k_work_submit_to_queue(&uart_emul_work_q, &data->irq_work);
	}
}

static void uart_emul_irq_tx_disable(const struct device *dev)
{
	struct uart_emul_data *const data = dev->data;

	K_SPINLOCK(&data->tx_lock) {
		data->tx_irq_en = false;
	}
}

static void uart_emul_irq_rx_disable(const struct device *dev)
{
	struct uart_emul_data *const data = dev->data;

	K_SPINLOCK(&data->rx_lock) {
		data->rx_irq_en = false;
	}
}

static int uart_emul_irq_tx_complete(const struct device *dev)
{
	bool tx_complete = false;
	struct uart_emul_data *const data = dev->data;

	K_SPINLOCK(&data->tx_lock) {
		tx_complete = ring_buf_is_empty(data->tx_rb);
	}

	return tx_complete;
}

static void uart_emul_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb,
				       void *user_data)
{
	struct uart_emul_data *const data = dev->data;

	data->irq_cb = cb;
	data->irq_cb_udata = user_data;
}

static int uart_emul_irq_update(const struct device *dev)
{
	return 1;
}
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */

#ifdef CONFIG_UART_ASYNC_API
static void uart_emul_post_event(const struct device *dev, struct uart_event *evt)
{
	struct uart_emul_data *data = dev->data;

	if (!data->uart_callback) {
		LOG_DBG("No async callback configured for uart_emul device %p", dev);
		return;
	}

	data->uart_callback(dev, evt, data->callback_user_data);
}

static void uart_emul_simple_event(const struct device *dev, enum uart_event_type type)
{
	uart_emul_post_event(dev, &(struct uart_event){.type = type});
}

static void uart_emul_async_switch_buf_nolock(struct uart_emul_data *data)
{
	data->rx_buf = data->rx_buf_next;
	data->rx_buf_len = data->rx_buf_next_len;
	data->rx_buf_offset = 0;
	data->rx_buf_data_len = 0;
	data->rx_buf_next = NULL;
	data->rx_buf_next_len = 0;
}

static void uart_emul_async_rx_timeout_handler(struct k_work *_work)
{
	struct k_work_delayable *work = k_work_delayable_from_work(_work);
	struct uart_emul_data *data = CONTAINER_OF(work, struct uart_emul_data, rx_timeout_work);
	const struct device *dev = data->dev;

	uint8_t *rx_buf;
	size_t rx_buf_len;
	size_t rx_buf_offset;
	size_t rx_buf_data_len;
	bool rx_en;
	bool rx_buf_released = false;
	bool rx_stopped = false;

	K_SPINLOCK(&data->rx_lock) {
		rx_en = data->rx_async_en;
		rx_buf = data->rx_buf;
		rx_buf_len = data->rx_buf_len;
		rx_buf_offset = data->rx_buf_offset;
		rx_buf_data_len = data->rx_buf_data_len;

		data->rx_buf_offset += rx_buf_data_len;
		data->rx_buf_data_len = 0;

		if (data->rx_buf_offset >= rx_buf_len ||
		    (rx_buf_data_len > 0 && data->rx_release_on_timeout)) {
			rx_buf_released = true;
			uart_emul_async_switch_buf_nolock(data);
			if (data->rx_buf == NULL) {
				/* There was no second buffer scheduled, so stop receiving */
				rx_stopped = true;
				data->rx_async_en = false;
			}
		}
	}

	if (!rx_en || rx_buf == NULL || rx_buf_data_len == 0) {
		return;
	}

	struct uart_event rx_rdy_event = {
		.type = UART_RX_RDY,
		.data.rx = {
			.buf = rx_buf,
			.offset = rx_buf_offset,
			.len = rx_buf_data_len,
		},
	};

	uart_emul_post_event(dev, &rx_rdy_event);

	if (rx_buf_released) {
		struct uart_event rx_buf_released_event = {
			.type = UART_RX_BUF_RELEASED,
			.data.rx_buf.buf = rx_buf,
		};

		uart_emul_post_event(dev, &rx_buf_released_event);
	}
	if (rx_stopped) {
		uart_emul_simple_event(dev, UART_RX_DISABLED);
	}
}

static void uart_emul_async_rx_handler(struct k_work *work)
{
	struct uart_emul_data *data = CONTAINER_OF(work, struct uart_emul_data, rx_work);
	const struct device *dev = data->dev;

	bool rx_en = false;
	bool empty = true;

	do {
		bool rx_rdy = false;
		bool buf_request = false;

		uint8_t *rx_buf = NULL;
		size_t buf_len;
		size_t offset;
		size_t data_len;

		K_SPINLOCK(&data->rx_lock) {
			rx_en = data->rx_async_en;
			rx_buf = data->rx_buf;
			buf_len = data->rx_buf_len;
			offset = data->rx_buf_offset;
			data_len = data->rx_buf_data_len;
			empty = ring_buf_is_empty(data->rx_rb);

			if (!rx_en) {
				K_SPINLOCK_BREAK;
			}

			if (rx_buf == NULL) {
				uart_emul_async_switch_buf_nolock(data);
				rx_buf = data->rx_buf;
				buf_len = data->rx_buf_len;
				offset = data->rx_buf_offset;
				data_len = data->rx_buf_data_len;
			}

			if (rx_buf == NULL) {
				/* During the last iteration the buffer was released but the
				 * application did not provide a new buffer. Stop RX and quit now.
				 */
				data->rx_async_en = false;
				K_SPINLOCK_BREAK;
			}

			if (empty) {
				K_SPINLOCK_BREAK;
			}

			buf_request = data_len == 0 && data->rx_buf_next == NULL;

			uint32_t read = ring_buf_get(data->rx_rb, &rx_buf[offset + data_len],
						     buf_len - (offset + data_len));
			data_len += read;
			data->rx_buf_data_len = data_len;

			if (offset + data_len >= data->rx_buf_len) {
				rx_rdy = true;
				data->rx_buf = NULL;
				data->rx_buf_len = 0;
				data->rx_buf_offset = 0;
				data->rx_buf_data_len = 0;
			}
		}

		if (!rx_en) {
			break;
		}

		if (rx_buf == NULL) {
			uart_emul_simple_event(dev, UART_RX_DISABLED);
			break;
		}

		if (empty && data->rx_buf_timeout != SYS_FOREVER_US) {
			(void)k_work_reschedule_for_queue(&uart_emul_work_q, &data->rx_timeout_work,
							  K_USEC(data->rx_buf_timeout));
		}

		if (buf_request) {
			uart_emul_simple_event(dev, UART_RX_BUF_REQUEST);
		}

		if (rx_rdy) {
			struct uart_event rx_rdy_event = {
				.type = UART_RX_RDY,
				.data.rx = {
					.buf = rx_buf,
					.offset = offset,
					.len = data_len,
				},
			};

			uart_emul_post_event(dev, &rx_rdy_event);

			struct uart_event rx_buf_released_event = {
				.type = UART_RX_BUF_RELEASED,
				.data.rx_buf.buf = rx_buf,
			};

			uart_emul_post_event(dev, &rx_buf_released_event);
		}
	} while (rx_en && !empty);
}
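
/*
 * Asynchronous reception is driven entirely from the emulator work queue. The
 * usual application-side flow looks like this (a sketch; async_cb, rx_buf and
 * next_buf are illustrative names, with next_buf a second static buffer):
 *
 *   static void async_cb(const struct device *dev, struct uart_event *evt, void *ud)
 *   {
 *           switch (evt->type) {
 *           case UART_RX_RDY:
 *                   // evt->data.rx.buf + evt->data.rx.offset holds evt->data.rx.len bytes
 *                   break;
 *           case UART_RX_BUF_REQUEST:
 *                   uart_rx_buf_rsp(dev, next_buf, sizeof(next_buf));
 *                   break;
 *           default:
 *                   break;
 *           }
 *   }
 *
 *   uart_callback_set(dev, async_cb, NULL);
 *   uart_rx_enable(dev, rx_buf, sizeof(rx_buf), 1000);
 *
 * Bytes injected with uart_emul_put_rx_data() then surface as UART_RX_RDY
 * events, either when the active buffer fills up or when the RX timeout
 * handler above fires.
 */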

static void uart_emul_async_tx_handler(struct k_work *work)
{
	struct uart_emul_data *data = CONTAINER_OF(work, struct uart_emul_data, tx_work);
	const struct device *dev = data->dev;
	const struct uart_emul_config *config = dev->config;

	uint32_t written;

	const uint8_t *tx_buf = NULL;
	size_t tx_buf_len = 0;
	size_t tx_buf_offset = 0;
	bool tx_done = true;

	K_SPINLOCK(&data->tx_lock) {
		tx_buf = data->tx_buf;
		tx_buf_len = data->tx_buf_len;
		tx_buf_offset = data->tx_buf_offset;

		if (!tx_buf) {
			K_SPINLOCK_BREAK;
		}

		written = ring_buf_put(data->tx_rb, &data->tx_buf[tx_buf_offset],
				       tx_buf_len - tx_buf_offset);
		tx_done = written == (tx_buf_len - tx_buf_offset);
		if (!tx_done) {
			data->tx_buf_offset += written;
			K_SPINLOCK_BREAK;
		}
		data->tx_buf = NULL;
		data->tx_buf_len = 0;
		data->tx_buf_offset = 0;
	}

	if (!tx_buf) {
		return;
	}

	if (config->loopback && written) {
		uint32_t loop_written = uart_emul_put_rx_data(dev, &tx_buf[tx_buf_offset], written);

		if (loop_written < written) {
			LOG_WRN("Lost %" PRIu32 " bytes on loopback", written - loop_written);
		}
	}

	uart_emul_tx_data_ready(dev);

	if ((config->loopback && written) || !written) {
		/* When using the loopback fixture, simply drop all bytes in the ring buffer
		 * that were not consumed by tx_data_ready_cb().
		 */

		uint32_t flushed = uart_emul_flush_tx_data(dev);

		if (flushed) {
			if (written) {
				LOG_DBG("Flushed %" PRIu32 " unused bytes from tx buffer", flushed);
			} else {
				LOG_WRN("Flushed %" PRIu32
					" unused bytes from tx buffer to break out of infinite "
					"loop! Consume or flush the bytes from the tx ring buffer "
					"in your test case to prevent this!",
					flushed);
			}
		}
	}

	if (!tx_done) {
		/* We are not done yet, so yield back into the workqueue.
		 *
		 * This would essentially be an infinite loop if tx_data_ready_cb() did not
		 * consume the bytes in the tx ring buffer.
		 */
		k_work_submit_to_queue(&uart_emul_work_q, &data->tx_work);
		return;
	}

	struct uart_event tx_done_event = {
		.type = UART_TX_DONE,
		.data.tx = {
			.buf = tx_buf,
			.len = tx_buf_len,
		},
	};

	uart_emul_post_event(dev, &tx_done_event);
}

static void uart_emul_rx_stop(const struct device *dev, struct uart_emul_data *data)
{
	uint8_t *rx_buf = NULL;
	size_t rx_buf_offset = 0;
	size_t rx_buf_data_len = 0;

	k_work_cancel_delayable(&data->rx_timeout_work);

	K_SPINLOCK(&data->rx_lock) {
		if (!data->rx_async_en) {
			K_SPINLOCK_BREAK;
		}
		rx_buf = data->rx_buf;
		rx_buf_offset = data->rx_buf_offset;
		rx_buf_data_len = data->rx_buf_data_len;

		data->rx_buf = NULL;
		data->rx_buf_len = 0;
		data->rx_buf_offset = 0;
		data->rx_buf_data_len = 0;
		data->rx_buf_next = NULL;
		data->rx_buf_next_len = 0;
		data->rx_async_en = false;
		data->rx_stopping = false;
	}

	if (rx_buf == NULL) {
		return;
	}

	if (rx_buf_data_len > 0) {
		struct uart_event rx_rdy_event = {
			.type = UART_RX_RDY,
			.data.rx = {
				.buf = rx_buf,
				.offset = rx_buf_offset,
				.len = rx_buf_data_len,
			},
		};

		uart_emul_post_event(dev, &rx_rdy_event);
	}

	struct uart_event rx_buf_released_event = {
		.type = UART_RX_BUF_RELEASED,
		.data.rx_buf.buf = rx_buf,
	};

	uart_emul_post_event(dev, &rx_buf_released_event);
	uart_emul_simple_event(dev, UART_RX_DISABLED);
}

static void uart_emul_async_rx_disable_handler(struct k_work *work)
{
	struct uart_emul_data *data = CONTAINER_OF(work, struct uart_emul_data, rx_disable_work);
	const struct device *dev = data->dev;

	uart_emul_rx_stop(dev, data);
}

static int uart_emul_callback_set(const struct device *dev, uart_callback_t callback,
				  void *user_data)
{
	struct uart_emul_data *data = dev->data;

	data->uart_callback = callback;
	data->callback_user_data = user_data;

	return 0;
}

static int uart_emul_tx(const struct device *dev, const uint8_t *buf, size_t len, int32_t timeout)
{
	struct uart_emul_data *data = dev->data;
	int ret = 0;

	K_SPINLOCK(&data->tx_lock) {
		if (data->tx_buf) {
			ret = -EBUSY;
			K_SPINLOCK_BREAK;
		}

		data->tx_buf = buf;
		data->tx_buf_len = len;
		data->tx_buf_offset = 0;

		k_work_submit_to_queue(&uart_emul_work_q, &data->tx_work);
	}

	return ret;
}

static int uart_emul_tx_abort(const struct device *dev)
{
	struct uart_emul_data *data = dev->data;
	const uint8_t *tx_buf = NULL;
	size_t tx_buf_sent;

	K_SPINLOCK(&data->tx_lock) {
		tx_buf = data->tx_buf;
		tx_buf_sent = data->tx_buf_offset;

		data->tx_buf = NULL;
		data->tx_buf_len = 0;
		data->tx_buf_offset = 0;

		k_work_cancel(&data->tx_work);
	}

	if (!tx_buf) {
		return -EFAULT;
	}

	struct uart_event tx_aborted_event = {
		.type = UART_TX_ABORTED,
		.data.tx = {
			.buf = tx_buf,
			.len = tx_buf_sent,
		},
	};

	uart_emul_post_event(dev, &tx_aborted_event);

	return 0;
}

static int uart_emul_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len)
{
	struct uart_emul_data *data = dev->data;
	int ret = 0;

	K_SPINLOCK(&data->rx_lock) {
		if (!data->rx_async_en) {
			ret = -EACCES;
			K_SPINLOCK_BREAK;
		}

		if (data->rx_buf_next != NULL) {
			ret = -EBUSY;
			K_SPINLOCK_BREAK;
		}

		data->rx_buf_next = buf;
		data->rx_buf_next_len = len;
	}

	return ret;
}

static int uart_emul_rx_enable(const struct device *dev, uint8_t *buf, size_t len, int32_t timeout)
{
	struct uart_emul_data *data = dev->data;
	int ret = 0;
	bool rx_stopping;

	K_SPINLOCK(&data->rx_lock) {
		rx_stopping = data->rx_stopping;
		k_work_cancel(&data->rx_disable_work);
	}

	if (rx_stopping) {
		uart_emul_rx_stop(dev, data);
	}

	K_SPINLOCK(&data->rx_lock) {
		if (data->rx_async_en) {
			ret = -EBUSY;
			K_SPINLOCK_BREAK;
		}

		data->rx_async_en = true;
		data->rx_buf = buf;
		data->rx_buf_len = len;
		data->rx_buf_timeout = timeout;
		data->rx_buf_offset = 0;
		data->rx_buf_data_len = 0;
		data->rx_buf_next = NULL;
		data->rx_buf_next_len = 0;

		if (!ring_buf_is_empty(data->rx_rb)) {
			(void)k_work_submit_to_queue(&uart_emul_work_q, &data->rx_work);
		}
	}

	return ret;
}

static int uart_emul_rx_disable(const struct device *dev)
{
	struct uart_emul_data *data = dev->data;
	int ret = 0;

	K_SPINLOCK(&data->rx_lock) {
		if (!data->rx_async_en) {
			ret = -EFAULT;
			K_SPINLOCK_BREAK;
		}
		data->rx_stopping = true;
		k_work_submit_to_queue(&uart_emul_work_q, &data->rx_disable_work);
	}

	return ret;
}
#endif /* CONFIG_UART_ASYNC_API */

static DEVICE_API(uart, uart_emul_api) = {
	.poll_in = uart_emul_poll_in,
	.poll_out = uart_emul_poll_out,
#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
	.config_get = uart_emul_config_get,
	.configure = uart_emul_configure,
#endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */
	.err_check = uart_emul_err_check,
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	.fifo_fill = uart_emul_fifo_fill,
	.fifo_read = uart_emul_fifo_read,
	.irq_tx_enable = uart_emul_irq_tx_enable,
	.irq_rx_enable = uart_emul_irq_rx_enable,
	.irq_tx_disable = uart_emul_irq_tx_disable,
	.irq_rx_disable = uart_emul_irq_rx_disable,
	.irq_tx_ready = uart_emul_irq_tx_ready,
	.irq_rx_ready = uart_emul_irq_rx_ready,
	.irq_tx_complete = uart_emul_irq_tx_complete,
	.irq_callback_set = uart_emul_irq_callback_set,
	.irq_update = uart_emul_irq_update,
	.irq_is_pending = uart_emul_irq_is_pending,
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
#ifdef CONFIG_UART_ASYNC_API
	.callback_set = uart_emul_callback_set,
	.tx = uart_emul_tx,
	.tx_abort = uart_emul_tx_abort,
	.rx_enable = uart_emul_rx_enable,
	.rx_buf_rsp = uart_emul_rx_buf_rsp,
	.rx_disable = uart_emul_rx_disable,
#endif /* CONFIG_UART_ASYNC_API */
};

void uart_emul_callback_tx_data_ready_set(const struct device *dev,
					  uart_emul_callback_tx_data_ready_t cb, void *user_data)
{
	struct uart_emul_data *drv_data = dev->data;

	drv_data->tx_data_ready_cb = cb;
	drv_data->user_data = user_data;
}
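
/*
 * For async TX tests it is important that this callback (or the test itself)
 * consumes the TX ring buffer; otherwise uart_emul_async_tx_handler() flushes
 * it and logs a warning. A minimal sketch (tx_ready is an illustrative name):
 *
 *   static void tx_ready(const struct device *dev, size_t size, void *user_data)
 *   {
 *           uint8_t buf[64];
 *
 *           uart_emul_get_tx_data(dev, buf, MIN(size, sizeof(buf)));
 *   }
 *
 *   uart_emul_callback_tx_data_ready_set(dev, tx_ready, NULL);
 */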

uint32_t uart_emul_put_rx_data(const struct device *dev, const uint8_t *data, size_t size)
{
	struct uart_emul_data *drv_data = dev->data;
	uint32_t count;
	__unused bool empty;
	__unused bool irq_en;
	__unused bool rx_en;

	K_SPINLOCK(&drv_data->rx_lock) {
		count = ring_buf_put(drv_data->rx_rb, data, size);
		empty = ring_buf_is_empty(drv_data->rx_rb);
		IF_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN, (irq_en = drv_data->rx_irq_en;));
		IF_ENABLED(CONFIG_UART_ASYNC_API, (rx_en = drv_data->rx_async_en;));
	}

	if (count < size) {
		uart_emul_set_errors(dev, UART_ERROR_OVERRUN);
	}

	IF_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN, (
		if (count > 0 && irq_en && !empty) {
			(void)k_work_submit_to_queue(&uart_emul_work_q, &drv_data->irq_work);
		}
	))
	IF_ENABLED(CONFIG_UART_ASYNC_API, (
		if (count > 0 && rx_en && !empty) {
			(void)k_work_submit_to_queue(&uart_emul_work_q, &drv_data->rx_work);
		}
	))

	return count;
}

uint32_t uart_emul_get_tx_data(const struct device *dev, uint8_t *data, size_t size)
{
	struct uart_emul_data *drv_data = dev->data;
	k_spinlock_key_t key;
	uint32_t count;

	key = k_spin_lock(&drv_data->tx_lock);
	count = ring_buf_get(drv_data->tx_rb, data, size);
	k_spin_unlock(&drv_data->tx_lock, key);
	return count;
}

uint32_t uart_emul_flush_rx_data(const struct device *dev)
{
	struct uart_emul_data *drv_data = dev->data;
	k_spinlock_key_t key;
	uint32_t count;

	key = k_spin_lock(&drv_data->rx_lock);
	count = ring_buf_size_get(drv_data->rx_rb);
	ring_buf_reset(drv_data->rx_rb);
	k_spin_unlock(&drv_data->rx_lock, key);
	return count;
}

uint32_t uart_emul_flush_tx_data(const struct device *dev)
{
	struct uart_emul_data *drv_data = dev->data;
	k_spinlock_key_t key;
	uint32_t count;

	key = k_spin_lock(&drv_data->tx_lock);
	count = ring_buf_size_get(drv_data->tx_rb);
	ring_buf_reset(drv_data->tx_rb);
	k_spin_unlock(&drv_data->tx_lock, key);
	return count;
}

void uart_emul_set_errors(const struct device *dev, int errors)
{
	struct uart_emul_data *drv_data = dev->data;

	drv_data->errors |= errors;
}
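
/*
 * Injected errors accumulate until uart_err_check() reads and clears them,
 * which lets a test exercise error handling paths, e.g.:
 *
 *   uart_emul_set_errors(dev, UART_ERROR_PARITY | UART_ERROR_FRAMING);
 *   int errors = uart_err_check(dev); // both flags set; a second call returns 0
 */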

void uart_emul_set_release_buffer_on_timeout(const struct device *dev, bool release_on_timeout)
{
	__unused struct uart_emul_data *drv_data = dev->data;

	IF_ENABLED(CONFIG_UART_ASYNC_API, (drv_data->rx_release_on_timeout = release_on_timeout;));
}

int uart_emul_register(const struct device *dev, struct uart_emul *emul)
{
	struct uart_emul_data *data = dev->data;

	sys_slist_append(&data->emuls, &emul->node);

	return 0;
}
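
/*
 * Peripheral emulators attach themselves with uart_emul_register(), typically
 * from their EMUL_DT_DEFINE init function; each registered emulator then has
 * its api->tx_data_ready() invoked whenever TX data is pending (see
 * uart_emul_tx_data_ready() above). A sketch, assuming `my_emul_data` embeds a
 * struct uart_emul whose `api` and `target` fields are already set:
 *
 *   static int my_emul_init(const struct emul *target, const struct device *bus)
 *   {
 *           struct my_emul_data *data = target->data;
 *
 *           return uart_emul_register(bus, &data->emul);
 *   }
 */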

#define UART_EMUL_RX_FIFO_SIZE(inst) (DT_INST_PROP(inst, rx_fifo_size))
#define UART_EMUL_TX_FIFO_SIZE(inst) (DT_INST_PROP(inst, tx_fifo_size))
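
/*
 * FIFO sizes, loopback mode and the latch buffer size come from devicetree.
 * A test overlay node might look like this (an illustrative sketch; the
 * property values are not defaults):
 *
 *   euart0: uart-emul0 {
 *           compatible = "zephyr,uart-emul";
 *           status = "okay";
 *           rx-fifo-size = <256>;
 *           tx-fifo-size = <256>;
 *           latch-buffer-size = <1024>;
 *           loopback;
 *   };
 */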

#define EMUL_LINK_AND_COMMA(node_id) \
	{ \
		.dev = DEVICE_DT_GET(node_id), \
	},

#define DEFINE_UART_EMUL(inst) \
	static const struct emul_link_for_bus emuls_##inst[] = { \
		DT_FOREACH_CHILD_STATUS_OKAY(DT_DRV_INST(inst), EMUL_LINK_AND_COMMA)}; \
	\
	RING_BUF_DECLARE(uart_emul_##inst##_rx_rb, UART_EMUL_RX_FIFO_SIZE(inst)); \
	RING_BUF_DECLARE(uart_emul_##inst##_tx_rb, UART_EMUL_TX_FIFO_SIZE(inst)); \
	\
	static const struct uart_emul_config uart_emul_cfg_##inst = { \
		.loopback = DT_INST_PROP(inst, loopback), \
		.latch_buffer_size = DT_INST_PROP(inst, latch_buffer_size), \
		.emul_list = { \
			.children = emuls_##inst, \
			.num_children = ARRAY_SIZE(emuls_##inst), \
		}, \
	}; \
	static struct uart_emul_data uart_emul_data_##inst = { \
		.emuls = SYS_SLIST_STATIC_INIT(&_CONCAT(uart_emul_data_, inst).emuls), \
		.dev = DEVICE_DT_INST_GET(inst), \
		.rx_rb = &uart_emul_##inst##_rx_rb, \
		.tx_rb = &uart_emul_##inst##_tx_rb, \
		IF_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN, \
			   (.irq_work = Z_WORK_INITIALIZER(uart_emul_irq_handler),)) \
		IF_ENABLED(CONFIG_UART_ASYNC_API, \
			   (.tx_work = Z_WORK_INITIALIZER(uart_emul_async_tx_handler), \
			    .rx_timeout_work = Z_WORK_DELAYABLE_INITIALIZER( \
				    uart_emul_async_rx_timeout_handler), \
			    .rx_work = Z_WORK_INITIALIZER(uart_emul_async_rx_handler), \
			    .rx_disable_work = Z_WORK_INITIALIZER( \
				    uart_emul_async_rx_disable_handler),)) \
	}; \
	\
	static int uart_emul_post_init_##inst(void) \
	{ \
		return emul_init_for_bus(DEVICE_DT_INST_GET(inst)); \
	} \
	SYS_INIT(uart_emul_post_init_##inst, POST_KERNEL, CONFIG_UART_EMUL_DEVICE_INIT_PRIORITY); \
	\
	DEVICE_DT_INST_DEFINE(inst, NULL, NULL, &uart_emul_data_##inst, &uart_emul_cfg_##inst, \
			      PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, &uart_emul_api);

DT_INST_FOREACH_STATUS_OKAY(DEFINE_UART_EMUL)