/*
 * Copyright (c) 2023 Fabian Blatz
 * Copyright (c) 2024 grandcentrix GmbH
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT zephyr_uart_emul

#include <errno.h>
#include <inttypes.h>
#include <string.h>

#include <zephyr/drivers/emul.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/drivers/serial/uart_emul.h>
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/ring_buffer.h>
#include <zephyr/sys/util.h>

LOG_MODULE_REGISTER(uart_emul, CONFIG_UART_LOG_LEVEL);
struct uart_emul_config {
	/* emul_list has to be the first member */
	struct emul_list_for_bus emul_list;

	bool loopback;
	size_t latch_buffer_size;
};

BUILD_ASSERT(offsetof(struct uart_emul_config, emul_list) == 0);

/* Device run time data */
struct uart_emul_data {
	/* List of struct uart_emul associated with the device */
	sys_slist_t emuls;

	const struct device *dev;

	struct uart_config cfg;
	int errors;

	struct ring_buf *rx_rb;
	struct k_spinlock rx_lock;

	uart_emul_callback_tx_data_ready_t tx_data_ready_cb;
	void *user_data;

	struct ring_buf *tx_rb;
	struct k_spinlock tx_lock;

#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	bool rx_irq_en;
	bool tx_irq_en;
	struct k_work irq_work;

	uart_irq_callback_user_data_t irq_cb;
	void *irq_cb_udata;
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */

#ifdef CONFIG_UART_ASYNC_API
	bool rx_async_en;
	bool rx_stopping;
	bool rx_release_on_timeout;

	struct k_work tx_work;
	struct k_work rx_work;
	struct k_work rx_disable_work;
	struct k_work_delayable rx_timeout_work;

	uart_callback_t uart_callback;
	void *callback_user_data;

	const uint8_t *tx_buf;
	size_t tx_buf_len;
	size_t tx_buf_offset;

	uint8_t *rx_buf;
	size_t rx_buf_len;
	size_t rx_buf_offset;
	size_t rx_buf_data_len;
	int32_t rx_buf_timeout;

	uint8_t *rx_buf_next;
	size_t rx_buf_next_len;
#endif /* CONFIG_UART_ASYNC_API */
};

/*
 * Define a local work queue thread to emulate different thread priorities.
 *
 * A UART driver may call back from within a thread with higher or lower
 * priority than the thread calling the UART API. Running callbacks at the
 * same priority as the caller, or even on the same thread as with the system
 * work queue, can hide potential concurrency issues.
 */
K_THREAD_STACK_DEFINE(uart_emul_stack_area, CONFIG_UART_EMUL_WORK_Q_STACK_SIZE);
struct k_work_q uart_emul_work_q;

int uart_emul_init_work_q(void)
{
	k_work_queue_init(&uart_emul_work_q);
	k_work_queue_start(&uart_emul_work_q, uart_emul_stack_area,
			   K_THREAD_STACK_SIZEOF(uart_emul_stack_area),
			   CONFIG_UART_EMUL_WORK_Q_PRIORITY, NULL);
	return 0;
}

SYS_INIT(uart_emul_init_work_q, POST_KERNEL, 0);
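
/*
 * All emulator work items (IRQ emulation and the async TX/RX handlers) are
 * submitted to this queue, so user callbacks run in its thread at
 * CONFIG_UART_EMUL_WORK_Q_PRIORITY rather than in the caller's context.
 */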

static void uart_emul_tx_data_ready(const struct device *dev)
{
	struct uart_emul_data *data = dev->data;
	sys_snode_t *node;

	if (data->tx_data_ready_cb) {
		(data->tx_data_ready_cb)(dev, ring_buf_size_get(data->tx_rb), data->user_data);
	}
	SYS_SLIST_FOR_EACH_NODE(&data->emuls, node) {
		struct uart_emul *emul = CONTAINER_OF(node, struct uart_emul, node);

		__ASSERT_NO_MSG(emul->api != NULL);
		__ASSERT_NO_MSG(emul->api->tx_data_ready != NULL);

		emul->api->tx_data_ready(dev, ring_buf_size_get(data->tx_rb), emul->target);
	}
}

static int uart_emul_poll_in(const struct device *dev, unsigned char *p_char)
{
	struct uart_emul_data *drv_data = dev->data;
	k_spinlock_key_t key;
	uint32_t read;

	key = k_spin_lock(&drv_data->rx_lock);
	read = ring_buf_get(drv_data->rx_rb, p_char, 1);
	k_spin_unlock(&drv_data->rx_lock, key);

	if (!read) {
		LOG_DBG("Rx buffer is empty");
		return -1;
	}

	return 0;
}

static void uart_emul_poll_out(const struct device *dev, unsigned char out_char)
{
	struct uart_emul_data *drv_data = dev->data;
	const struct uart_emul_config *drv_cfg = dev->config;
	k_spinlock_key_t key;
	uint32_t written;

	key = k_spin_lock(&drv_data->tx_lock);
	written = ring_buf_put(drv_data->tx_rb, &out_char, 1);
	k_spin_unlock(&drv_data->tx_lock, key);

	if (!written) {
		LOG_DBG("Tx buffer is full");
		return;
	}

	if (drv_cfg->loopback) {
		uart_emul_put_rx_data(dev, &out_char, 1);
	}

	uart_emul_tx_data_ready(dev);
}
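
/*
 * Example (sketch): with the devicetree property "loopback" enabled, bytes
 * written with uart_poll_out() can be read back via uart_poll_in():
 *
 *   unsigned char c;
 *
 *   uart_poll_out(dev, 'a');
 *   zassert_ok(uart_poll_in(dev, &c));  // c == 'a'
 *
 * (ztest assertion shown for illustration only.)
 */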

static int uart_emul_err_check(const struct device *dev)
{
	struct uart_emul_data *drv_data = dev->data;
	int errors = drv_data->errors;

	drv_data->errors = 0;
	return errors;
}

#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
static int uart_emul_configure(const struct device *dev, const struct uart_config *cfg)
{
	struct uart_emul_data *drv_data = dev->data;

	memcpy(&drv_data->cfg, cfg, sizeof(struct uart_config));
	return 0;
}

static int uart_emul_config_get(const struct device *dev, struct uart_config *cfg)
{
	const struct uart_emul_data *drv_data = dev->data;

	memcpy(cfg, &drv_data->cfg, sizeof(struct uart_config));
	return 0;
}
#endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */

#ifdef CONFIG_UART_INTERRUPT_DRIVEN
static int uart_emul_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size)
{
	int ret;
	struct uart_emul_data *data = dev->data;
	const struct uart_emul_config *config = dev->config;
	uint32_t put_size = MIN(config->latch_buffer_size, size);

	K_SPINLOCK(&data->tx_lock) {
		ret = ring_buf_put(data->tx_rb, tx_data, put_size);
	}

	if (config->loopback) {
		uart_emul_put_rx_data(dev, (uint8_t *)tx_data, put_size);
	}

	uart_emul_tx_data_ready(dev);

	return ret;
}

static int uart_emul_fifo_read(const struct device *dev, uint8_t *rx_data, int size)
{
	struct uart_emul_data *data = dev->data;
	const struct uart_emul_config *config = dev->config;
	uint32_t bytes_to_read;

	K_SPINLOCK(&data->rx_lock) {
		bytes_to_read = MIN(config->latch_buffer_size, ring_buf_size_get(data->rx_rb));
		bytes_to_read = MIN(bytes_to_read, size);
		ring_buf_get(data->rx_rb, rx_data, bytes_to_read);
	}

	return bytes_to_read;
}

static int uart_emul_irq_tx_ready(const struct device *dev)
{
	int available = 0;
	struct uart_emul_data *data = dev->data;

	K_SPINLOCK(&data->tx_lock) {
		if (!data->tx_irq_en) {
			K_SPINLOCK_BREAK;
		}

		available = ring_buf_space_get(data->tx_rb);
	}

	return available;
}

static int uart_emul_irq_rx_ready(const struct device *dev)
{
	bool ready = false;
	struct uart_emul_data *data = dev->data;

	K_SPINLOCK(&data->rx_lock) {
		if (!data->rx_irq_en) {
			K_SPINLOCK_BREAK;
		}

		ready = !ring_buf_is_empty(data->rx_rb);
	}

	return ready;
}
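
/*
 * Emulated "interrupt" bottom half: repeatedly invoke the user IRQ callback
 * from the emulator work queue for as long as TX space or RX data is
 * available, mimicking a level-triggered UART interrupt.
 */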
static void uart_emul_irq_handler(struct k_work *work)
{
	struct uart_emul_data *data = CONTAINER_OF(work, struct uart_emul_data, irq_work);
	const struct device *dev = data->dev;
	uart_irq_callback_user_data_t cb = data->irq_cb;
	void *udata = data->irq_cb_udata;

	if (cb == NULL) {
		LOG_DBG("No IRQ callback configured for uart_emul device %p", dev);
		return;
	}

	while (true) {
		bool have_work = false;

		K_SPINLOCK(&data->tx_lock) {
			if (!data->tx_irq_en) {
				K_SPINLOCK_BREAK;
			}

			have_work = have_work || ring_buf_space_get(data->tx_rb) > 0;
		}

		K_SPINLOCK(&data->rx_lock) {
			if (!data->rx_irq_en) {
				K_SPINLOCK_BREAK;
			}

			have_work = have_work || !ring_buf_is_empty(data->rx_rb);
		}

		if (!have_work) {
			break;
		}

		cb(dev, udata);
	}
}

static int uart_emul_irq_is_pending(const struct device *dev)
{
	return uart_emul_irq_tx_ready(dev) || uart_emul_irq_rx_ready(dev);
}

static void uart_emul_irq_tx_enable(const struct device *dev)
{
	bool submit_irq_work;
	struct uart_emul_data *const data = dev->data;

	K_SPINLOCK(&data->tx_lock) {
		data->tx_irq_en = true;
		submit_irq_work = ring_buf_space_get(data->tx_rb) > 0;
	}

	if (submit_irq_work) {
		(void)k_work_submit_to_queue(&uart_emul_work_q, &data->irq_work);
	}
}

static void uart_emul_irq_rx_enable(const struct device *dev)
{
	bool submit_irq_work;
	struct uart_emul_data *const data = dev->data;

	K_SPINLOCK(&data->rx_lock) {
		data->rx_irq_en = true;
		submit_irq_work = !ring_buf_is_empty(data->rx_rb);
	}

	if (submit_irq_work) {
		(void)k_work_submit_to_queue(&uart_emul_work_q, &data->irq_work);
	}
}

static void uart_emul_irq_tx_disable(const struct device *dev)
{
	struct uart_emul_data *const data = dev->data;

	K_SPINLOCK(&data->tx_lock) {
		data->tx_irq_en = false;
	}
}

static void uart_emul_irq_rx_disable(const struct device *dev)
{
	struct uart_emul_data *const data = dev->data;

	K_SPINLOCK(&data->rx_lock) {
		data->rx_irq_en = false;
	}
}

static int uart_emul_irq_tx_complete(const struct device *dev)
{
	bool tx_complete = false;
	struct uart_emul_data *const data = dev->data;

	K_SPINLOCK(&data->tx_lock) {
		tx_complete = ring_buf_is_empty(data->tx_rb);
	}

	return tx_complete;
}

static void uart_emul_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb,
				       void *user_data)
{
	struct uart_emul_data *const data = dev->data;

	data->irq_cb = cb;
	data->irq_cb_udata = user_data;
}

static int uart_emul_irq_update(const struct device *dev)
{
	return 1;
}
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
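
/*
 * Example (sketch): interrupt-driven reception in a test; `on_irq`, `rx`,
 * `data` and `len` are illustrative names:
 *
 *   static void on_irq(const struct device *dev, void *user_data)
 *   {
 *           uint8_t rx[8];
 *
 *           if (uart_irq_update(dev) && uart_irq_rx_ready(dev)) {
 *                   uart_fifo_read(dev, rx, sizeof(rx));
 *           }
 *   }
 *
 *   uart_irq_callback_set(dev, on_irq);
 *   uart_irq_rx_enable(dev);
 *   uart_emul_put_rx_data(dev, data, len);  // on_irq runs on the emul work queue
 */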

#ifdef CONFIG_UART_ASYNC_API
static void uart_emul_post_event(const struct device *dev, struct uart_event *evt)
{
	struct uart_emul_data *data = dev->data;

	if (!data->uart_callback) {
		LOG_DBG("No async callback configured for uart_emul device %p", dev);
		return;
	}

	data->uart_callback(dev, evt, data->callback_user_data);
}

static void uart_emul_simple_event(const struct device *dev, enum uart_event_type type)
{
	uart_emul_post_event(dev, &(struct uart_event){.type = type});
}

static void uart_emul_async_switch_buf_nolock(struct uart_emul_data *data)
{
	data->rx_buf = data->rx_buf_next;
	data->rx_buf_len = data->rx_buf_next_len;
	data->rx_buf_offset = 0;
	data->rx_buf_data_len = 0;
	data->rx_buf_next = NULL;
	data->rx_buf_next_len = 0;
}

static void uart_emul_async_rx_timeout_handler(struct k_work *_work)
{
	struct k_work_delayable *work = k_work_delayable_from_work(_work);
	struct uart_emul_data *data = CONTAINER_OF(work, struct uart_emul_data, rx_timeout_work);
	const struct device *dev = data->dev;

	uint8_t *rx_buf;
	size_t rx_buf_len;
	size_t rx_buf_offset;
	size_t rx_buf_data_len;
	bool rx_en;
	bool rx_buf_released = false;
	bool rx_stopped = false;

	K_SPINLOCK(&data->rx_lock) {
		rx_en = data->rx_async_en;
		rx_buf = data->rx_buf;
		rx_buf_len = data->rx_buf_len;
		rx_buf_offset = data->rx_buf_offset;
		rx_buf_data_len = data->rx_buf_data_len;

		data->rx_buf_offset += rx_buf_data_len;
		data->rx_buf_data_len = 0;

		if (data->rx_buf_offset >= rx_buf_len ||
		    (rx_buf_data_len > 0 && data->rx_release_on_timeout)) {
			rx_buf_released = true;
			uart_emul_async_switch_buf_nolock(data);
			if (data->rx_buf == NULL) {
				/* There was no second buffer scheduled, so stop receiving */
				rx_stopped = true;
				data->rx_async_en = false;
			}
		}
	}

	if (!rx_en || rx_buf == NULL || rx_buf_data_len == 0) {
		return;
	}

	struct uart_event rx_rdy_event = {
		.type = UART_RX_RDY,
		.data.rx = {
			.buf = rx_buf,
			.offset = rx_buf_offset,
			.len = rx_buf_data_len,
		},
	};

	uart_emul_post_event(dev, &rx_rdy_event);

	if (rx_buf_released) {
		struct uart_event rx_buf_released_event = {
			.type = UART_RX_BUF_RELEASED,
			.data.rx_buf.buf = rx_buf,
		};

		uart_emul_post_event(dev, &rx_buf_released_event);
	}
	if (rx_stopped) {
		uart_emul_simple_event(dev, UART_RX_DISABLED);
	}
}

static void uart_emul_async_rx_handler(struct k_work *work)
{
	struct uart_emul_data *data = CONTAINER_OF(work, struct uart_emul_data, rx_work);
	const struct device *dev = data->dev;

	bool rx_en = false;
	bool empty = true;

	do {
		bool rx_rdy = false;
		bool buf_request = false;

		uint8_t *rx_buf = NULL;
		size_t buf_len;
		size_t offset;
		size_t data_len;

		K_SPINLOCK(&data->rx_lock) {
			rx_en = data->rx_async_en;
			rx_buf = data->rx_buf;
			buf_len = data->rx_buf_len;
			offset = data->rx_buf_offset;
			data_len = data->rx_buf_data_len;
			empty = ring_buf_is_empty(data->rx_rb);

			if (!rx_en) {
				K_SPINLOCK_BREAK;
			}

			if (rx_buf == NULL) {
				uart_emul_async_switch_buf_nolock(data);
				rx_buf = data->rx_buf;
				buf_len = data->rx_buf_len;
				offset = data->rx_buf_offset;
				data_len = data->rx_buf_data_len;
			}

			if (rx_buf == NULL) {
				/* During the last iteration the buffer was released but the
				 * application did not provide a new buffer. Stop RX and quit now.
				 */
				data->rx_async_en = false;
				K_SPINLOCK_BREAK;
			}

			if (empty) {
				K_SPINLOCK_BREAK;
			}

			buf_request = data_len == 0 && data->rx_buf_next == NULL;

			uint32_t read = ring_buf_get(data->rx_rb, &rx_buf[offset + data_len],
						     buf_len - (offset + data_len));
			data_len += read;
			data->rx_buf_data_len = data_len;

			if (offset + data_len >= data->rx_buf_len) {
				rx_rdy = true;
				data->rx_buf = NULL;
				data->rx_buf_len = 0;
				data->rx_buf_offset = 0;
				data->rx_buf_data_len = 0;
			}
		}

		if (!rx_en) {
			break;
		}

		if (rx_buf == NULL) {
			uart_emul_simple_event(dev, UART_RX_DISABLED);
			break;
		}

		if (empty && data->rx_buf_timeout != SYS_FOREVER_US) {
			(void)k_work_reschedule_for_queue(&uart_emul_work_q, &data->rx_timeout_work,
							  K_USEC(data->rx_buf_timeout));
		}

		if (buf_request) {
			uart_emul_simple_event(dev, UART_RX_BUF_REQUEST);
		}

		if (rx_rdy) {
			struct uart_event rx_rdy_event = {
				.type = UART_RX_RDY,
				.data.rx = {
					.buf = rx_buf,
					.offset = offset,
					.len = data_len,
				},
			};

			uart_emul_post_event(dev, &rx_rdy_event);

			struct uart_event rx_buf_released_event = {
				.type = UART_RX_BUF_RELEASED,
				.data.rx_buf.buf = rx_buf,
			};

			uart_emul_post_event(dev, &rx_buf_released_event);
		}
	} while (rx_en && !empty);
}

static void uart_emul_async_tx_handler(struct k_work *work)
{
	struct uart_emul_data *data = CONTAINER_OF(work, struct uart_emul_data, tx_work);
	const struct device *dev = data->dev;
	const struct uart_emul_config *config = dev->config;

	uint32_t written;

	const uint8_t *tx_buf = NULL;
	size_t tx_buf_len = 0;
	size_t tx_buf_offset = 0;
	bool tx_done = true;

	K_SPINLOCK(&data->tx_lock) {
		tx_buf = data->tx_buf;
		tx_buf_len = data->tx_buf_len;
		tx_buf_offset = data->tx_buf_offset;

		if (!tx_buf) {
			K_SPINLOCK_BREAK;
		}

		written = ring_buf_put(data->tx_rb, &data->tx_buf[tx_buf_offset],
				       tx_buf_len - tx_buf_offset);
		tx_done = written == (tx_buf_len - tx_buf_offset);
		if (!tx_done) {
			data->tx_buf_offset += written;
			K_SPINLOCK_BREAK;
		}
		data->tx_buf = NULL;
		data->tx_buf_len = 0;
		data->tx_buf_offset = 0;
	}

	if (!tx_buf) {
		return;
	}

	if (config->loopback && written) {
		uint32_t loop_written = uart_emul_put_rx_data(dev, &tx_buf[tx_buf_offset], written);

		if (loop_written < written) {
			LOG_WRN("Lost %" PRIu32 " bytes on loopback", written - loop_written);
		}
	}

	uart_emul_tx_data_ready(dev);

	if ((config->loopback && written) || !written) {
		/* When using the loopback fixture, allow dropping any bytes in the ring
		 * buffer not consumed by tx_data_ready_cb().
		 */

		uint32_t flushed = uart_emul_flush_tx_data(dev);

		if (flushed) {
			if (written) {
				LOG_DBG("Flushed %" PRIu32 " unused bytes from tx buffer", flushed);
			} else {
				LOG_WRN("Flushed %" PRIu32
					" unused bytes from tx buffer to break out of infinite "
					"loop! Consume or flush the bytes from the tx ring buffer "
					"in your test case to prevent this!",
					flushed);
			}
		}
	}

	if (!tx_done) {
		/* We are not done yet, yield back into the workqueue.
		 *
		 * This would basically be an infinite loop if tx_data_ready_cb() did not
		 * consume the bytes in the tx ring buffer.
		 */
		k_work_submit_to_queue(&uart_emul_work_q, &data->tx_work);
		return;
	}

	struct uart_event tx_done_event = {
		.type = UART_TX_DONE,
		.data.tx = {
			.buf = tx_buf,
			.len = tx_buf_len,
		},
	};

	uart_emul_post_event(dev, &tx_done_event);
}

static void uart_emul_rx_stop(const struct device *dev, struct uart_emul_data *data)
{
	uint8_t *rx_buf = NULL;
	size_t rx_buf_offset = 0;
	size_t rx_buf_data_len = 0;

	k_work_cancel_delayable(&data->rx_timeout_work);

	K_SPINLOCK(&data->rx_lock) {
		if (!data->rx_async_en) {
			K_SPINLOCK_BREAK;
		}
		rx_buf = data->rx_buf;
		rx_buf_offset = data->rx_buf_offset;
		rx_buf_data_len = data->rx_buf_data_len;

		data->rx_buf = NULL;
		data->rx_buf_len = 0;
		data->rx_buf_offset = 0;
		data->rx_buf_data_len = 0;
		data->rx_buf_next = NULL;
		data->rx_buf_next_len = 0;
		data->rx_async_en = false;
		data->rx_stopping = false;
	}

	if (rx_buf == NULL) {
		return;
	}

	if (rx_buf_data_len > 0) {
		struct uart_event rx_rdy_event = {
			.type = UART_RX_RDY,
			.data.rx = {
				.buf = rx_buf,
				.offset = rx_buf_offset,
				.len = rx_buf_data_len,
			},
		};

		uart_emul_post_event(dev, &rx_rdy_event);
	}

	struct uart_event rx_buf_released_event = {
		.type = UART_RX_BUF_RELEASED,
		.data.rx_buf.buf = rx_buf,
	};

	uart_emul_post_event(dev, &rx_buf_released_event);
	uart_emul_simple_event(dev, UART_RX_DISABLED);
}

static void uart_emul_async_rx_disable_handler(struct k_work *work)
{
	struct uart_emul_data *data = CONTAINER_OF(work, struct uart_emul_data, rx_disable_work);
	const struct device *dev = data->dev;

	uart_emul_rx_stop(dev, data);
}

static int uart_emul_callback_set(const struct device *dev, uart_callback_t callback,
				  void *user_data)
{
	struct uart_emul_data *data = dev->data;

	data->uart_callback = callback;
	data->callback_user_data = user_data;

	return 0;
}

static int uart_emul_tx(const struct device *dev, const uint8_t *buf, size_t len, int32_t timeout)
{
	struct uart_emul_data *data = dev->data;
	int ret = 0;

	K_SPINLOCK(&data->tx_lock) {
		if (data->tx_buf) {
			ret = -EBUSY;
			K_SPINLOCK_BREAK;
		}

		data->tx_buf = buf;
		data->tx_buf_len = len;
		data->tx_buf_offset = 0;

		k_work_submit_to_queue(&uart_emul_work_q, &data->tx_work);
	}

	return ret;
}
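
/*
 * Example (sketch): asynchronous transmit; UART_TX_DONE is delivered from the
 * emulator work queue once the buffer has been copied into the TX ring buffer
 * (`async_cb` and `tx_data` are illustrative names):
 *
 *   uart_callback_set(dev, async_cb, NULL);
 *   uart_tx(dev, tx_data, sizeof(tx_data), SYS_FOREVER_US);
 */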

static int uart_emul_tx_abort(const struct device *dev)
{
	struct uart_emul_data *data = dev->data;
	const uint8_t *tx_buf = NULL;
	size_t tx_buf_sent;

	K_SPINLOCK(&data->tx_lock) {
		tx_buf = data->tx_buf;
		tx_buf_sent = data->tx_buf_offset;

		data->tx_buf = NULL;
		data->tx_buf_len = 0;
		data->tx_buf_offset = 0;

		k_work_cancel(&data->tx_work);
	}

	if (!tx_buf) {
		return -EFAULT;
	}

	struct uart_event tx_aborted_event = {
		.type = UART_TX_ABORTED,
		.data.tx = {
			.buf = tx_buf,
			.len = tx_buf_sent,
		},
	};

	uart_emul_post_event(dev, &tx_aborted_event);

	return 0;
}

static int uart_emul_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len)
{
	struct uart_emul_data *data = dev->data;
	int ret = 0;

	K_SPINLOCK(&data->rx_lock) {
		if (!data->rx_async_en) {
			ret = -EACCES;
			K_SPINLOCK_BREAK;
		}

		if (data->rx_buf_next != NULL) {
			ret = -EBUSY;
			K_SPINLOCK_BREAK;
		}

		data->rx_buf_next = buf;
		data->rx_buf_next_len = len;
	}

	return ret;
}

static int uart_emul_rx_enable(const struct device *dev, uint8_t *buf, size_t len, int32_t timeout)
{
	struct uart_emul_data *data = dev->data;
	int ret = 0;
	bool rx_stopping;

	K_SPINLOCK(&data->rx_lock) {
		rx_stopping = data->rx_stopping;
		k_work_cancel(&data->rx_disable_work);
	}

	if (rx_stopping) {
		uart_emul_rx_stop(dev, data);
	}

	K_SPINLOCK(&data->rx_lock) {
		if (data->rx_async_en) {
			ret = -EBUSY;
			K_SPINLOCK_BREAK;
		}

		data->rx_async_en = true;
		data->rx_buf = buf;
		data->rx_buf_len = len;
		data->rx_buf_timeout = timeout;
		data->rx_buf_offset = 0;
		data->rx_buf_data_len = 0;
		data->rx_buf_next = NULL;
		data->rx_buf_next_len = 0;

		if (!ring_buf_is_empty(data->rx_rb)) {
			(void)k_work_submit_to_queue(&uart_emul_work_q, &data->rx_work);
		}
	}

	return ret;
}
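
/*
 * Example (sketch): asynchronous reception; data injected into the emulator
 * surfaces as UART_RX_RDY events (`async_cb`, `buf`, `data` and `len` are
 * illustrative names):
 *
 *   static uint8_t buf[32];
 *
 *   uart_callback_set(dev, async_cb, NULL);
 *   uart_rx_enable(dev, buf, sizeof(buf), 1000);  // 1000 us timeout
 *   uart_emul_put_rx_data(dev, data, len);        // triggers UART_RX_RDY
 */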

static int uart_emul_rx_disable(const struct device *dev)
{
	struct uart_emul_data *data = dev->data;
	int ret = 0;

	K_SPINLOCK(&data->rx_lock) {
		if (!data->rx_async_en) {
			ret = -EFAULT;
			K_SPINLOCK_BREAK;
		}
		data->rx_stopping = true;
		k_work_submit_to_queue(&uart_emul_work_q, &data->rx_disable_work);
	}

	return ret;
}
#endif /* CONFIG_UART_ASYNC_API */

static DEVICE_API(uart, uart_emul_api) = {
	.poll_in = uart_emul_poll_in,
	.poll_out = uart_emul_poll_out,
#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
	.config_get = uart_emul_config_get,
	.configure = uart_emul_configure,
#endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */
	.err_check = uart_emul_err_check,
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	.fifo_fill = uart_emul_fifo_fill,
	.fifo_read = uart_emul_fifo_read,
	.irq_tx_enable = uart_emul_irq_tx_enable,
	.irq_rx_enable = uart_emul_irq_rx_enable,
	.irq_tx_disable = uart_emul_irq_tx_disable,
	.irq_rx_disable = uart_emul_irq_rx_disable,
	.irq_tx_ready = uart_emul_irq_tx_ready,
	.irq_rx_ready = uart_emul_irq_rx_ready,
	.irq_tx_complete = uart_emul_irq_tx_complete,
	.irq_callback_set = uart_emul_irq_callback_set,
	.irq_update = uart_emul_irq_update,
	.irq_is_pending = uart_emul_irq_is_pending,
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
#ifdef CONFIG_UART_ASYNC_API
	.callback_set = uart_emul_callback_set,
	.tx = uart_emul_tx,
	.tx_abort = uart_emul_tx_abort,
	.rx_enable = uart_emul_rx_enable,
	.rx_buf_rsp = uart_emul_rx_buf_rsp,
	.rx_disable = uart_emul_rx_disable,
#endif /* CONFIG_UART_ASYNC_API */
};

void uart_emul_callback_tx_data_ready_set(const struct device *dev,
					  uart_emul_callback_tx_data_ready_t cb, void *user_data)
{
	struct uart_emul_data *drv_data = dev->data;

	drv_data->tx_data_ready_cb = cb;
	drv_data->user_data = user_data;
}
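
/*
 * Example (sketch): observe transmitted data by registering a tx-data-ready
 * callback and draining the TX ring buffer (`on_tx_ready` is illustrative):
 *
 *   static void on_tx_ready(const struct device *dev, size_t size, void *user_data)
 *   {
 *           uint8_t buf[16];
 *
 *           uart_emul_get_tx_data(dev, buf, MIN(size, sizeof(buf)));
 *   }
 *
 *   uart_emul_callback_tx_data_ready_set(dev, on_tx_ready, NULL);
 */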

uint32_t uart_emul_put_rx_data(const struct device *dev, const uint8_t *data, size_t size)
{
	struct uart_emul_data *drv_data = dev->data;
	uint32_t count;
	__unused bool empty;
	__unused bool irq_en;
	__unused bool rx_en;

	K_SPINLOCK(&drv_data->rx_lock) {
		count = ring_buf_put(drv_data->rx_rb, data, size);
		empty = ring_buf_is_empty(drv_data->rx_rb);
		IF_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN, (irq_en = drv_data->rx_irq_en;));
		IF_ENABLED(CONFIG_UART_ASYNC_API, (rx_en = drv_data->rx_async_en;));
	}

	if (count < size) {
		uart_emul_set_errors(dev, UART_ERROR_OVERRUN);
	}

	IF_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN, (
		if (count > 0 && irq_en && !empty) {
			(void)k_work_submit_to_queue(&uart_emul_work_q, &drv_data->irq_work);
		}
	))
	IF_ENABLED(CONFIG_UART_ASYNC_API, (
		if (count > 0 && rx_en && !empty) {
			(void)k_work_submit_to_queue(&uart_emul_work_q, &drv_data->rx_work);
		}
	))

	return count;
}
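
/*
 * Example (sketch): inject data into the emulated RX FIFO and read it back
 * through the regular UART API:
 *
 *   uint8_t rx[] = {0x01, 0x02};
 *   unsigned char c;
 *
 *   uart_emul_put_rx_data(dev, rx, sizeof(rx));
 *   uart_poll_in(dev, &c);  // c == 0x01
 */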

uint32_t uart_emul_get_tx_data(const struct device *dev, uint8_t *data, size_t size)
{
	struct uart_emul_data *drv_data = dev->data;
	k_spinlock_key_t key;
	uint32_t count;

	key = k_spin_lock(&drv_data->tx_lock);
	count = ring_buf_get(drv_data->tx_rb, data, size);
	k_spin_unlock(&drv_data->tx_lock, key);
	return count;
}

uint32_t uart_emul_flush_rx_data(const struct device *dev)
{
	struct uart_emul_data *drv_data = dev->data;
	k_spinlock_key_t key;
	uint32_t count;

	key = k_spin_lock(&drv_data->rx_lock);
	count = ring_buf_size_get(drv_data->rx_rb);
	ring_buf_reset(drv_data->rx_rb);
	k_spin_unlock(&drv_data->rx_lock, key);
	return count;
}

uint32_t uart_emul_flush_tx_data(const struct device *dev)
{
	struct uart_emul_data *drv_data = dev->data;
	k_spinlock_key_t key;
	uint32_t count;

	key = k_spin_lock(&drv_data->tx_lock);
	count = ring_buf_size_get(drv_data->tx_rb);
	ring_buf_reset(drv_data->tx_rb);
	k_spin_unlock(&drv_data->tx_lock, key);
	return count;
}

void uart_emul_set_errors(const struct device *dev, int errors)
{
	struct uart_emul_data *drv_data = dev->data;

	drv_data->errors |= errors;
}

void uart_emul_set_release_buffer_on_timeout(const struct device *dev, bool release_on_timeout)
{
	__unused struct uart_emul_data *drv_data = dev->data;

	IF_ENABLED(CONFIG_UART_ASYNC_API, (drv_data->rx_release_on_timeout = release_on_timeout;));
}

int uart_emul_register(const struct device *dev, struct uart_emul *emul)
{
	struct uart_emul_data *data = dev->data;

	sys_slist_append(&data->emuls, &emul->node);

	return 0;
}
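
/*
 * Example (sketch): a device-level emulator attaches itself to this driver,
 * typically from its EMUL_DT_DEFINE() init hook (`my_emul` and `my_emul_api`
 * are illustrative; see struct uart_emul in uart_emul.h for the fields):
 *
 *   static struct uart_emul my_emul = {.api = &my_emul_api};
 *
 *   uart_emul_register(uart_dev, &my_emul);
 */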

#define UART_EMUL_RX_FIFO_SIZE(inst) (DT_INST_PROP(inst, rx_fifo_size))
#define UART_EMUL_TX_FIFO_SIZE(inst) (DT_INST_PROP(inst, tx_fifo_size))

#define EMUL_LINK_AND_COMMA(node_id) \
	{ \
		.dev = DEVICE_DT_GET(node_id), \
	},

#define DEFINE_UART_EMUL(inst) \
	static const struct emul_link_for_bus emuls_##inst[] = { \
		DT_FOREACH_CHILD_STATUS_OKAY(DT_DRV_INST(inst), EMUL_LINK_AND_COMMA)}; \
 \
	RING_BUF_DECLARE(uart_emul_##inst##_rx_rb, UART_EMUL_RX_FIFO_SIZE(inst)); \
	RING_BUF_DECLARE(uart_emul_##inst##_tx_rb, UART_EMUL_TX_FIFO_SIZE(inst)); \
 \
	static const struct uart_emul_config uart_emul_cfg_##inst = { \
		.loopback = DT_INST_PROP(inst, loopback), \
		.latch_buffer_size = DT_INST_PROP(inst, latch_buffer_size), \
		.emul_list = { \
			.children = emuls_##inst, \
			.num_children = ARRAY_SIZE(emuls_##inst), \
		}, \
	}; \
	static struct uart_emul_data uart_emul_data_##inst = { \
		.emuls = SYS_SLIST_STATIC_INIT(&_CONCAT(uart_emul_data_, inst).emuls), \
		.dev = DEVICE_DT_INST_GET(inst), \
		.rx_rb = &uart_emul_##inst##_rx_rb, \
		.tx_rb = &uart_emul_##inst##_tx_rb, \
		IF_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN, \
			   (.irq_work = Z_WORK_INITIALIZER(uart_emul_irq_handler),)) \
		IF_ENABLED(CONFIG_UART_ASYNC_API, \
			   (.tx_work = Z_WORK_INITIALIZER(uart_emul_async_tx_handler), \
			    .rx_timeout_work = Z_WORK_DELAYABLE_INITIALIZER( \
				    uart_emul_async_rx_timeout_handler), \
			    .rx_work = Z_WORK_INITIALIZER(uart_emul_async_rx_handler), \
			    .rx_disable_work = Z_WORK_INITIALIZER( \
				    uart_emul_async_rx_disable_handler),)) \
	}; \
 \
	static int uart_emul_post_init_##inst(void) \
	{ \
		return emul_init_for_bus(DEVICE_DT_INST_GET(inst)); \
	} \
	SYS_INIT(uart_emul_post_init_##inst, POST_KERNEL, \
		 CONFIG_UART_EMUL_DEVICE_INIT_PRIORITY); \
 \
	DEVICE_DT_INST_DEFINE(inst, NULL, NULL, &uart_emul_data_##inst, &uart_emul_cfg_##inst, \
			      PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, &uart_emul_api);

DT_INST_FOREACH_STATUS_OKAY(DEFINE_UART_EMUL)
1057