/*
 * Copyright (c) 2018, Oticon A/S
 * Copyright (c) 2025, Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT zephyr_native_pty_uart

#include <stdbool.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/atomic.h>
#include <cmdline.h> /* native_sim command line options header */
#include <posix_native_task.h>
#include <nsi_host_trampolines.h>
#include <nsi_tracing.h>
#include "uart_native_pty_bottom.h"

#define ERROR posix_print_error_and_exit
#define WARN posix_print_warning

/*
 * UART driver for native simulator based boards.
 * It can support a configurable number of UARTs.
 *
 * One (and only one) of these UARTs can be connected to the process STDIN+STDOUT;
 * otherwise, each is connected to its own dedicated pseudo-terminal.
 *
 * Connecting to a dedicated PTY is the recommended option for interactive use, as the pseudo
 * terminal driver will be configured in "raw" mode and will therefore behave more like a real
 * UART.
 *
 * When connected to its own pseudo-terminal, a UART may also automatically attach a terminal
 * emulator to it, if requested via the command line.
 */
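/*
 * Invocation sketch (illustrative, not exhaustive): this driver is normally steered through the
 * command line options registered by np_add_uart_options() further below. Assuming a UART whose
 * device name is "uart_1", and the usual native_sim executable name, one might run for example:
 *
 *   ./zephyr.exe -attach_uart                # attach every PTY UART to a terminal emulator
 *   ./zephyr.exe -wait_uart                  # hold writes until a client opens the PTY side
 *   ./zephyr.exe -uart_1_stdinout            # connect "uart_1" to the process STDIN/OUT
 */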

struct native_pty_status {
	int out_fd; /* File descriptor used for output */
	int in_fd;  /* File descriptor used for input */
	bool on_stdinout; /* This UART is connected to the process STDIN/OUT and not to a PTY */
	bool stdin_disconnected; /* STDIN has been closed (only relevant when on_stdinout) */

	bool auto_attach; /* For PTY, attach a terminal emulator automatically */
	char *auto_attach_cmd; /* If auto_attach, command used to launch the terminal emulator */
	bool wait_pts; /* Hold writes to the uart/pts until a client is connected/ready */
	bool cmd_request_stdinout; /* User requested to connect this UART to the stdin/out */
#ifdef CONFIG_UART_ASYNC_API
	struct {
		const struct device *dev;
		struct k_work_delayable tx_done;
		uart_callback_t user_callback;
		void *user_data;
		const uint8_t *tx_buf;
		size_t tx_len;
		uint8_t *rx_buf;
		size_t rx_len;
		/* Instance-specific RX thread. */
		struct k_thread rx_thread;
		/* Stack for RX thread */
		K_KERNEL_STACK_MEMBER(rx_stack, CONFIG_ARCH_POSIX_RECOMMENDED_STACK_SIZE);
	} async;
#endif /* CONFIG_UART_ASYNC_API */
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	struct {
		bool tx_enabled;
		bool rx_enabled;
		uart_irq_callback_user_data_t callback;
		void *cb_data;
		char char_store;
		bool char_ready;
		atomic_t thread_started;
		/* Instance-specific IRQ emulation thread. */
		struct k_thread poll_thread;
		/* Stack for IRQ emulation thread */
		K_KERNEL_STACK_MEMBER(poll_stack, CONFIG_ARCH_POSIX_RECOMMENDED_STACK_SIZE);
	} irq;
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
};

static int np_uart_poll_out_n(struct native_pty_status *d, const unsigned char *buf, size_t len);
static void np_uart_poll_out(const struct device *dev, unsigned char out_char);
static int np_uart_poll_in(const struct device *dev, unsigned char *p_char);
static int np_uart_init(const struct device *dev);

#ifdef CONFIG_UART_ASYNC_API
static void np_uart_tx_done_work(struct k_work *work);
static int np_uart_callback_set(const struct device *dev, uart_callback_t callback,
				void *user_data);
static int np_uart_tx(const struct device *dev, const uint8_t *buf, size_t len, int32_t timeout);
static int np_uart_tx_abort(const struct device *dev);
static int np_uart_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len);
static int np_uart_rx_enable(const struct device *dev, uint8_t *buf, size_t len, int32_t timeout);
static int np_uart_rx_disable(const struct device *dev);
#endif /* CONFIG_UART_ASYNC_API */

#ifdef CONFIG_UART_INTERRUPT_DRIVEN
static int np_uart_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size);
static int np_uart_fifo_read(const struct device *dev, uint8_t *rx_data, const int size);
static void np_uart_irq_tx_enable(const struct device *dev);
static void np_uart_irq_tx_disable(const struct device *dev);
static int np_uart_irq_tx_ready(const struct device *dev);
static int np_uart_irq_tx_complete(const struct device *dev);
static void np_uart_irq_rx_enable(const struct device *dev);
static void np_uart_irq_rx_disable(const struct device *dev);
static int np_uart_irq_rx_ready(const struct device *dev);
static int np_uart_irq_is_pending(const struct device *dev);
static int np_uart_irq_update(const struct device *dev);
static void np_uart_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb,
				     void *cb_data);
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */

static DEVICE_API(uart, np_uart_driver_api) = {
	.poll_out = np_uart_poll_out,
	.poll_in = np_uart_poll_in,
#ifdef CONFIG_UART_ASYNC_API
	.callback_set = np_uart_callback_set,
	.tx = np_uart_tx,
	.tx_abort = np_uart_tx_abort,
	.rx_buf_rsp = np_uart_rx_buf_rsp,
	.rx_enable = np_uart_rx_enable,
	.rx_disable = np_uart_rx_disable,
#endif /* CONFIG_UART_ASYNC_API */
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	.fifo_fill = np_uart_fifo_fill,
	.fifo_read = np_uart_fifo_read,
	.irq_tx_enable = np_uart_irq_tx_enable,
	.irq_tx_disable = np_uart_irq_tx_disable,
	.irq_tx_ready = np_uart_irq_tx_ready,
	.irq_tx_complete = np_uart_irq_tx_complete,
	.irq_rx_enable = np_uart_irq_rx_enable,
	.irq_rx_disable = np_uart_irq_rx_disable,
	.irq_rx_ready = np_uart_irq_rx_ready,
	.irq_is_pending = np_uart_irq_is_pending,
	.irq_update = np_uart_irq_update,
	.irq_callback_set = np_uart_irq_callback_set,
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
};

#define NATIVE_PTY_INSTANCE(inst) \
	static struct native_pty_status native_pty_status_##inst; \
	\
	DEVICE_DT_INST_DEFINE(inst, np_uart_init, NULL, \
			      (void *)&native_pty_status_##inst, NULL, \
			      PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, \
			      &np_uart_driver_api);

DT_INST_FOREACH_STATUS_OKAY(NATIVE_PTY_INSTANCE);

/**
 * @brief Initialize a native_pty serial port
 *
 * @param dev UART device struct
 *
 * @return 0 (if it fails catastrophically, the execution is terminated)
 */
static int np_uart_init(const struct device *dev)
{
	static bool stdinout_used;
	struct native_pty_status *d;

	d = (struct native_pty_status *)dev->data;

	if (IS_ENABLED(CONFIG_UART_NATIVE_PTY_0_ON_STDINOUT)) {
		static bool first_node = true;

		if (first_node) {
			d->on_stdinout = true;
		}
		first_node = false;
	}

	if (d->cmd_request_stdinout) {
		if (stdinout_used) {
			nsi_print_warning("%s requested to connect to STDIN/OUT, but another UART"
					  " is already connected to it => ignoring request.\n",
					  dev->name);
		} else {
			d->on_stdinout = true;
		}
	}

	if (d->on_stdinout == true) {
		d->in_fd = np_uart_pty_get_stdin_fileno();
		d->out_fd = np_uart_pty_get_stdout_fileno();
		stdinout_used = true;
	} else {
		if (d->auto_attach_cmd == NULL) {
			d->auto_attach_cmd = CONFIG_UART_NATIVE_PTY_AUTOATTACH_DEFAULT_CMD;
		} else { /* Running with --attach_uart_cmd, implies --attach_uart */
			d->auto_attach = true;
		}
		int tty_fn = np_uart_open_pty(dev->name, d->auto_attach_cmd, d->auto_attach,
					      d->wait_pts);
		d->in_fd = tty_fn;
		d->out_fd = tty_fn;
	}

#ifdef CONFIG_UART_ASYNC_API
	k_work_init_delayable(&d->async.tx_done, np_uart_tx_done_work);
	d->async.dev = dev;
#endif

	return 0;
}

/*
 * @brief Output len characters towards the serial port
 *
 * @param d   Pointer to this UART's status/data structure
 * @param buf Pointer to the characters to send
 * @param len Number of characters to send
 *
 * @return Number of characters written, or a negative value on error
 */
static int np_uart_poll_out_n(struct native_pty_status *d, const unsigned char *buf, size_t len)
{
	int ret;

	if (d->wait_pts) {
		while (1) {
			ret = np_uart_slave_connected(d->out_fd);

			if (ret == 1) {
				break;
			}
			k_sleep(K_MSEC(100));
		}
	}

	ret = nsi_host_write(d->out_fd, buf, len);

	return ret;
}

/*
 * @brief Output a character towards the serial port
 *
 * @param dev      UART device struct
 * @param out_char Character to send
 */
static void np_uart_poll_out(const struct device *dev, unsigned char out_char)
{
	(void)np_uart_poll_out_n((struct native_pty_status *)dev->data, &out_char, 1);
}

/**
 * @brief Poll the device for up to len input characters
 *
 * @param data   Pointer to this UART's status/data structure
 * @param p_char Pointer to the buffer where the characters will be stored
 * @param len    Maximum number of characters to read
 *
 * @retval > 0 Number of characters read and stored in p_char
 * @retval -1  If no character was available to read
 */
static int np_uart_read_n(struct native_pty_status *data, unsigned char *p_char, int len)
{
	int rc = -1;
	int in_f = data->in_fd;

	if (len <= 0) {
		return -1;
	}

	if (data->on_stdinout) {
		if (data->stdin_disconnected) {
			return -1;
		}
		rc = np_uart_stdin_read_bottom(in_f, p_char, len);

		if (rc == -2) {
			data->stdin_disconnected = true;
			return -1;
		}

	} else {
		rc = nsi_host_read(in_f, p_char, len);
	}

	if (rc > 0) {
		return rc;
	}

	return -1;
}

static int np_uart_poll_in(const struct device *dev, unsigned char *p_char)
{
	struct native_pty_status *data = dev->data;

	int ret = np_uart_read_n(data, p_char, 1);

	if (ret == -1) {
		return -1;
	}
	return 0;
}
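
/*
 * Application-side sketch of the polling API implemented above (assumes the generic Zephyr
 * uart_poll_in()/uart_poll_out() wrappers; the "uart0" node label is an illustrative assumption):
 *
 *   const struct device *uart = DEVICE_DT_GET(DT_NODELABEL(uart0));
 *   unsigned char c;
 *
 *   while (1) {
 *           if (uart_poll_in(uart, &c) == 0) {
 *                   uart_poll_out(uart, c);   // echo back whatever was received
 *           } else {
 *                   k_msleep(10);             // nothing pending, avoid busy-waiting
 *           }
 *   }
 */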

#ifdef CONFIG_UART_ASYNC_API

static int np_uart_callback_set(const struct device *dev, uart_callback_t callback,
				void *user_data)
{
	struct native_pty_status *data = dev->data;

	data->async.user_callback = callback;
	data->async.user_data = user_data;

	return 0;
}

static void np_uart_tx_done_work(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct native_pty_status *data =
		CONTAINER_OF(dwork, struct native_pty_status, async.tx_done);
	struct uart_event evt;
	unsigned int key = irq_lock();

	evt.type = UART_TX_DONE;
	evt.data.tx.buf = data->async.tx_buf;
	evt.data.tx.len = data->async.tx_len;

	(void)nsi_host_write(data->out_fd, evt.data.tx.buf, evt.data.tx.len);

	data->async.tx_buf = NULL;

	if (data->async.user_callback) {
		data->async.user_callback(data->async.dev, &evt, data->async.user_data);
	}
	irq_unlock(key);
}

static int np_uart_tx(const struct device *dev, const uint8_t *buf, size_t len, int32_t timeout)
{
	struct native_pty_status *data = dev->data;

	if (data->async.tx_buf) {
		/* Port is busy */
		return -EBUSY;
	}
	data->async.tx_buf = buf;
	data->async.tx_len = len;

	/* Run the callback on the next tick to give the caller time to use the return value */
	k_work_reschedule(&data->async.tx_done, K_TICKS(1));
	return 0;
}

static int np_uart_tx_abort(const struct device *dev)
{
	struct native_pty_status *data = dev->data;
	struct k_work_sync sync;
	struct uart_event evt;
	bool not_idle;

	/* Cancel the callback */
	not_idle = k_work_cancel_delayable_sync(&data->async.tx_done, &sync);
	if (!not_idle) {
		return -EFAULT;
	}

	/* Generate TX_DONE event with number of bytes transmitted */
	evt.type = UART_TX_DONE;
	evt.data.tx.buf = data->async.tx_buf;
	evt.data.tx.len = 0;
	if (data->async.user_callback) {
		data->async.user_callback(data->async.dev, &evt, data->async.user_data);
	}

	/* Reset state */
	data->async.tx_buf = NULL;
	return 0;
}

/*
 * Emulate async interrupts using a polling thread
 */
static void native_pty_uart_async_poll_function(void *arg1, void *arg2, void *arg3)
{
	const struct device *dev = arg1;
	struct native_pty_status *data = dev->data;
	struct uart_event evt;
	int rc;

	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);

	while (data->async.rx_len) {
		rc = np_uart_read_n(data, data->async.rx_buf, data->async.rx_len);
		if (rc > 0) {
			/* Data received */
			evt.type = UART_RX_RDY;
			evt.data.rx.buf = data->async.rx_buf;
			evt.data.rx.offset = 0;
			evt.data.rx.len = rc;
			/* User callback */
			if (data->async.user_callback) {
				data->async.user_callback(data->async.dev, &evt,
							  data->async.user_data);
			}
		}
		if ((data->async.rx_len != 0) && (rc < 0)) {
			/* Sleep if RX not disabled and last read didn't result in any data */
			k_sleep(K_MSEC(10));
		}
	}
}

static int np_uart_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len)
{
	/* Driver never requests additional buffers */
	return -ENOTSUP;
}

static int np_uart_rx_enable(const struct device *dev, uint8_t *buf, size_t len, int32_t timeout)
{
	struct native_pty_status *data = dev->data;

	ARG_UNUSED(timeout);

	if (data->async.rx_buf != NULL) {
		return -EBUSY;
	}

	data->async.rx_buf = buf;
	data->async.rx_len = len;

	/* Create a thread which will wait for data - replacement for IRQ */
	k_thread_create(&data->async.rx_thread, data->async.rx_stack,
			K_KERNEL_STACK_SIZEOF(data->async.rx_stack),
			native_pty_uart_async_poll_function,
			(void *)dev, NULL, NULL,
			K_HIGHEST_THREAD_PRIO, 0, K_NO_WAIT);
	return 0;
}

static int np_uart_rx_disable(const struct device *dev)
{
	struct native_pty_status *data = dev->data;

	if (data->async.rx_buf == NULL) {
		return -EFAULT;
	}

	data->async.rx_len = 0;
	data->async.rx_buf = NULL;

	/* Wait for RX thread to terminate */
	return k_thread_join(&data->async.rx_thread, K_FOREVER);
}

#endif /* CONFIG_UART_ASYNC_API */
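
/*
 * Application-side sketch of the async API implemented above (assumes the generic Zephyr
 * uart_callback_set()/uart_tx()/uart_rx_enable() wrappers; the "uart0" node label, tx_data and
 * rx_buf are illustrative assumptions):
 *
 *   static uint8_t rx_buf[64];
 *
 *   static void uart_cb(const struct device *dev, struct uart_event *evt, void *user_data)
 *   {
 *           switch (evt->type) {
 *           case UART_TX_DONE:
 *                   // evt->data.tx.buf / evt->data.tx.len describe the finished transfer
 *                   break;
 *           case UART_RX_RDY:
 *                   // evt->data.rx.buf[evt->data.rx.offset] onwards holds evt->data.rx.len bytes
 *                   break;
 *           default:
 *                   break;
 *           }
 *   }
 *
 *   const struct device *uart = DEVICE_DT_GET(DT_NODELABEL(uart0));
 *   static const uint8_t tx_data[] = "hello\n";
 *
 *   uart_callback_set(uart, uart_cb, NULL);
 *   uart_rx_enable(uart, rx_buf, sizeof(rx_buf), SYS_FOREVER_US);
 *   uart_tx(uart, tx_data, sizeof(tx_data) - 1, SYS_FOREVER_US);
 */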

#ifdef CONFIG_UART_INTERRUPT_DRIVEN
static void np_uart_irq_handler(const struct device *dev)
{
	struct native_pty_status *data = dev->data;

	if (data->irq.callback) {
		data->irq.callback(dev, data->irq.cb_data);
	} else {
		ERROR("%s: No callback registered\n", __func__);
	}
}

static void np_uart_irq_read_1_ahead(struct native_pty_status *data)
{
	int ret = np_uart_read_n(data, &data->irq.char_store, 1);

	if (ret == 1) {
		data->irq.char_ready = true;
	}

	if (data->stdin_disconnected) {
		/* There won't be any more data ever */
		data->irq.rx_enabled = false;
	}
}

/*
 * Emulate UART interrupts using a polling thread
 */
static void np_uart_irq_thread(void *arg1, void *arg2, void *arg3)
{
	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);

	struct device *dev = (struct device *)arg1;
	struct native_pty_status *data = dev->data;

	while (1) {
		if (data->irq.rx_enabled) {
			if (!data->irq.char_ready) {
				np_uart_irq_read_1_ahead(data);
			}

			if (data->irq.char_ready) {
				np_uart_irq_handler(dev);
			}
		}
		if (data->irq.tx_enabled) {
			np_uart_irq_handler(dev);
		}

		if ((data->irq.tx_enabled) ||
		    ((data->irq.rx_enabled) && (data->irq.char_ready))) {
			/* There is pending work. Let's handle it right away */
			continue;
		}

		k_timeout_t wait = K_FOREVER;

		if (data->irq.rx_enabled) {
			wait = K_MSEC(10);
		}
		(void)k_sleep(wait);
	}
}

static void np_uart_irq_thread_start(const struct device *dev)
{
	struct native_pty_status *data = dev->data;

	/* Create a thread which will wait for data - replacement for IRQ */
	k_thread_create(&data->irq.poll_thread, data->irq.poll_stack,
			K_KERNEL_STACK_SIZEOF(data->irq.poll_stack),
			np_uart_irq_thread,
			(void *)dev, NULL, NULL,
			K_HIGHEST_THREAD_PRIO, 0, K_NO_WAIT);
}

static int np_uart_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size)
{
	return np_uart_poll_out_n((struct native_pty_status *)dev->data, tx_data, size);
}

static int np_uart_fifo_read(const struct device *dev, uint8_t *rx_data, const int size)
{
	uint32_t len = 0;
	int ret;
	struct native_pty_status *data = dev->data;

	if ((size <= 0) || data->stdin_disconnected) {
		return 0;
	}

	if (data->irq.char_ready) {
		rx_data[0] = data->irq.char_store;
		rx_data++;
		size--;
		len = 1;
		data->irq.char_ready = false;
		/* Note this native_sim driver code cannot be interrupted,
		 * so there is no race with np_uart_irq_thread()
		 */
	}

	ret = np_uart_read_n(data, rx_data, size);

	if (ret > 0) {
		len += ret;
		np_uart_irq_read_1_ahead(data);
	}

	return len;
}

static int np_uart_irq_tx_ready(const struct device *dev)
{
	struct native_pty_status *data = dev->data;

	return data->irq.tx_enabled ? 1 : 0;
}

static int np_uart_irq_tx_complete(const struct device *dev)
{
	ARG_UNUSED(dev);

	return 1;
}

static void np_uart_irq_tx_enable(const struct device *dev)
{
	struct native_pty_status *data = dev->data;

	bool kick_thread = !data->irq.tx_enabled;

	data->irq.tx_enabled = true;

	if (!atomic_set(&data->irq.thread_started, 1)) {
		np_uart_irq_thread_start(dev);
	}

	if (kick_thread) {
		/* Let's ensure the thread wakes to allow the Tx right away */
		k_wakeup(&data->irq.poll_thread);
	}
}

static void np_uart_irq_tx_disable(const struct device *dev)
{
	struct native_pty_status *data = dev->data;

	data->irq.tx_enabled = false;
}

static void np_uart_irq_rx_enable(const struct device *dev)
{
	struct native_pty_status *data = dev->data;

	if (data->stdin_disconnected) {
		/* There won't ever be data => we ignore the request */
		return;
	}

	bool kick_thread = !data->irq.rx_enabled;

	data->irq.rx_enabled = true;

	if (!atomic_set(&data->irq.thread_started, 1)) {
		np_uart_irq_thread_start(dev);
	}

	if (kick_thread) {
		/* Let's ensure the thread wakes to try to check for data */
		k_wakeup(&data->irq.poll_thread);
	}
}

static void np_uart_irq_rx_disable(const struct device *dev)
{
	struct native_pty_status *data = dev->data;

	data->irq.rx_enabled = false;
}

static int np_uart_irq_rx_ready(const struct device *dev)
{
	struct native_pty_status *data = dev->data;

	if (data->irq.rx_enabled && data->irq.char_ready) {
		return 1;
	}
	return 0;
}

static int np_uart_irq_is_pending(const struct device *dev)
{
	return np_uart_irq_rx_ready(dev) ||
	       np_uart_irq_tx_ready(dev);
}

static int np_uart_irq_update(const struct device *dev)
{
	ARG_UNUSED(dev);

	return 1;
}

static void np_uart_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb,
				     void *cb_data)
{
	struct native_pty_status *data = dev->data;

	data->irq.callback = cb;
	data->irq.cb_data = cb_data;
}
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
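
/*
 * Application-side sketch of the interrupt-driven API emulated above (assumes the generic Zephyr
 * uart_irq_*() wrappers; the "uart0" node label and the buffer handling are illustrative
 * assumptions):
 *
 *   static void irq_cb(const struct device *dev, void *user_data)
 *   {
 *           uint8_t buf[16];
 *
 *           while (uart_irq_update(dev) && uart_irq_is_pending(dev)) {
 *                   if (uart_irq_rx_ready(dev)) {
 *                           int n = uart_fifo_read(dev, buf, sizeof(buf));
 *                           // consume the n bytes now sitting in buf
 *                   }
 *                   if (uart_irq_tx_ready(dev)) {
 *                           uart_irq_tx_disable(dev); // nothing more to send in this sketch
 *                   }
 *           }
 *   }
 *
 *   const struct device *uart = DEVICE_DT_GET(DT_NODELABEL(uart0));
 *
 *   uart_irq_callback_set(uart, irq_cb, NULL);
 *   uart_irq_rx_enable(uart);
 */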


#define NATIVE_PTY_SET_AUTO_ATTACH_CMD(inst, cmd) \
	native_pty_status_##inst.auto_attach_cmd = cmd;
#define NATIVE_PTY_SET_AUTO_ATTACH(inst, value) \
	native_pty_status_##inst.auto_attach = value;
#define NATIVE_PTY_SET_WAIT_PTS(inst, value) \
	native_pty_status_##inst.wait_pts = value;

static void auto_attach_cmd_cb(char *argv, int offset)
{
	DT_INST_FOREACH_STATUS_OKAY_VARGS(NATIVE_PTY_SET_AUTO_ATTACH_CMD, &argv[offset]);
	DT_INST_FOREACH_STATUS_OKAY_VARGS(NATIVE_PTY_SET_AUTO_ATTACH, true);
}

static void auto_attach_cb(char *argv, int offset)
{
	DT_INST_FOREACH_STATUS_OKAY_VARGS(NATIVE_PTY_SET_AUTO_ATTACH, true);
}

static void wait_pts_cb(char *argv, int offset)
{
	DT_INST_FOREACH_STATUS_OKAY_VARGS(NATIVE_PTY_SET_WAIT_PTS, true);
}

#define INST_NAME(inst) DEVICE_DT_NAME(DT_DRV_INST(inst))

#define NATIVE_PTY_COMMAND_LINE_OPTS(inst) \
	{ \
		.is_switch = true, \
		.option = INST_NAME(inst) "_stdinout", \
		.type = 'b', \
		.dest = &native_pty_status_##inst.cmd_request_stdinout, \
		.descript = "Connect "INST_NAME(inst)" to STDIN/OUT instead of a PTY" \
			    " (can only be done for one UART)" \
	}, \
	{ \
		.is_switch = true, \
		.option = INST_NAME(inst) "_attach_uart", \
		.type = 'b', \
		.dest = &native_pty_status_##inst.auto_attach, \
		.descript = "Automatically attach "INST_NAME(inst)" to a terminal emulator." \
			    " (only applicable when connected to PTYs)" \
	}, \
	{ \
		.option = INST_NAME(inst) "_attach_uart_cmd", \
		.name = "\"cmd\"", \
		.type = 's', \
		.dest = &native_pty_status_##inst.auto_attach_cmd, \
		.descript = "Command used to automatically attach to the terminal "INST_NAME(inst) \
			    " (implies "INST_NAME(inst)"_attach_uart), by default: " \
			    "'" CONFIG_UART_NATIVE_PTY_AUTOATTACH_DEFAULT_CMD "'" \
			    " (only applicable when connected to PTYs)" \
	}, \
	{ \
		.is_switch = true, \
		.option = INST_NAME(inst) "_wait_uart", \
		.type = 'b', \
		.dest = &native_pty_status_##inst.wait_pts, \
		.descript = "Hold writes to "INST_NAME(inst)" until a client is connected/ready" \
			    " (only applicable when connected to PTYs)" \
	},

static void np_add_uart_options(void)
{
	static struct args_struct_t uart_options[] = {
		/* Set of parameters that apply to all PTY UARTs: */
		{
			.is_switch = true,
			.option = "attach_uart",
			.type = 'b',
			.call_when_found = auto_attach_cb,
			.descript = "Automatically attach all PTY UARTs to a terminal emulator."
				    " (only applicable when connected to PTYs)"
		},
		{
			.option = "attach_uart_cmd",
			.name = "\"cmd\"",
			.type = 's',
			.call_when_found = auto_attach_cmd_cb,
			.descript = "Command used to automatically attach all PTY UARTs to a terminal "
				    "emulator (implies attach_uart), by default: "
				    "'" CONFIG_UART_NATIVE_PTY_AUTOATTACH_DEFAULT_CMD "'"
				    " (only applicable when connected to PTYs)"
		},
		{
			.is_switch = true,
			.option = "wait_uart",
			.type = 'b',
			.call_when_found = wait_pts_cb,
			.descript = "Hold writes to all PTY UARTs until a client is connected/ready"
				    " (only applicable when connected to PTYs)"
		},
		/* Set of parameters that apply to each individual PTY UART: */
		DT_INST_FOREACH_STATUS_OKAY(NATIVE_PTY_COMMAND_LINE_OPTS)
		ARG_TABLE_ENDMARKER
	};

	native_add_command_line_opts(uart_options);
}

#define NATIVE_PTY_CLEANUP(inst) \
	if ((!native_pty_status_##inst.on_stdinout) && (native_pty_status_##inst.in_fd != 0)) { \
		nsi_host_close(native_pty_status_##inst.in_fd); \
		native_pty_status_##inst.in_fd = 0; \
	}

static void np_cleanup_uart(void)
{
	DT_INST_FOREACH_STATUS_OKAY(NATIVE_PTY_CLEANUP);
}

NATIVE_TASK(np_add_uart_options, PRE_BOOT_1, 11);
NATIVE_TASK(np_cleanup_uart, ON_EXIT, 99);