/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(uart_mux, CONFIG_UART_MUX_LOG_LEVEL);

#include <zephyr/sys/__assert.h>
#include <zephyr/kernel.h>
#include <zephyr/init.h>
#include <zephyr/syscall_handler.h>
#include <zephyr/device.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/drivers/console/uart_mux.h>
#include <zephyr/sys/ring_buffer.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/iterable_sections.h>

#include "gsm_mux.h"

#if CONFIG_UART_MUX_DEVICE_COUNT == 0
#error "CONFIG_UART_MUX_DEVICE_COUNT tells the number of DLCIs to create " \
	"and must be >0"
#endif

#define UART_MUX_WORKQ_PRIORITY CONFIG_UART_MUX_RX_PRIORITY
#define UART_MUX_WORKQ_STACK_SIZE CONFIG_UART_MUX_RX_STACK_SIZE

/* All RX/TX data is passed via a dedicated workqueue. This is done because
 * the GSM modem driver uses the global workqueue, which would cause
 * difficulties if we did the same here. This workqueue is shared between
 * all the DLCI channels.
 */
K_KERNEL_STACK_DEFINE(uart_mux_stack, UART_MUX_WORKQ_STACK_SIZE);
static struct k_work_q uart_mux_workq;

/* The UART mux contains information about the real UART. It synchronizes
 * access to the real UART and passes data between it and the GSM muxing API.
 * Usually there is only one instance of these in the system, as typically
 * only one UART is connected to the modem device.
 */
struct uart_mux {
	/* The real UART device that is shared between muxed UARTs */
	const struct device *uart;

	/* GSM mux related to this UART */
	struct gsm_mux *mux;

	/* Received data is routed from ISR to MUX API via ring buffer */
	struct ring_buf *rx_ringbuf;

	/* RX worker that passes data from RX ISR to GSM mux API */
	struct k_work rx_work;

	/* Mutex for accessing the real UART */
	struct k_mutex lock;

	/* Flag that tells whether this instance is initialized or not */
	atomic_t init_done;

	/* Temporary buffer when reading data in ISR */
	uint8_t rx_buf[CONFIG_UART_MUX_TEMP_BUF_SIZE];
};

#define DEFINE_UART_MUX(x, _)						\
	RING_BUF_DECLARE(uart_rx_ringbuf_##x,				\
			 CONFIG_UART_MUX_RINGBUF_SIZE);			\
	STRUCT_SECTION_ITERABLE(uart_mux, uart_mux_##x) = {		\
			.rx_ringbuf = &uart_rx_ringbuf_##x,		\
	}

LISTIFY(CONFIG_UART_MUX_REAL_DEVICE_COUNT, DEFINE_UART_MUX, (;), _);

STRUCT_SECTION_START_EXTERN(uart_mux);
STRUCT_SECTION_END_EXTERN(uart_mux);

/* UART Mux Driver Status Codes */
enum uart_mux_status_code {
	UART_MUX_UNKNOWN,      /* Initial connection status   */
	UART_MUX_CONFIGURED,   /* UART mux configuration done */
	UART_MUX_CONNECTED,    /* UART mux connected          */
	UART_MUX_DISCONNECTED, /* UART mux connection lost    */
};

struct uart_mux_config {
};

struct uart_mux_dev_data {
	sys_snode_t node;

	/* Configuration data */
	struct uart_mux_config cfg;

	/* This UART mux device */
	const struct device *dev;

	/* The real UART device that we are running on top of */
	struct uart_mux *real_uart;

	/* TX worker that will mux the transmitted data */
	struct k_work tx_work;

	/* ISR function callback worker */
	struct k_work cb_work;

	/* ISR function callback */
	uart_irq_callback_user_data_t cb;
	void *cb_user_data;

	/* Attach callback */
	uart_mux_attach_cb_t attach_cb;
	void *attach_user_data;

	/* TX data from application is handled via ring buffer */
	struct ring_buf *tx_ringbuf;

	/* Received data is routed from RX worker to application via ring
	 * buffer.
	 */
	struct ring_buf *rx_ringbuf;

	/* Muxing status */
	enum uart_mux_status_code status;

	/* DLCI (muxing virtual channel) linked to this muxed UART */
	struct gsm_dlci *dlci;

	/* Status (enabled / disabled) for RX and TX */
	bool rx_enabled : 1;
	bool tx_enabled : 1;
	bool rx_ready : 1;
	bool tx_ready : 1;
	bool in_use : 1;
};

struct uart_mux_cfg_data {
};

static sys_slist_t uart_mux_data_devlist;
static void uart_mux_cb_work(struct k_work *work)
{
	struct uart_mux_dev_data *dev_data =
		CONTAINER_OF(work, struct uart_mux_dev_data, cb_work);

	dev_data->cb(dev_data->dev, dev_data->cb_user_data);
}

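/* Pull one contiguous chunk of muxed data out of the RX ring buffer and feed
 * it to the GSM mux parser. Returns -EAGAIN if data was consumed (so the
 * caller should try again) and 0 when the ring buffer is empty.
 */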
static int uart_mux_consume_ringbuf(struct uart_mux *uart_mux)
{
	uint8_t *data;
	size_t len;
	int ret;

	len = ring_buf_get_claim(uart_mux->rx_ringbuf, &data,
				 CONFIG_UART_MUX_RINGBUF_SIZE);
	if (len == 0) {
		LOG_DBG("Ringbuf %p is empty!", uart_mux->rx_ringbuf);
		return 0;
	}

	/* We have now received muxed data. Push that through GSM mux API which
	 * will parse it and call proper functions to get the data to the user.
	 */

	if (IS_ENABLED(CONFIG_UART_MUX_VERBOSE_DEBUG)) {
		char tmp[sizeof("RECV muxed ") + 10];

		snprintk(tmp, sizeof(tmp), "RECV muxed %s",
			 uart_mux->uart->name);
		LOG_HEXDUMP_DBG(data, len, tmp);
	}

	gsm_mux_recv_buf(uart_mux->mux, data, len);

	ret = ring_buf_get_finish(uart_mux->rx_ringbuf, len);
	if (ret < 0) {
		LOG_DBG("Cannot flush ring buffer (%d)", ret);
	}

	return -EAGAIN;
}

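/* RX worker: drain the shared RX ring buffer until it is empty. */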
static void uart_mux_rx_work(struct k_work *work)
{
	struct uart_mux *uart_mux =
		CONTAINER_OF(work, struct uart_mux, rx_work);
	int ret;

	do {
		ret = uart_mux_consume_ringbuf(uart_mux);
	} while (ret == -EAGAIN);
}

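/* TX worker: take application data from the TX ring buffer and hand it to the
 * DLCI so that it gets wrapped into mux frames before reaching the real UART.
 */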
static void uart_mux_tx_work(struct k_work *work)
{
	struct uart_mux_dev_data *dev_data =
		CONTAINER_OF(work, struct uart_mux_dev_data, tx_work);
	uint8_t *data;
	size_t len;

	len = ring_buf_get_claim(dev_data->tx_ringbuf, &data,
				 CONFIG_UART_MUX_RINGBUF_SIZE);
	if (!len) {
		LOG_DBG("Ringbuf %p empty!", dev_data->tx_ringbuf);
		return;
	}

	LOG_DBG("Got %ld bytes from ringbuffer, sending to uart %p",
		(unsigned long)len, dev_data->dev);

	if (IS_ENABLED(CONFIG_UART_MUX_VERBOSE_DEBUG)) {
		char tmp[sizeof("SEND _x") +
			 sizeof(CONFIG_UART_MUX_DEVICE_NAME)];

		snprintk(tmp, sizeof(tmp), "SEND %s",
			 dev_data->dev->name);
		LOG_HEXDUMP_DBG(data, len, tmp);
	}

	(void)gsm_dlci_send(dev_data->dlci, data, len);

	ring_buf_get_finish(dev_data->tx_ringbuf, len);
}

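/* Device init for one muxed (virtual) UART instance. The real UART is bound
 * later when the user attaches to this device.
 */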
static int uart_mux_init(const struct device *dev)
{
	struct uart_mux_dev_data *dev_data = dev->data;

	gsm_mux_init();

	dev_data->dev = dev;
	dev_data->real_uart = NULL; /* will be set when the user attaches to it */

	sys_slist_find_and_remove(&uart_mux_data_devlist, &dev_data->node);
	sys_slist_prepend(&uart_mux_data_devlist, &dev_data->node);

	k_work_init(&dev_data->tx_work, uart_mux_tx_work);
	k_work_init(&dev_data->cb_work, uart_mux_cb_work);

	LOG_DBG("Device %s dev %p dev_data %p cfg %p created",
		dev->name, dev, dev_data, dev->config);

	return 0;
}

/* This IRQ handler is shared between the muxing UARTs. The data received here
 * is queued to a ring buffer and then pushed to the GSM mux API in
 * uart_mux_rx_work(), which will call the proper callbacks to pass the data
 * to the correct recipient.
 */
static void uart_mux_isr(const struct device *uart, void *user_data)
{
	struct uart_mux *real_uart = user_data;
	int rx = 0;
	size_t wrote = 0;

	/* Read all data off UART, and send to RX worker for unmuxing */
	while (uart_irq_update(uart) &&
	       uart_irq_rx_ready(uart)) {
		rx = uart_fifo_read(uart, real_uart->rx_buf,
				    sizeof(real_uart->rx_buf));
		if (rx <= 0) {
			continue;
		}

		wrote = ring_buf_put(real_uart->rx_ringbuf,
				     real_uart->rx_buf, rx);
		if (wrote < rx) {
			LOG_ERR("Ring buffer full, drop %ld bytes", (long)(rx - wrote));
		}

		k_work_submit_to_queue(&uart_mux_workq, &real_uart->rx_work);
	}
}

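/* Drain any stale bytes left in the real UART RX FIFO. */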
static void uart_mux_flush_isr(const struct device *dev)
{
	uint8_t c;

	while (uart_fifo_read(dev, &c, 1) > 0) {
		continue;
	}
}

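/* Stop muxing on the real UART: disable its interrupts, flush pending RX data
 * and detach the GSM mux.
 */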
void uart_mux_disable(const struct device *dev)
{
	struct uart_mux_dev_data *dev_data = dev->data;
	const struct device *uart = dev_data->real_uart->uart;

	uart_irq_rx_disable(uart);
	uart_irq_tx_disable(uart);
	uart_mux_flush_isr(uart);

	gsm_mux_detach(dev_data->real_uart->mux);
}

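/* (Re)claim the real UART for muxing: install the shared ISR and enable RX
 * interrupts.
 */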
void uart_mux_enable(const struct device *dev)
{
	struct uart_mux_dev_data *dev_data = dev->data;
	struct uart_mux *real_uart = dev_data->real_uart;

	LOG_DBG("Claiming uart for uart_mux");

	uart_irq_rx_disable(real_uart->uart);
	uart_irq_tx_disable(real_uart->uart);
	uart_mux_flush_isr(real_uart->uart);
	uart_irq_callback_user_data_set(
		real_uart->uart, uart_mux_isr,
		real_uart);

	uart_irq_rx_enable(real_uart->uart);
}

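/* Called by the GSM mux when the DLCI has been created (or the creation
 * failed). Update the connection status and notify the attach callback, if
 * one was registered.
 */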
static void dlci_created_cb(struct gsm_dlci *dlci, bool connected,
			    void *user_data)
{
	struct uart_mux_dev_data *dev_data = user_data;

	if (connected) {
		dev_data->status = UART_MUX_CONNECTED;
	} else {
		dev_data->status = UART_MUX_DISCONNECTED;
	}

	LOG_DBG("%s %s", dev_data->dev->name,
		dev_data->status == UART_MUX_CONNECTED ? "connected" :
							 "disconnected");

	if (dev_data->attach_cb) {
		dev_data->attach_cb(dev_data->dev,
				    dlci ? gsm_dlci_id(dlci) : -1,
				    connected,
				    dev_data->attach_user_data);
	}
}

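/* Find the uart_mux instance bound to the given real UART, or claim a free
 * one for it. On first use the GSM mux is created and the shared ISR is
 * installed on the real UART.
 */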
static int init_real_uart(const struct device *mux, const struct device *uart,
			  struct uart_mux **mux_uart)
{
	bool found = false;
	struct uart_mux *real_uart;

	for (real_uart = TYPE_SECTION_START(uart_mux);
	     real_uart != TYPE_SECTION_END(uart_mux);
	     real_uart++) {
		if (real_uart->uart == uart) {
			found = true;
			break;
		}
	}

	if (found == false) {
		for (real_uart = TYPE_SECTION_START(uart_mux);
		     real_uart != TYPE_SECTION_END(uart_mux);
		     real_uart++) {
			if (real_uart->uart == NULL) {
				real_uart->uart = uart;
				found = true;
				break;
			}
		}

		if (found == false) {
			return -ENOENT;
		}
	}

	/* Init the real UART only once */
	if (atomic_cas(&real_uart->init_done, false, true)) {
		real_uart->mux = gsm_mux_create(mux);

		LOG_DBG("Initializing UART %s and GSM mux %p",
			real_uart->uart->name, (void *)real_uart->mux);

		if (!real_uart->mux) {
			real_uart->uart = NULL;
			atomic_clear(&real_uart->init_done);
			return -ENOMEM;
		}

		k_work_init(&real_uart->rx_work, uart_mux_rx_work);
		k_mutex_init(&real_uart->lock);

		uart_irq_rx_disable(real_uart->uart);
		uart_irq_tx_disable(real_uart->uart);
		uart_mux_flush_isr(real_uart->uart);
		uart_irq_callback_user_data_set(
			real_uart->uart, uart_mux_isr,
			real_uart);

		uart_irq_rx_enable(real_uart->uart);
	}

	__ASSERT(real_uart->uart, "Real UART not set");

	*mux_uart = real_uart;

	return 0;
}

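/* Typical usage (sketch): the application reserves a muxed UART with
 * uart_mux_alloc() and then calls uart_mux_attach() from uart_mux.h with the
 * real UART, the DLCI address and an optional callback; that call lands here
 * through the driver API.
 */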
/* This will bind the physical (real) UART to this muxed UART */
static int attach(const struct device *mux_uart, const struct device *uart,
		  int dlci_address, uart_mux_attach_cb_t cb,
		  void *user_data)
{
	sys_snode_t *sn, *sns;

	if (mux_uart == NULL || uart == NULL) {
		return -EINVAL;
	}

	LOG_DBG("Attach DLCI %d (%s) to %s", dlci_address,
		mux_uart->name, uart->name);

	SYS_SLIST_FOR_EACH_NODE_SAFE(&uart_mux_data_devlist, sn, sns) {
		struct uart_mux_dev_data *dev_data =
			CONTAINER_OF(sn, struct uart_mux_dev_data, node);

		if (dev_data->dev == mux_uart) {
			struct uart_mux *real_uart;
			int ret;

			ret = init_real_uart(mux_uart, uart, &real_uart);
			if (ret < 0) {
				return ret;
			}

			dev_data->real_uart = real_uart;
			dev_data->tx_ready = true;
			dev_data->tx_enabled = true;
			dev_data->rx_enabled = true;
			dev_data->attach_cb = cb;
			dev_data->attach_user_data = user_data;
			dev_data->status = UART_MUX_CONFIGURED;

			ret = gsm_dlci_create(real_uart->mux,
					      mux_uart,
					      dlci_address,
					      dlci_created_cb,
					      dev_data,
					      &dev_data->dlci);
			if (ret < 0) {
				LOG_DBG("Cannot create DLCI %d (%d)",
					dlci_address, ret);
				return ret;
			}

			return 0;
		}
	}

	return -ENOENT;
}

static int uart_mux_poll_in(const struct device *dev, unsigned char *p_char)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(p_char);

	return -ENOTSUP;
}

static void uart_mux_poll_out(const struct device *dev,
			      unsigned char out_char)
{
	struct uart_mux_dev_data *dev_data = dev->data;

	if (dev_data->dev == NULL) {
		return;
	}

	(void)gsm_dlci_send(dev_data->dlci, &out_char, 1);
}

static int uart_mux_err_check(const struct device *dev)
{
	ARG_UNUSED(dev);

	return -ENOTSUP;
}

static int uart_mux_configure(const struct device *dev,
			      const struct uart_config *cfg)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(cfg);

	return -ENOTSUP;
}

static int uart_mux_config_get(const struct device *dev,
			       struct uart_config *cfg)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(cfg);

	return -ENOTSUP;
}

static int uart_mux_fifo_fill(const struct device *dev,
			      const uint8_t *tx_data, int len)
{
	struct uart_mux_dev_data *dev_data;
	size_t wrote;

	if (dev == NULL) {
		return -EINVAL;
	}

	dev_data = dev->data;
	if (dev_data->dev == NULL) {
		return -ENOENT;
	}

	/* If we're not in ISR context, do the xfer synchronously. This
	 * effectively lets applications use this implementation of fifo_fill
	 * as a multi-byte poll_out which prevents each byte getting wrapped by
	 * mux headers.
	 */
	if (!k_is_in_isr() && dev_data->dlci) {
		return gsm_dlci_send(dev_data->dlci, tx_data, len);
	}

	LOG_DBG("dev_data %p len %d tx_ringbuf space %u",
		dev_data, len, ring_buf_space_get(dev_data->tx_ringbuf));

	if (dev_data->status != UART_MUX_CONNECTED) {
		LOG_WRN("UART mux not connected, drop %d bytes", len);
		return 0;
	}

	dev_data->tx_ready = false;

	wrote = ring_buf_put(dev_data->tx_ringbuf, tx_data, len);
	if (wrote < len) {
		LOG_WRN("Ring buffer full, drop %ld bytes", (long)(len - wrote));
	}

	k_work_submit_to_queue(&uart_mux_workq, &dev_data->tx_work);

	return wrote;
}

static int uart_mux_fifo_read(const struct device *dev, uint8_t *rx_data,
			      const int size)
{
	struct uart_mux_dev_data *dev_data;
	uint32_t len;

	if (dev == NULL) {
		return -EINVAL;
	}

	dev_data = dev->data;
	if (dev_data->dev == NULL) {
		return -ENOENT;
	}

	LOG_DBG("%s size %d rx_ringbuf space %u",
		dev->name, size,
		ring_buf_space_get(dev_data->rx_ringbuf));

	len = ring_buf_get(dev_data->rx_ringbuf, rx_data, size);

	if (ring_buf_is_empty(dev_data->rx_ringbuf)) {
		dev_data->rx_ready = false;
	}

	return len;
}

static void uart_mux_irq_tx_enable(const struct device *dev)
{
	struct uart_mux_dev_data *dev_data = dev->data;

	if (dev_data == NULL || dev_data->dev == NULL) {
		return;
	}

	dev_data->tx_enabled = true;

	if (dev_data->cb && dev_data->tx_ready) {
		k_work_submit_to_queue(&uart_mux_workq, &dev_data->cb_work);
	}
}

static void uart_mux_irq_tx_disable(const struct device *dev)
{
	struct uart_mux_dev_data *dev_data = dev->data;

	if (dev_data == NULL || dev_data->dev == NULL) {
		return;
	}

	dev_data->tx_enabled = false;
}

static int uart_mux_irq_tx_ready(const struct device *dev)
{
	struct uart_mux_dev_data *dev_data = dev->data;

	if (dev_data == NULL) {
		return -EINVAL;
	}

	if (dev_data->dev == NULL) {
		return -ENOENT;
	}

	return dev_data->tx_ready;
}

static void uart_mux_irq_rx_enable(const struct device *dev)
{
	struct uart_mux_dev_data *dev_data = dev->data;

	if (dev_data == NULL || dev_data->dev == NULL) {
		return;
	}

	dev_data->rx_enabled = true;

	if (dev_data->cb && dev_data->rx_ready) {
		k_work_submit_to_queue(&uart_mux_workq, &dev_data->cb_work);
	}
}

static void uart_mux_irq_rx_disable(const struct device *dev)
{
	struct uart_mux_dev_data *dev_data = dev->data;

	if (dev_data == NULL || dev_data->dev == NULL) {
		return;
	}

	dev_data->rx_enabled = false;
}

static int uart_mux_irq_tx_complete(const struct device *dev)
{
	ARG_UNUSED(dev);

	return -ENOTSUP;
}

static int uart_mux_irq_rx_ready(const struct device *dev)
{
	struct uart_mux_dev_data *dev_data = dev->data;

	if (dev_data == NULL) {
		return -EINVAL;
	}

	if (dev_data->dev == NULL) {
		return -ENOENT;
	}

	return dev_data->rx_ready;
}

static void uart_mux_irq_err_enable(const struct device *dev)
{
	ARG_UNUSED(dev);
}

static void uart_mux_irq_err_disable(const struct device *dev)
{
	ARG_UNUSED(dev);
}

static int uart_mux_irq_is_pending(const struct device *dev)
{
	struct uart_mux_dev_data *dev_data = dev->data;

	if (dev_data == NULL || dev_data->dev == NULL) {
		return 0;
	}

	if (dev_data->tx_ready && dev_data->tx_enabled) {
		return 1;
	}

	if (dev_data->rx_ready && dev_data->rx_enabled) {
		return 1;
	}

	return 0;
}

static int uart_mux_irq_update(const struct device *dev)
{
	ARG_UNUSED(dev);

	return 1;
}

static void uart_mux_irq_callback_set(const struct device *dev,
				      uart_irq_callback_user_data_t cb,
				      void *user_data)
{
	struct uart_mux_dev_data *dev_data = dev->data;

	if (dev_data == NULL) {
		return;
	}

	dev_data->cb = cb;
	dev_data->cb_user_data = user_data;
}

static struct uart_mux_driver_api uart_mux_driver_api = {
	.uart_api.poll_in = uart_mux_poll_in,
	.uart_api.poll_out = uart_mux_poll_out,
	.uart_api.err_check = uart_mux_err_check,
	.uart_api.configure = uart_mux_configure,
	.uart_api.config_get = uart_mux_config_get,
	.uart_api.fifo_fill = uart_mux_fifo_fill,
	.uart_api.fifo_read = uart_mux_fifo_read,
	.uart_api.irq_tx_enable = uart_mux_irq_tx_enable,
	.uart_api.irq_tx_disable = uart_mux_irq_tx_disable,
	.uart_api.irq_tx_ready = uart_mux_irq_tx_ready,
	.uart_api.irq_rx_enable = uart_mux_irq_rx_enable,
	.uart_api.irq_rx_disable = uart_mux_irq_rx_disable,
	.uart_api.irq_tx_complete = uart_mux_irq_tx_complete,
	.uart_api.irq_rx_ready = uart_mux_irq_rx_ready,
	.uart_api.irq_err_enable = uart_mux_irq_err_enable,
	.uart_api.irq_err_disable = uart_mux_irq_err_disable,
	.uart_api.irq_is_pending = uart_mux_irq_is_pending,
	.uart_api.irq_update = uart_mux_irq_update,
	.uart_api.irq_callback_set = uart_mux_irq_callback_set,

	.attach = attach,
};

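/* Reserve the first unused muxed UART instance and return its device. */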
const struct device *uart_mux_alloc(void)
{
	sys_snode_t *sn, *sns;

	SYS_SLIST_FOR_EACH_NODE_SAFE(&uart_mux_data_devlist, sn, sns) {
		struct uart_mux_dev_data *dev_data =
			CONTAINER_OF(sn, struct uart_mux_dev_data, node);

		if (dev_data->in_use) {
			continue;
		}

		dev_data->in_use = true;

		return dev_data->dev;
	}

	return NULL;
}

#ifdef CONFIG_USERSPACE
static inline const struct device *z_vrfy_uart_mux_find(int dlci_address)
{
	return z_impl_uart_mux_find(dlci_address);
}
#include <syscalls/uart_mux_find_mrsh.c>
#endif /* CONFIG_USERSPACE */

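/* Look up an in-use muxed UART device by its DLCI address. */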
const struct device *z_impl_uart_mux_find(int dlci_address)
{
	sys_snode_t *sn, *sns;

	SYS_SLIST_FOR_EACH_NODE_SAFE(&uart_mux_data_devlist, sn, sns) {
		struct uart_mux_dev_data *dev_data =
			CONTAINER_OF(sn, struct uart_mux_dev_data, node);

		if (!dev_data->in_use) {
			continue;
		}

		if (dev_data->dlci == NULL) {
			continue;
		}

		if (gsm_dlci_id(dev_data->dlci) == dlci_address) {
			return dev_data->dev;
		}
	}

	return NULL;
}

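/* Write already muxed data out to the real UART. Access to the real UART is
 * serialized with the per-UART mutex.
 */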
int uart_mux_send(const struct device *uart, const uint8_t *buf, size_t size)
{
	struct uart_mux_dev_data *dev_data = uart->data;
	size_t remaining = size;

	if (size == 0) {
		return 0;
	}

	if (atomic_get(&dev_data->real_uart->init_done) == false) {
		return -ENODEV;
	}

	if (IS_ENABLED(CONFIG_UART_MUX_VERBOSE_DEBUG)) {
		char tmp[sizeof("SEND muxed ") + 10];

		snprintk(tmp, sizeof(tmp), "SEND muxed %s",
			 dev_data->real_uart->uart->name);
		LOG_HEXDUMP_DBG(buf, size, tmp);
	}

	k_mutex_lock(&dev_data->real_uart->lock, K_FOREVER);

	do {
		uart_poll_out(dev_data->real_uart->uart, *buf++);
	} while (--remaining);

	k_mutex_unlock(&dev_data->real_uart->lock);

	return size;
}

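/* Deliver demuxed data for this DLCI to the muxed UART: queue it into the RX
 * ring buffer and notify the application via the ISR callback if RX is
 * enabled.
 */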
int uart_mux_recv(const struct device *mux, struct gsm_dlci *dlci,
		  uint8_t *data,
		  size_t len)
{
	struct uart_mux_dev_data *dev_data = mux->data;
	size_t wrote = 0;

	LOG_DBG("%s: dlci %p data %p len %zd", mux->name, (void *)dlci,
		data, len);

	if (IS_ENABLED(CONFIG_UART_MUX_VERBOSE_DEBUG)) {
		char tmp[sizeof("RECV _x") +
			 sizeof(CONFIG_UART_MUX_DEVICE_NAME)];

		snprintk(tmp, sizeof(tmp), "RECV %s",
			 dev_data->dev->name);
		LOG_HEXDUMP_DBG(data, len, tmp);
	}

	wrote = ring_buf_put(dev_data->rx_ringbuf, data, len);
	if (wrote < len) {
		LOG_ERR("Ring buffer full, drop %ld bytes", (long)(len - wrote));
	}

	dev_data->rx_ready = true;

	if (dev_data->cb && dev_data->rx_enabled) {
		k_work_submit_to_queue(&uart_mux_workq, &dev_data->cb_work);
	}

	return wrote;
}

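/* Call the given callback for every muxed UART instance that is in use. */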
void uart_mux_foreach(uart_mux_cb_t cb, void *user_data)
{
	sys_snode_t *sn, *sns;

	SYS_SLIST_FOR_EACH_NODE_SAFE(&uart_mux_data_devlist, sn, sns) {
		struct uart_mux_dev_data *dev_data =
			CONTAINER_OF(sn, struct uart_mux_dev_data, node);

		if (!dev_data->in_use) {
			continue;
		}

		cb(dev_data->real_uart->uart, dev_data->dev,
		   dev_data->dlci ? gsm_dlci_id(dev_data->dlci) : -1,
		   user_data);
	}
}

#define DEFINE_UART_MUX_CFG_DATA(x, _)					  \
	struct uart_mux_cfg_data uart_mux_config_##x = {		  \
	}

#define DEFINE_UART_MUX_DEV_DATA(x, _)					  \
	RING_BUF_DECLARE(tx_ringbuf_##x, CONFIG_UART_MUX_RINGBUF_SIZE);	  \
	RING_BUF_DECLARE(rx_ringbuf_##x, CONFIG_UART_MUX_RINGBUF_SIZE);	  \
	static struct uart_mux_dev_data uart_mux_dev_data_##x = {	  \
		.tx_ringbuf = &tx_ringbuf_##x,				  \
		.rx_ringbuf = &rx_ringbuf_##x,				  \
	}

#define DEFINE_UART_MUX_DEVICE(x, _)					  \
	DEVICE_DEFINE(uart_mux_##x,					  \
			    CONFIG_UART_MUX_DEVICE_NAME "_" #x,		  \
			    &uart_mux_init,				  \
			    NULL,					  \
			    &uart_mux_dev_data_##x,			  \
			    &uart_mux_config_##x,			  \
			    POST_KERNEL,				  \
			    CONFIG_CONSOLE_INIT_PRIORITY,		  \
			    &uart_mux_driver_api)

LISTIFY(CONFIG_UART_MUX_DEVICE_COUNT, DEFINE_UART_MUX_CFG_DATA, (;), _);
LISTIFY(CONFIG_UART_MUX_DEVICE_COUNT, DEFINE_UART_MUX_DEV_DATA, (;), _);
LISTIFY(CONFIG_UART_MUX_DEVICE_COUNT, DEFINE_UART_MUX_DEVICE, (;), _);

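/* Start the workqueue that is shared by all the mux instances for RX/TX
 * processing.
 */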
static int init_uart_mux(void)
{
	k_work_queue_start(&uart_mux_workq, uart_mux_stack,
			   K_KERNEL_STACK_SIZEOF(uart_mux_stack),
			   K_PRIO_COOP(UART_MUX_WORKQ_PRIORITY), NULL);
	k_thread_name_set(&uart_mux_workq.thread, "uart_mux_workq");

	return 0;
}

SYS_INIT(init_uart_mux, POST_KERNEL, CONFIG_CONSOLE_INIT_PRIORITY);