/*
 * Copyright (c) 2018 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <shell/shell_uart.h>
#include <drivers/uart.h>
#include <init.h>
#include <logging/log.h>
#include <net/buf.h>

#define LOG_MODULE_NAME shell_uart
LOG_MODULE_REGISTER(shell_uart);

#ifdef CONFIG_SHELL_BACKEND_SERIAL_RX_POLL_PERIOD
#define RX_POLL_PERIOD K_MSEC(CONFIG_SHELL_BACKEND_SERIAL_RX_POLL_PERIOD)
#else
#define RX_POLL_PERIOD K_NO_WAIT
#endif

#ifdef CONFIG_MCUMGR_SMP_SHELL
NET_BUF_POOL_DEFINE(smp_shell_rx_pool, CONFIG_MCUMGR_SMP_SHELL_RX_BUF_COUNT,
		    SMP_SHELL_RX_BUF_SIZE, 0, NULL);
#endif /* CONFIG_MCUMGR_SMP_SHELL */

SHELL_UART_DEFINE(shell_transport_uart,
		  CONFIG_SHELL_BACKEND_SERIAL_TX_RING_BUFFER_SIZE,
		  CONFIG_SHELL_BACKEND_SERIAL_RX_RING_BUFFER_SIZE);
SHELL_DEFINE(shell_uart, CONFIG_SHELL_PROMPT_UART, &shell_transport_uart,
	     CONFIG_SHELL_BACKEND_SERIAL_LOG_MESSAGE_QUEUE_SIZE,
	     CONFIG_SHELL_BACKEND_SERIAL_LOG_MESSAGE_QUEUE_TIMEOUT,
	     SHELL_FLAG_OLF_CRLF);

#ifdef CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN
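/* Drain the UART RX FIFO into the RX ring buffer, diverting bytes that
 * belong to an mcumgr frame to SMP, and notify the shell thread when new
 * data has arrived.
 */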
static void uart_rx_handle(const struct device *dev,
			   const struct shell_uart *sh_uart)
{
	uint8_t *data;
	uint32_t len;
	uint32_t rd_len;
	bool new_data = false;
#ifdef CONFIG_MCUMGR_SMP_SHELL
	struct smp_shell_data *const smp = &sh_uart->ctrl_blk->smp;
#endif

	do {
		len = ring_buf_put_claim(sh_uart->rx_ringbuf, &data,
					 sh_uart->rx_ringbuf->size);

		if (len > 0) {
			rd_len = uart_fifo_read(dev, data, len);

			/* If there is any new data to be either taken into
			 * the ring buffer or consumed by SMP, signal the
			 * shell thread.
			 */
			if (rd_len > 0) {
				new_data = true;
			}
#ifdef CONFIG_MCUMGR_SMP_SHELL
			/* Divert bytes from shell handling if they are
			 * part of an mcumgr frame.
			 */
			size_t i = smp_shell_rx_bytes(smp, data, rd_len);

			rd_len -= i;

			if (rd_len) {
				for (uint32_t j = 0; j < rd_len; j++) {
					data[j] = data[i + j];
				}
			}
#endif /* CONFIG_MCUMGR_SMP_SHELL */
			int err = ring_buf_put_finish(sh_uart->rx_ringbuf,
						      rd_len);
			(void)err;
			__ASSERT_NO_MSG(err == 0);
		} else {
			uint8_t dummy;

			/* No space in the ring buffer - consume the byte. */
			LOG_WRN("RX ring buffer full.");

			rd_len = uart_fifo_read(dev, &dummy, 1);
#ifdef CONFIG_MCUMGR_SMP_SHELL
			/* If a byte was successfully read from the FIFO, try
			 * feeding it to SMP as part of an mcumgr frame.
			 */
			if ((rd_len != 0) &&
			    (smp_shell_rx_bytes(smp, &dummy, 1) == 1)) {
				new_data = true;
			}
#endif /* CONFIG_MCUMGR_SMP_SHELL */
		}
	} while (rd_len && (rd_len == len));

	if (new_data) {
		sh_uart->ctrl_blk->handler(SHELL_TRANSPORT_EVT_RX_RDY,
					   sh_uart->ctrl_blk->context);
	}
}

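/* When CONFIG_SHELL_BACKEND_SERIAL_CHECK_DTR is enabled, block until DTR is
 * asserted or the driver reports that line control is unsupported, so that
 * output is not sent before a terminal is connected.
 */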
static void uart_dtr_wait(const struct device *dev)
{
	if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_CHECK_DTR)) {
		int dtr, err;

		while (true) {
			err = uart_line_ctrl_get(dev, UART_LINE_CTRL_DTR, &dtr);
			if (err == -ENOSYS || err == -ENOTSUP) {
				break;
			}
			if (dtr) {
				break;
			}
			/* Give CPU resources to low priority threads. */
			k_sleep(K_MSEC(100));
		}
	}
}

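/* Move pending data from the TX ring buffer into the UART FIFO. When the
 * ring buffer is empty, disable the TX interrupt and clear the busy flag.
 */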
static void uart_tx_handle(const struct device *dev,
			   const struct shell_uart *sh_uart)
{
	uint32_t len;
	int err;
	const uint8_t *data;

	len = ring_buf_get_claim(sh_uart->tx_ringbuf, (uint8_t **)&data,
				 sh_uart->tx_ringbuf->size);
	if (len) {
		/* Wait for DTR signal before sending anything to output. */
		uart_dtr_wait(dev);
		len = uart_fifo_fill(dev, data, len);
		err = ring_buf_get_finish(sh_uart->tx_ringbuf, len);
		__ASSERT_NO_MSG(err == 0);
	} else {
		uart_irq_tx_disable(dev);
		sh_uart->ctrl_blk->tx_busy = 0;
	}

	sh_uart->ctrl_blk->handler(SHELL_TRANSPORT_EVT_TX_RDY,
				   sh_uart->ctrl_blk->context);
}

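/* UART interrupt handler: dispatches to the RX and TX handlers. */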
static void uart_callback(const struct device *dev, void *user_data)
{
	const struct shell_uart *sh_uart = (struct shell_uart *)user_data;

	uart_irq_update(dev);

	if (uart_irq_rx_ready(dev)) {
		uart_rx_handle(dev, sh_uart);
	}

	if (uart_irq_tx_ready(dev)) {
		uart_tx_handle(dev, sh_uart);
	}
}
#endif /* CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN */

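/* In the interrupt-driven configuration, reset both ring buffers, install
 * the IRQ callback and enable RX interrupts.
 */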
static void uart_irq_init(const struct shell_uart *sh_uart)
{
#ifdef CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN
	const struct device *dev = sh_uart->ctrl_blk->dev;

	ring_buf_reset(sh_uart->tx_ringbuf);
	ring_buf_reset(sh_uart->rx_ringbuf);
	sh_uart->ctrl_blk->tx_busy = 0;
	uart_irq_callback_user_data_set(dev, uart_callback, (void *)sh_uart);
	uart_irq_rx_enable(dev);
#endif
}

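/* Polled RX path: invoked every RX_POLL_PERIOD by a k_timer to read
 * characters with uart_poll_in() and push them into the RX ring buffer.
 */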
static void timer_handler(struct k_timer *timer)
{
	uint8_t c;
	const struct shell_uart *sh_uart = k_timer_user_data_get(timer);

	while (uart_poll_in(sh_uart->ctrl_blk->dev, &c) == 0) {
		if (ring_buf_put(sh_uart->rx_ringbuf, &c, 1) == 0U) {
			/* ring buffer full. */
			LOG_WRN("RX ring buffer full.");
		}
		sh_uart->ctrl_blk->handler(SHELL_TRANSPORT_EVT_RX_RDY,
					   sh_uart->ctrl_blk->context);
	}
}

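/* Shell transport API: bind the transport to the UART device passed as
 * config and start either interrupt-driven or timer-polled reception.
 */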
static int init(const struct shell_transport *transport,
		const void *config,
		shell_transport_handler_t evt_handler,
		void *context)
{
	const struct shell_uart *sh_uart = (struct shell_uart *)transport->ctx;

	sh_uart->ctrl_blk->dev = (const struct device *)config;
	sh_uart->ctrl_blk->handler = evt_handler;
	sh_uart->ctrl_blk->context = context;

#ifdef CONFIG_MCUMGR_SMP_SHELL
	sh_uart->ctrl_blk->smp.buf_pool = &smp_shell_rx_pool;
	k_fifo_init(&sh_uart->ctrl_blk->smp.buf_ready);
#endif

	if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN)) {
		uart_irq_init(sh_uart);
	} else {
		k_timer_init(sh_uart->timer, timer_handler, NULL);
		k_timer_user_data_set(sh_uart->timer, (void *)sh_uart);
		k_timer_start(sh_uart->timer, RX_POLL_PERIOD, RX_POLL_PERIOD);
	}

	return 0;
}

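/* Shell transport API: stop reception by disabling UART interrupts or
 * stopping the RX poll timer.
 */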
static int uninit(const struct shell_transport *transport)
{
	const struct shell_uart *sh_uart = (struct shell_uart *)transport->ctx;

	if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN)) {
		const struct device *dev = sh_uart->ctrl_blk->dev;

		uart_irq_tx_disable(dev);
		uart_irq_rx_disable(dev);
	} else {
		k_timer_stop(sh_uart->timer);
	}

	return 0;
}

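/* Shell transport API: record the requested TX mode and disable the TX
 * interrupt when blocking TX is selected.
 */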
static int enable(const struct shell_transport *transport, bool blocking_tx)
{
	const struct shell_uart *sh_uart = (struct shell_uart *)transport->ctx;

	sh_uart->ctrl_blk->blocking_tx = blocking_tx;

	if (blocking_tx) {
#ifdef CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN
		uart_irq_tx_disable(sh_uart->ctrl_blk->dev);
#endif
	}

	return 0;
}

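/* Queue data in the TX ring buffer and enable the TX interrupt if a
 * transfer is not already in progress.
 */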
static void irq_write(const struct shell_uart *sh_uart, const void *data,
		      size_t length, size_t *cnt)
{
	*cnt = ring_buf_put(sh_uart->tx_ringbuf, data, length);

	if (atomic_set(&sh_uart->ctrl_blk->tx_busy, 1) == 0) {
#ifdef CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN
		uart_irq_tx_enable(sh_uart->ctrl_blk->dev);
#endif
	}
}

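/* Shell transport API: write through the interrupt-driven path or, in
 * blocking mode, byte by byte with uart_poll_out().
 */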
static int write(const struct shell_transport *transport,
		 const void *data, size_t length, size_t *cnt)
{
	const struct shell_uart *sh_uart = (struct shell_uart *)transport->ctx;
	const uint8_t *data8 = (const uint8_t *)data;

	if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN) &&
	    !sh_uart->ctrl_blk->blocking_tx) {
		irq_write(sh_uart, data, length, cnt);
	} else {
		for (size_t i = 0; i < length; i++) {
			uart_poll_out(sh_uart->ctrl_blk->dev, data8[i]);
		}

		*cnt = length;

		sh_uart->ctrl_blk->handler(SHELL_TRANSPORT_EVT_TX_RDY,
					   sh_uart->ctrl_blk->context);
	}

	return 0;
}

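/* Shell transport API: fetch received bytes from the RX ring buffer. */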
static int read(const struct shell_transport *transport,
		void *data, size_t length, size_t *cnt)
{
	struct shell_uart *sh_uart = (struct shell_uart *)transport->ctx;

	*cnt = ring_buf_get(sh_uart->rx_ringbuf, data, length);

	return 0;
}

#ifdef CONFIG_MCUMGR_SMP_SHELL
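/* Shell transport API: process mcumgr frames accumulated by the SMP RX
 * handling.
 */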
static void update(const struct shell_transport *transport)
{
	struct shell_uart *sh_uart = (struct shell_uart *)transport->ctx;

	smp_shell_process(&sh_uart->ctrl_blk->smp);
}
#endif /* CONFIG_MCUMGR_SMP_SHELL */

const struct shell_transport_api shell_uart_transport_api = {
	.init = init,
	.uninit = uninit,
	.enable = enable,
	.write = write,
	.read = read,
#ifdef CONFIG_MCUMGR_SMP_SHELL
	.update = update,
#endif /* CONFIG_MCUMGR_SMP_SHELL */
};

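/* Boot-time initialization: start the shell on the UART chosen as
 * zephyr,shell-uart in the devicetree, optionally attaching it as a log
 * backend at the configured level.
 */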
static int enable_shell_uart(const struct device *arg)
{
	ARG_UNUSED(arg);
	const struct device *dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_shell_uart));
	bool log_backend = CONFIG_SHELL_BACKEND_SERIAL_LOG_LEVEL > 0;
	uint32_t level =
		(CONFIG_SHELL_BACKEND_SERIAL_LOG_LEVEL > LOG_LEVEL_DBG) ?
		CONFIG_LOG_MAX_LEVEL : CONFIG_SHELL_BACKEND_SERIAL_LOG_LEVEL;

	if (!device_is_ready(dev)) {
		return -ENODEV;
	}

	if (IS_ENABLED(CONFIG_MCUMGR_SMP_SHELL)) {
		smp_shell_init();
	}

	shell_init(&shell_uart, dev, true, log_backend, level);

	return 0;
}
SYS_INIT(enable_shell_uart, POST_KERNEL,
	 CONFIG_SHELL_BACKEND_SERIAL_INIT_PRIORITY);

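/* Return a pointer to the UART shell backend instance. */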
const struct shell *shell_backend_uart_get_ptr(void)
{
	return &shell_uart;
}