/*
 * Copyright (c) 2018 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/shell/shell_uart.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/init.h>
#include <zephyr/logging/log.h>
#include <zephyr/net/buf.h>

#define LOG_MODULE_NAME shell_uart
LOG_MODULE_REGISTER(shell_uart);

#ifdef CONFIG_SHELL_BACKEND_SERIAL_RX_POLL_PERIOD
#define RX_POLL_PERIOD K_MSEC(CONFIG_SHELL_BACKEND_SERIAL_RX_POLL_PERIOD)
#else
#define RX_POLL_PERIOD K_NO_WAIT
#endif

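/* Pool of net_buf buffers used to hand received mcumgr (SMP) frames from this
 * backend to the SMP shell transport; it is attached to the SMP control block
 * in init() below.
 */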
#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
NET_BUF_POOL_DEFINE(smp_shell_rx_pool, CONFIG_MCUMGR_TRANSPORT_SHELL_RX_BUF_COUNT,
		    SMP_SHELL_RX_BUF_SIZE, 0, NULL);
#endif /* CONFIG_MCUMGR_TRANSPORT_SHELL */

SHELL_UART_DEFINE(shell_transport_uart,
		  CONFIG_SHELL_BACKEND_SERIAL_TX_RING_BUFFER_SIZE,
		  CONFIG_SHELL_BACKEND_SERIAL_RX_RING_BUFFER_SIZE);
SHELL_DEFINE(shell_uart, CONFIG_SHELL_PROMPT_UART, &shell_transport_uart,
	     CONFIG_SHELL_BACKEND_SERIAL_LOG_MESSAGE_QUEUE_SIZE,
	     CONFIG_SHELL_BACKEND_SERIAL_LOG_MESSAGE_QUEUE_TIMEOUT,
	     SHELL_FLAG_OLF_CRLF);

#ifdef CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN
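/* Drain the UART RX FIFO into the RX ring buffer from interrupt context.
 * When mcumgr runs over the shell, bytes that belong to an SMP frame are
 * diverted to the SMP receiver instead of the ring buffer, and the shell
 * thread is signalled whenever any new data arrived.
 */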
static void uart_rx_handle(const struct device *dev,
			   const struct shell_uart *sh_uart)
{
	uint8_t *data;
	uint32_t len;
	uint32_t rd_len;
	bool new_data = false;
#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
	struct smp_shell_data *const smp = &sh_uart->ctrl_blk->smp;
#endif

	do {
		len = ring_buf_put_claim(sh_uart->rx_ringbuf, &data,
					 sh_uart->rx_ringbuf->size);

		if (len > 0) {
			rd_len = uart_fifo_read(dev, data, len);

			/* If there is any new data to be either taken into
			 * ring buffer or consumed by the SMP, signal the
			 * shell_thread.
			 */
			if (rd_len > 0) {
				new_data = true;
			}
#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
			/* Divert bytes from shell handling if it is
			 * part of an mcumgr frame.
			 */
			size_t i = smp_shell_rx_bytes(smp, data, rd_len);

			rd_len -= i;

			if (rd_len) {
				for (uint32_t j = 0; j < rd_len; j++) {
					data[j] = data[i + j];
				}
			}
#endif /* CONFIG_MCUMGR_TRANSPORT_SHELL */
			int err = ring_buf_put_finish(sh_uart->rx_ringbuf,
						      rd_len);
			(void)err;
			__ASSERT_NO_MSG(err == 0);
		} else {
			uint8_t dummy;

			/* No space in the ring buffer - consume byte. */
			LOG_WRN("RX ring buffer full.");

			rd_len = uart_fifo_read(dev, &dummy, 1);
#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
			/* If successful in getting byte from the fifo, try
			 * feeding it to SMP as a part of mcumgr frame.
			 */
			if ((rd_len != 0) &&
			    (smp_shell_rx_bytes(smp, &dummy, 1) == 1)) {
				new_data = true;
			}
#endif /* CONFIG_MCUMGR_TRANSPORT_SHELL */
		}
	} while (rd_len && (rd_len == len));

	if (new_data) {
		sh_uart->ctrl_blk->handler(SHELL_TRANSPORT_EVT_RX_RDY,
					   sh_uart->ctrl_blk->context);
	}
}

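/* Return true when it is safe to transmit. With
 * CONFIG_SHELL_BACKEND_SERIAL_CHECK_DTR enabled this reads the DTR line and
 * only allows TX once a terminal is attached; drivers that do not implement
 * line control are treated as always ready.
 */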
static bool uart_dtr_check(const struct device *dev)
{
	BUILD_ASSERT(!IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_CHECK_DTR) ||
		     IS_ENABLED(CONFIG_UART_LINE_CTRL),
		     "DTR check requires CONFIG_UART_LINE_CTRL");

	if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_CHECK_DTR)) {
		int dtr, err;

		err = uart_line_ctrl_get(dev, UART_LINE_CTRL_DTR, &dtr);
		if (err == -ENOSYS || err == -ENOTSUP) {
			return true;
		}

		return dtr;
	}

	return true;
}

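/* Periodic check used while waiting for DTR: once the line becomes active the
 * timer is stopped and the TX interrupt is re-enabled.
 */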
static void dtr_timer_handler(struct k_timer *timer)
{
	const struct shell_uart *sh_uart = k_timer_user_data_get(timer);

	if (!uart_dtr_check(sh_uart->ctrl_blk->dev)) {
		return;
	}

	/* DTR is active, stop timer and start TX */
	k_timer_stop(timer);
	uart_irq_tx_enable(sh_uart->ctrl_blk->dev);
}

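/* Feed the UART TX FIFO from the TX ring buffer in interrupt context and
 * notify the shell thread that more data can be queued. While DTR is
 * inactive, TX is suspended and retried from the DTR timer instead.
 */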
static void uart_tx_handle(const struct device *dev,
			   const struct shell_uart *sh_uart)
{
	uint32_t len;
	const uint8_t *data;

	if (!uart_dtr_check(dev)) {
		/* Wait for DTR signal before sending anything to output. */
		uart_irq_tx_disable(dev);
		k_timer_start(sh_uart->dtr_timer, K_MSEC(100), K_MSEC(100));
		return;
	}

	len = ring_buf_get_claim(sh_uart->tx_ringbuf, (uint8_t **)&data,
				 sh_uart->tx_ringbuf->size);
	if (len) {
		int err;

		len = uart_fifo_fill(dev, data, len);
		err = ring_buf_get_finish(sh_uart->tx_ringbuf, len);
		__ASSERT_NO_MSG(err == 0);
		ARG_UNUSED(err);
	} else {
		uart_irq_tx_disable(dev);
		sh_uart->ctrl_blk->tx_busy = 0;
	}

	sh_uart->ctrl_blk->handler(SHELL_TRANSPORT_EVT_TX_RDY,
				   sh_uart->ctrl_blk->context);
}

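/* Common UART IRQ callback dispatching to the RX and TX handlers above. */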
static void uart_callback(const struct device *dev, void *user_data)
{
	const struct shell_uart *sh_uart = (struct shell_uart *)user_data;

	uart_irq_update(dev);

	if (uart_irq_rx_ready(dev)) {
		uart_rx_handle(dev, sh_uart);
	}

	if (uart_irq_tx_ready(dev)) {
		uart_tx_handle(dev, sh_uart);
	}
}
#endif /* CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN */

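/* Set up the interrupt-driven path: reset both ring buffers, install the IRQ
 * callback, enable RX and, when DTR checking is configured, prepare the DTR
 * wait timer. The body is empty when the backend is built for polling.
 */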
static void uart_irq_init(const struct shell_uart *sh_uart)
{
#ifdef CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN
	const struct device *dev = sh_uart->ctrl_blk->dev;

	ring_buf_reset(sh_uart->tx_ringbuf);
	ring_buf_reset(sh_uart->rx_ringbuf);
	sh_uart->ctrl_blk->tx_busy = 0;
	uart_irq_callback_user_data_set(dev, uart_callback, (void *)sh_uart);
	uart_irq_rx_enable(dev);

	if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_CHECK_DTR)) {
		k_timer_init(sh_uart->dtr_timer, dtr_timer_handler, NULL);
		k_timer_user_data_set(sh_uart->dtr_timer, (void *)sh_uart);
	}
#endif
}

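/* Polling-mode RX: drain the UART with uart_poll_in() on every timer tick,
 * push the bytes into the RX ring buffer and signal RX-ready to the shell
 * thread for each byte read.
 */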
static void timer_handler(struct k_timer *timer)
{
	uint8_t c;
	const struct shell_uart *sh_uart = k_timer_user_data_get(timer);

	while (uart_poll_in(sh_uart->ctrl_blk->dev, &c) == 0) {
		if (ring_buf_put(sh_uart->rx_ringbuf, &c, 1) == 0U) {
			/* ring buffer full. */
			LOG_WRN("RX ring buffer full.");
		}
		sh_uart->ctrl_blk->handler(SHELL_TRANSPORT_EVT_RX_RDY,
					   sh_uart->ctrl_blk->context);
	}
}

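/* Shell transport init: bind the backend to the UART device passed in
 * config, store the event handler and context, and start either the
 * interrupt-driven or the timer-based (polling) RX path.
 */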
static int init(const struct shell_transport *transport,
		const void *config,
		shell_transport_handler_t evt_handler,
		void *context)
{
	const struct shell_uart *sh_uart = (struct shell_uart *)transport->ctx;

	sh_uart->ctrl_blk->dev = (const struct device *)config;
	sh_uart->ctrl_blk->handler = evt_handler;
	sh_uart->ctrl_blk->context = context;

#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
	sh_uart->ctrl_blk->smp.buf_pool = &smp_shell_rx_pool;
	k_fifo_init(&sh_uart->ctrl_blk->smp.buf_ready);
#endif

	if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN)) {
		uart_irq_init(sh_uart);
	} else {
		k_timer_init(sh_uart->timer, timer_handler, NULL);
		k_timer_user_data_set(sh_uart->timer, (void *)sh_uart);
		k_timer_start(sh_uart->timer, RX_POLL_PERIOD, RX_POLL_PERIOD);
	}

	return 0;
}

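/* Shell transport uninit: stop the DTR timer and disable UART interrupts, or
 * stop the RX polling timer, depending on how the backend was built.
 */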
static int uninit(const struct shell_transport *transport)
{
	const struct shell_uart *sh_uart = (struct shell_uart *)transport->ctx;

	if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN)) {
		const struct device *dev = sh_uart->ctrl_blk->dev;

		k_timer_stop(sh_uart->dtr_timer);
		uart_irq_tx_disable(dev);
		uart_irq_rx_disable(dev);
	} else {
		k_timer_stop(sh_uart->timer);
	}

	return 0;
}

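/* Shell transport enable: record whether TX must be blocking; with blocking
 * TX the interrupt is disabled so that write() falls back to uart_poll_out().
 */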
static int enable(const struct shell_transport *transport, bool blocking_tx)
{
	const struct shell_uart *sh_uart = (struct shell_uart *)transport->ctx;

	sh_uart->ctrl_blk->blocking_tx = blocking_tx;

	if (blocking_tx) {
#ifdef CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN
		uart_irq_tx_disable(sh_uart->ctrl_blk->dev);
#endif
	}

	return 0;
}

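/* Queue data in the TX ring buffer and kick the TX interrupt if it was idle.
 * atomic_set() on tx_busy ensures the interrupt is enabled only once.
 */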
static void irq_write(const struct shell_uart *sh_uart, const void *data,
		      size_t length, size_t *cnt)
{
	*cnt = ring_buf_put(sh_uart->tx_ringbuf, data, length);

	if (atomic_set(&sh_uart->ctrl_blk->tx_busy, 1) == 0) {
#ifdef CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN
		uart_irq_tx_enable(sh_uart->ctrl_blk->dev);
#endif
	}
}

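/* Shell transport write: use the TX ring buffer and interrupt when available,
 * otherwise fall back to blocking uart_poll_out() for each byte.
 */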
static int write(const struct shell_transport *transport,
		 const void *data, size_t length, size_t *cnt)
{
	const struct shell_uart *sh_uart = (struct shell_uart *)transport->ctx;
	const uint8_t *data8 = (const uint8_t *)data;

	if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_INTERRUPT_DRIVEN) &&
	    !sh_uart->ctrl_blk->blocking_tx) {
		irq_write(sh_uart, data, length, cnt);
	} else {
		for (size_t i = 0; i < length; i++) {
			uart_poll_out(sh_uart->ctrl_blk->dev, data8[i]);
		}

		*cnt = length;

		sh_uart->ctrl_blk->handler(SHELL_TRANSPORT_EVT_TX_RDY,
					   sh_uart->ctrl_blk->context);
	}

	return 0;
}

static int read(const struct shell_transport *transport,
		void *data, size_t length, size_t *cnt)
{
	struct shell_uart *sh_uart = (struct shell_uart *)transport->ctx;

	*cnt = ring_buf_get(sh_uart->rx_ringbuf, data, length);

	return 0;
}

#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
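/* Transport update hook: lets the SMP transport process any complete mcumgr
 * frames collected by the RX handlers.
 */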
static void update(const struct shell_transport *transport)
{
	struct shell_uart *sh_uart = (struct shell_uart *)transport->ctx;

	smp_shell_process(&sh_uart->ctrl_blk->smp);
}
#endif /* CONFIG_MCUMGR_TRANSPORT_SHELL */

const struct shell_transport_api shell_uart_transport_api = {
	.init = init,
	.uninit = uninit,
	.enable = enable,
	.write = write,
	.read = read,
#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
	.update = update,
#endif /* CONFIG_MCUMGR_TRANSPORT_SHELL */
};

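/* Boot-time setup: resolve the devicetree "zephyr,shell-uart" chosen node,
 * optionally initialize the SMP shell transport, and initialize the shell
 * instance, registering it as a log backend when the serial backend log
 * level is non-zero.
 */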
static int enable_shell_uart(void)
{
	const struct device *const dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_shell_uart));
	bool log_backend = CONFIG_SHELL_BACKEND_SERIAL_LOG_LEVEL > 0;
	uint32_t level =
		(CONFIG_SHELL_BACKEND_SERIAL_LOG_LEVEL > LOG_LEVEL_DBG) ?
		CONFIG_LOG_MAX_LEVEL : CONFIG_SHELL_BACKEND_SERIAL_LOG_LEVEL;
	static const struct shell_backend_config_flags cfg_flags =
		SHELL_DEFAULT_BACKEND_CONFIG_FLAGS;

	if (!device_is_ready(dev)) {
		return -ENODEV;
	}

	if (IS_ENABLED(CONFIG_MCUMGR_TRANSPORT_SHELL)) {
		smp_shell_init();
	}

	shell_init(&shell_uart, dev, cfg_flags, log_backend, level);

	return 0;
}

SYS_INIT(enable_shell_uart, POST_KERNEL,
	 CONFIG_SHELL_BACKEND_SERIAL_INIT_PRIORITY);

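/* Accessor for this backend's shell instance. A minimal usage sketch
 * (application code, not part of this backend):
 *
 *	const struct shell *sh = shell_backend_uart_get_ptr();
 *
 *	shell_print(sh, "hello over the UART shell backend");
 */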
const struct shell *shell_backend_uart_get_ptr(void)
{
	return &shell_uart;
}