/*
 * Copyright (c) 2018 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/sys/ring_buffer.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/mgmt/mcumgr/transport/smp_shell.h>
#include <zephyr/shell/shell_uart.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/drivers/serial/uart_async_rx.h>
#include <zephyr/init.h>
#include <zephyr/logging/log.h>
#include <zephyr/net/buf.h>

#define LOG_MODULE_NAME shell_uart
LOG_MODULE_REGISTER(shell_uart);

#ifdef CONFIG_SHELL_BACKEND_SERIAL_RX_POLL_PERIOD
#define RX_POLL_PERIOD K_MSEC(CONFIG_SHELL_BACKEND_SERIAL_RX_POLL_PERIOD)
#else
#define RX_POLL_PERIOD K_NO_WAIT
#endif

#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
NET_BUF_POOL_DEFINE(smp_shell_rx_pool, CONFIG_MCUMGR_TRANSPORT_SHELL_RX_BUF_COUNT,
		    SMP_SHELL_RX_BUF_SIZE, 0, NULL);
#endif /* CONFIG_MCUMGR_TRANSPORT_SHELL */

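/* UART async API event handler: release the TX semaphore on TX done,
 * hand received data to the uart_async_rx helper and notify the shell
 * thread, and service RX buffer request/release events.
 */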
static void async_callback(const struct device *dev, struct uart_event *evt, void *user_data)
{
	struct shell_uart_async *sh_uart = (struct shell_uart_async *)user_data;

	switch (evt->type) {
	case UART_TX_DONE:
		k_sem_give(&sh_uart->tx_sem);
		break;
	case UART_RX_RDY:
		uart_async_rx_on_rdy(&sh_uart->async_rx, evt->data.rx.buf, evt->data.rx.len);
		sh_uart->common.handler(SHELL_TRANSPORT_EVT_RX_RDY, sh_uart->common.context);
		break;
	case UART_RX_BUF_REQUEST:
	{
		uint8_t *buf = uart_async_rx_buf_req(&sh_uart->async_rx);
		size_t len = uart_async_rx_get_buf_len(&sh_uart->async_rx);

		if (buf) {
			int err = uart_rx_buf_rsp(dev, buf, len);

			if (err < 0) {
				uart_async_rx_on_buf_rel(&sh_uart->async_rx, buf);
			}
		} else {
			atomic_inc(&sh_uart->pending_rx_req);
		}

		break;
	}
	case UART_RX_BUF_RELEASED:
		uart_async_rx_on_buf_rel(&sh_uart->async_rx, evt->data.rx_buf.buf);
		break;
	case UART_RX_DISABLED:
		break;
	default:
		break;
	};
}

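/* Interrupt driven RX handler: drain the UART FIFO into the RX ring
 * buffer, diverting bytes that belong to an mcumgr (SMP) frame, and
 * signal the shell thread when new data arrived.
 */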
static void uart_rx_handle(const struct device *dev, struct shell_uart_int_driven *sh_uart)
{
	uint8_t *data;
	uint32_t len;
	uint32_t rd_len;
	bool new_data = false;
#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
	struct smp_shell_data *const smp = &sh_uart->common.smp;
#endif

	do {
		len = ring_buf_put_claim(&sh_uart->rx_ringbuf, &data,
					 sh_uart->rx_ringbuf.size);

		if (len > 0) {
			rd_len = uart_fifo_read(dev, data, len);

			/* If there is any new data to be either taken into
			 * ring buffer or consumed by the SMP, signal the
			 * shell_thread.
			 */
			if (rd_len > 0) {
				new_data = true;
			}
#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
			/* Divert bytes from shell handling if it is
			 * part of an mcumgr frame.
			 */
			size_t i = smp_shell_rx_bytes(smp, data, rd_len);

			rd_len -= i;

			if (rd_len) {
				for (uint32_t j = 0; j < rd_len; j++) {
					data[j] = data[i + j];
				}
			}
#endif /* CONFIG_MCUMGR_TRANSPORT_SHELL */
			int err = ring_buf_put_finish(&sh_uart->rx_ringbuf, rd_len);
			(void)err;
			__ASSERT_NO_MSG(err == 0);
		} else {
			uint8_t dummy;

			/* No space in the ring buffer - consume byte. */
			LOG_WRN("RX ring buffer full.");

			rd_len = uart_fifo_read(dev, &dummy, 1);
#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
			/* If successful in getting byte from the fifo, try
			 * feeding it to SMP as a part of mcumgr frame.
			 */
			if ((rd_len != 0) && (smp_shell_rx_bytes(smp, &dummy, 1) == 1)) {
				new_data = true;
			}
#endif /* CONFIG_MCUMGR_TRANSPORT_SHELL */
		}
	} while (rd_len && (rd_len == len));

	if (new_data) {
		sh_uart->common.handler(SHELL_TRANSPORT_EVT_RX_RDY, sh_uart->common.context);
	}
}

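/* Return true when TX may proceed: DTR checking disabled, DTR asserted,
 * or line control not supported by the driver.
 */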
static bool uart_dtr_check(const struct device *dev)
{
	BUILD_ASSERT(!IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_CHECK_DTR) ||
		IS_ENABLED(CONFIG_UART_LINE_CTRL),
		"DTR check requires CONFIG_UART_LINE_CTRL");

	if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_CHECK_DTR)) {
		int dtr, err;

		err = uart_line_ctrl_get(dev, UART_LINE_CTRL_DTR, &dtr);
		if (err == -ENOSYS || err == -ENOTSUP) {
			return true;
		}

		return dtr;
	}

	return true;
}

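/* Periodic DTR poll: once DTR is asserted, stop polling and enable the
 * TX interrupt so buffered data can be transmitted.
 */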
static void dtr_timer_handler(struct k_timer *timer)
{
	struct shell_uart_int_driven *sh_uart = k_timer_user_data_get(timer);

	if (!uart_dtr_check(sh_uart->common.dev)) {
		return;
	}

	/* DTR is active, stop timer and start TX */
	k_timer_stop(timer);
	uart_irq_tx_enable(sh_uart->common.dev);
}

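/* Interrupt driven TX handler: wait for DTR if required, then feed data
 * from the TX ring buffer into the UART FIFO, disabling the TX interrupt
 * once the buffer is empty.
 */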
static void uart_tx_handle(const struct device *dev, struct shell_uart_int_driven *sh_uart)
{
	uint32_t len;
	const uint8_t *data;

	if (!uart_dtr_check(dev)) {
		/* Wait for DTR signal before sending anything to output. */
		uart_irq_tx_disable(dev);
		k_timer_start(&sh_uart->dtr_timer, K_MSEC(100), K_MSEC(100));
		return;
	}

	len = ring_buf_get_claim(&sh_uart->tx_ringbuf, (uint8_t **)&data,
				 sh_uart->tx_ringbuf.size);
	if (len) {
		int err;

		len = uart_fifo_fill(dev, data, len);
		err = ring_buf_get_finish(&sh_uart->tx_ringbuf, len);
		__ASSERT_NO_MSG(err == 0);
		ARG_UNUSED(err);
	} else {
		uart_irq_tx_disable(dev);
		sh_uart->tx_busy = 0;
	}

	sh_uart->common.handler(SHELL_TRANSPORT_EVT_TX_RDY, sh_uart->common.context);
}

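/* Top-level UART IRQ callback dispatching to the RX and TX handlers. */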
static void uart_callback(const struct device *dev, void *user_data)
{
	struct shell_uart_int_driven *sh_uart = (struct shell_uart_int_driven *)user_data;

	uart_irq_update(dev);

	if (uart_irq_rx_ready(dev)) {
		uart_rx_handle(dev, sh_uart);
	}

	if (uart_irq_tx_ready(dev)) {
		uart_tx_handle(dev, sh_uart);
	}
}

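/* Interrupt driven transport init: set up the RX/TX ring buffers,
 * register the IRQ callback, enable RX and prepare the optional DTR
 * poll timer.
 */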
static void irq_init(struct shell_uart_int_driven *sh_uart)
{
	const struct device *dev = sh_uart->common.dev;

	ring_buf_init(&sh_uart->rx_ringbuf, CONFIG_SHELL_BACKEND_SERIAL_RX_RING_BUFFER_SIZE,
		      sh_uart->rx_buf);
	ring_buf_init(&sh_uart->tx_ringbuf, CONFIG_SHELL_BACKEND_SERIAL_TX_RING_BUFFER_SIZE,
		      sh_uart->tx_buf);
	sh_uart->tx_busy = 0;
	uart_irq_callback_user_data_set(dev, uart_callback, (void *)sh_uart);
	uart_irq_rx_enable(dev);

	if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_CHECK_DTR)) {
		k_timer_init(&sh_uart->dtr_timer, dtr_timer_handler, NULL);
		k_timer_user_data_set(&sh_uart->dtr_timer, (void *)sh_uart);
	}
}

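/* Enable reception with a 10000 us inactivity timeout (the uart_rx_enable()
 * timeout is given in microseconds in the async API).
 */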
static int rx_enable(const struct device *dev, uint8_t *buf, size_t len)
{
	return uart_rx_enable(dev, buf, len, 10000);
}

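/* Async transport init: configure the uart_async_rx helper, register
 * the UART callback and start reception with the first buffer.
 */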
static void async_init(struct shell_uart_async *sh_uart)
{
	const struct device *dev = sh_uart->common.dev;
	struct uart_async_rx *async_rx = &sh_uart->async_rx;
	int err;

	sh_uart->async_rx_config = (struct uart_async_rx_config){
		.buffer = sh_uart->rx_data,
		.length = ASYNC_RX_BUF_SIZE,
		.buf_cnt = CONFIG_SHELL_BACKEND_SERIAL_ASYNC_RX_BUFFER_COUNT,
	};

	k_sem_init(&sh_uart->tx_sem, 0, 1);

	err = uart_async_rx_init(async_rx, &sh_uart->async_rx_config);
	(void)err;
	__ASSERT_NO_MSG(err == 0);

	uint8_t *buf = uart_async_rx_buf_req(async_rx);

	err = uart_callback_set(dev, async_callback, (void *)sh_uart);
	(void)err;
	__ASSERT_NO_MSG(err == 0);

	err = rx_enable(dev, buf, uart_async_rx_get_buf_len(async_rx));
	(void)err;
	__ASSERT_NO_MSG(err == 0);
}

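/* Periodic poll of the UART: move any pending characters into the RX
 * ring buffer and notify the shell thread.
 */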
static void polling_rx_timeout_handler(struct k_timer *timer)
{
	uint8_t c;
	struct shell_uart_polling *sh_uart = k_timer_user_data_get(timer);

	while (uart_poll_in(sh_uart->common.dev, &c) == 0) {
		if (ring_buf_put(&sh_uart->rx_ringbuf, &c, 1) == 0U) {
			/* ring buffer full. */
			LOG_WRN("RX ring buffer full.");
		}
		sh_uart->common.handler(SHELL_TRANSPORT_EVT_RX_RDY, sh_uart->common.context);
	}
}

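/* Polling transport init: start the periodic RX poll timer and set up
 * the RX ring buffer.
 */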
static void polling_init(struct shell_uart_polling *sh_uart)
{
	k_timer_init(&sh_uart->rx_timer, polling_rx_timeout_handler, NULL);
	k_timer_user_data_set(&sh_uart->rx_timer, (void *)sh_uart);
	k_timer_start(&sh_uart->rx_timer, RX_POLL_PERIOD, RX_POLL_PERIOD);

	ring_buf_init(&sh_uart->rx_ringbuf, CONFIG_SHELL_BACKEND_SERIAL_RX_RING_BUFFER_SIZE,
		      sh_uart->rx_buf);
}

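/* Shell transport init: store the device, event handler and context,
 * then run the API-specific (async, interrupt driven or polling) setup.
 */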
static int init(const struct shell_transport *transport,
		const void *config,
		shell_transport_handler_t evt_handler,
		void *context)
{
	struct shell_uart_common *common = (struct shell_uart_common *)transport->ctx;

	common->dev = (const struct device *)config;
	common->handler = evt_handler;
	common->context = context;

#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
	common->smp.buf_pool = &smp_shell_rx_pool;
	k_fifo_init(&common->smp.buf_ready);
#endif

	if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_ASYNC)) {
		async_init((struct shell_uart_async *)transport->ctx);
	} else if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_INTERRUPT_DRIVEN)) {
		irq_init((struct shell_uart_int_driven *)transport->ctx);
	} else {
		polling_init((struct shell_uart_polling *)transport->ctx);
	}

	return 0;
}

static void irq_uninit(struct shell_uart_int_driven *sh_uart)
{
	const struct device *dev = sh_uart->common.dev;

	k_timer_stop(&sh_uart->dtr_timer);
	uart_irq_tx_disable(dev);
	uart_irq_rx_disable(dev);
}

static void async_uninit(struct shell_uart_async *sh_uart)
{
}

static void polling_uninit(struct shell_uart_polling *sh_uart)
{
	k_timer_stop(&sh_uart->rx_timer);
}

static int uninit(const struct shell_transport *transport)
{
	if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_ASYNC)) {
		async_uninit((struct shell_uart_async *)transport->ctx);
	} else if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_INTERRUPT_DRIVEN)) {
		irq_uninit((struct shell_uart_int_driven *)transport->ctx);
	} else {
		polling_uninit((struct shell_uart_polling *)transport->ctx);
	}

	return 0;
}

static int enable(const struct shell_transport *transport, bool blocking_tx)
{
	struct shell_uart_common *sh_uart = (struct shell_uart_common *)transport->ctx;

	sh_uart->blocking_tx = blocking_tx;

	if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_INTERRUPT_DRIVEN) && blocking_tx) {
		uart_irq_tx_disable(sh_uart->dev);
	}

	return 0;
}

static int polling_write(struct shell_uart_common *sh_uart,
			 const void *data, size_t length, size_t *cnt)
{
	const uint8_t *data8 = (const uint8_t *)data;

	for (size_t i = 0; i < length; i++) {
		uart_poll_out(sh_uart->dev, data8[i]);
	}

	*cnt = length;

	sh_uart->handler(SHELL_TRANSPORT_EVT_TX_RDY, sh_uart->context);

	return 0;
}

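/* Queue data in the TX ring buffer and, if TX is not already in
 * progress, kick it off by enabling the TX interrupt.
 */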
static int irq_write(struct shell_uart_int_driven *sh_uart,
		 const void *data, size_t length, size_t *cnt)
{
	*cnt = ring_buf_put(&sh_uart->tx_ringbuf, data, length);

	if (atomic_set(&sh_uart->tx_busy, 1) == 0) {
		uart_irq_tx_enable(sh_uart->common.dev);
	}

	return 0;
}

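/* Blocking write using the async API: start the transfer and wait for
 * the UART_TX_DONE event signalled through the TX semaphore.
 */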
static int async_write(struct shell_uart_async *sh_uart,
		       const void *data, size_t length, size_t *cnt)
{
	int err;

	err = uart_tx(sh_uart->common.dev, data, length, SYS_FOREVER_US);
	if (err < 0) {
		*cnt = 0;
		return err;
	}

	err = k_sem_take(&sh_uart->tx_sem, K_FOREVER);
	*cnt = length;

	sh_uart->common.handler(SHELL_TRANSPORT_EVT_TX_RDY, sh_uart->common.context);

	return err;
}

static int write_uart(const struct shell_transport *transport,
		      const void *data, size_t length, size_t *cnt)
{
	struct shell_uart_common *sh_uart = (struct shell_uart_common *)transport->ctx;

	if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_POLLING) || sh_uart->blocking_tx) {
		return polling_write(sh_uart, data, length, cnt);
	} else if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_INTERRUPT_DRIVEN)) {
		return irq_write((struct shell_uart_int_driven *)transport->ctx, data, length, cnt);
	} else {
		return async_write((struct shell_uart_async *)transport->ctx, data, length, cnt);
	}
}

static int irq_read(struct shell_uart_int_driven *sh_uart,
		    void *data, size_t length, size_t *cnt)
{
	*cnt = ring_buf_get(&sh_uart->rx_ringbuf, data, length);

	return 0;
}

static int polling_read(struct shell_uart_polling *sh_uart,
			void *data, size_t length, size_t *cnt)
{
	*cnt = ring_buf_get(&sh_uart->rx_ringbuf, data, length);

	return 0;
}

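/* Claim received data from the async RX helper, divert mcumgr (SMP)
 * bytes when enabled, copy the remainder to the caller and release the
 * claimed data. Re-arm RX with a fresh buffer if a request is pending.
 */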
static int async_read(struct shell_uart_async *sh_uart,
		      void *data, size_t length, size_t *cnt)
{
	uint8_t *buf;
	size_t blen;
	struct uart_async_rx *async_rx = &sh_uart->async_rx;

	blen = uart_async_rx_data_claim(async_rx, &buf, length);
#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
	struct smp_shell_data *const smp = &sh_uart->common.smp;
	size_t sh_cnt = 0;

	for (size_t i = 0; i < blen; i++) {
		if (smp_shell_rx_bytes(smp, &buf[i], 1) == 0) {
			((uint8_t *)data)[sh_cnt++] = buf[i];
		}
	}
#else
	size_t sh_cnt = blen;

	memcpy(data, buf, blen);
#endif
	bool buf_available = uart_async_rx_data_consume(async_rx, sh_cnt);
	*cnt = sh_cnt;

	if (sh_uart->pending_rx_req && buf_available) {
		uint8_t *buf = uart_async_rx_buf_req(async_rx);
		size_t len = uart_async_rx_get_buf_len(async_rx);
		int err;

		__ASSERT_NO_MSG(buf != NULL);
		atomic_dec(&sh_uart->pending_rx_req);
		err = uart_rx_buf_rsp(sh_uart->common.dev, buf, len);
		/* If it is too late and RX is disabled then re-enable it. */
		if (err < 0) {
			if (err == -EACCES) {
				sh_uart->pending_rx_req = 0;
				err = rx_enable(sh_uart->common.dev, buf, len);
			} else {
				return err;
			}
		}
	}

	return 0;
}

static int read_uart(const struct shell_transport *transport,
		     void *data, size_t length, size_t *cnt)
{
	if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_INTERRUPT_DRIVEN)) {
		return irq_read((struct shell_uart_int_driven *)transport->ctx, data, length, cnt);
	} else if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_ASYNC)) {
		return async_read((struct shell_uart_async *)transport->ctx, data, length, cnt);
	} else {
		return polling_read((struct shell_uart_polling *)transport->ctx, data, length, cnt);
	}
}

#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
static void update(const struct shell_transport *transport)
{
	/*
	 * This is dependent on the fact that `struct shell_uart_common`
	 * is always the first member, regardless of the UART configuration
	 */
	struct shell_uart_common *sh_uart = (struct shell_uart_common *)transport->ctx;

	smp_shell_process(&sh_uart->smp);
}
#endif /* CONFIG_MCUMGR_TRANSPORT_SHELL */

const struct shell_transport_api shell_uart_transport_api = {
	.init = init,
	.uninit = uninit,
	.enable = enable,
	.write = write_uart,
	.read = read_uart,
#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
	.update = update,
#endif /* CONFIG_MCUMGR_TRANSPORT_SHELL */
};

SHELL_UART_DEFINE(shell_transport_uart);
SHELL_DEFINE(shell_uart, CONFIG_SHELL_PROMPT_UART, &shell_transport_uart,
	     CONFIG_SHELL_BACKEND_SERIAL_LOG_MESSAGE_QUEUE_SIZE,
	     CONFIG_SHELL_BACKEND_SERIAL_LOG_MESSAGE_QUEUE_TIMEOUT,
	     SHELL_FLAG_OLF_CRLF);

#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
struct smp_shell_data *shell_uart_smp_shell_data_get_ptr(void)
{
	struct shell_uart_common *common = (struct shell_uart_common *)shell_transport_uart.ctx;

	return &common->smp;
}
#endif

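/* SYS_INIT hook: initialize the shell instance on the chosen
 * zephyr,shell-uart device, optionally acting as a log backend
 * depending on the configured log level.
 */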
static int enable_shell_uart(void)
{
	const struct device *const dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_shell_uart));
	bool log_backend = CONFIG_SHELL_BACKEND_SERIAL_LOG_LEVEL > 0;
	uint32_t level =
		(CONFIG_SHELL_BACKEND_SERIAL_LOG_LEVEL > LOG_LEVEL_DBG) ?
		CONFIG_LOG_MAX_LEVEL : CONFIG_SHELL_BACKEND_SERIAL_LOG_LEVEL;
	static const struct shell_backend_config_flags cfg_flags =
					SHELL_DEFAULT_BACKEND_CONFIG_FLAGS;

	if (!device_is_ready(dev)) {
		return -ENODEV;
	}

	if (IS_ENABLED(CONFIG_MCUMGR_TRANSPORT_SHELL)) {
		smp_shell_init();
	}

	shell_init(&shell_uart, dev, cfg_flags, log_backend, level);

	return 0;
}

SYS_INIT(enable_shell_uart, POST_KERNEL,
	 CONFIG_SHELL_BACKEND_SERIAL_INIT_PRIORITY);

const struct shell *shell_backend_uart_get_ptr(void)
{
	return &shell_uart;
}