/*
 * Copyright (c) 2018 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/sys/ring_buffer.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/mgmt/mcumgr/transport/smp_shell.h>
#include <zephyr/shell/shell_uart.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/drivers/serial/uart_async_rx.h>
#include <zephyr/init.h>
#include <zephyr/logging/log.h>
#include <zephyr/net_buf.h>
#include <zephyr/pm/device_runtime.h>

#define LOG_MODULE_NAME shell_uart
LOG_MODULE_REGISTER(shell_uart);

#ifdef CONFIG_SHELL_BACKEND_SERIAL_RX_POLL_PERIOD
#define RX_POLL_PERIOD K_MSEC(CONFIG_SHELL_BACKEND_SERIAL_RX_POLL_PERIOD)
#else
#define RX_POLL_PERIOD K_NO_WAIT
#endif

#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
NET_BUF_POOL_DEFINE(smp_shell_rx_pool, CONFIG_MCUMGR_TRANSPORT_SHELL_RX_BUF_COUNT,
		    SMP_SHELL_RX_BUF_SIZE, 0, NULL);
#endif /* CONFIG_MCUMGR_TRANSPORT_SHELL */

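/* UART async API event callback. Feeds received data into the uart_async_rx
 * helper, supplies and recycles RX buffers, signals the shell thread on new
 * data and releases the TX semaphore when a transfer completes.
 */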
static void async_callback(const struct device *dev, struct uart_event *evt, void *user_data)
{
	struct shell_uart_async *sh_uart = (struct shell_uart_async *)user_data;

	switch (evt->type) {
	case UART_TX_DONE:
		k_sem_give(&sh_uart->tx_sem);
		break;
	case UART_RX_RDY:
		uart_async_rx_on_rdy(&sh_uart->async_rx, evt->data.rx.buf, evt->data.rx.len);
		sh_uart->common.handler(SHELL_TRANSPORT_EVT_RX_RDY, sh_uart->common.context);
		break;
	case UART_RX_BUF_REQUEST:
	{
		uint8_t *buf = uart_async_rx_buf_req(&sh_uart->async_rx);
		size_t len = uart_async_rx_get_buf_len(&sh_uart->async_rx);

		if (buf) {
			int err = uart_rx_buf_rsp(dev, buf, len);

			if (err < 0) {
				uart_async_rx_on_buf_rel(&sh_uart->async_rx, buf);
			}
		} else {
			atomic_inc(&sh_uart->pending_rx_req);
		}

		break;
	}
	case UART_RX_BUF_RELEASED:
		uart_async_rx_on_buf_rel(&sh_uart->async_rx, evt->data.rx_buf.buf);
		break;
	case UART_RX_DISABLED:
		break;
	default:
		break;
	}
}

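/* Interrupt-driven RX: drain the UART FIFO into the RX ring buffer. When the
 * mcumgr SMP transport is enabled, bytes belonging to an SMP frame are
 * diverted to it and removed from the shell stream. The shell thread is
 * signalled whenever new data was taken in.
 */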
static void uart_rx_handle(const struct device *dev, struct shell_uart_int_driven *sh_uart)
{
	uint8_t *data;
	uint32_t len;
	uint32_t rd_len;
	bool new_data = false;
#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
	struct smp_shell_data *const smp = &sh_uart->common.smp;
#endif

	do {
		len = ring_buf_put_claim(&sh_uart->rx_ringbuf, &data,
					 sh_uart->rx_ringbuf.size);

		if (len > 0) {
			rd_len = uart_fifo_read(dev, data, len);

			/* If there is any new data to be either taken into
			 * ring buffer or consumed by the SMP, signal the
			 * shell_thread.
			 */
			if (rd_len > 0) {
				new_data = true;
			}
#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
			/* Divert bytes from shell handling if it is
			 * part of an mcumgr frame.
			 */
			size_t i = smp_shell_rx_bytes(smp, data, rd_len);

			rd_len -= i;

			if (rd_len) {
				for (uint32_t j = 0; j < rd_len; j++) {
					data[j] = data[i + j];
				}
			}
#endif /* CONFIG_MCUMGR_TRANSPORT_SHELL */
			int err = ring_buf_put_finish(&sh_uart->rx_ringbuf, rd_len);
			(void)err;
			__ASSERT_NO_MSG(err == 0);
		} else {
			uint8_t dummy;

			/* No space in the ring buffer - consume byte. */
			LOG_WRN("RX ring buffer full.");

			rd_len = uart_fifo_read(dev, &dummy, 1);
#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
			/* If successful in getting byte from the fifo, try
			 * feeding it to SMP as a part of mcumgr frame.
			 */
			if ((rd_len != 0) && (smp_shell_rx_bytes(smp, &dummy, 1) == 1)) {
				new_data = true;
			}
#endif /* CONFIG_MCUMGR_TRANSPORT_SHELL */
		}
	} while (rd_len && (rd_len == len));

	if (new_data) {
		sh_uart->common.handler(SHELL_TRANSPORT_EVT_RX_RDY, sh_uart->common.context);
	}
}

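/* Returns true when transmission may proceed. With
 * CONFIG_SHELL_BACKEND_SERIAL_CHECK_DTR enabled the DTR line is read via
 * uart_line_ctrl_get(); drivers without line control support are treated
 * as ready.
 */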
static bool uart_dtr_check(const struct device *dev)
{
	BUILD_ASSERT(!IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_CHECK_DTR) ||
		IS_ENABLED(CONFIG_UART_LINE_CTRL),
		"DTR check requires CONFIG_UART_LINE_CTRL");

	if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_CHECK_DTR)) {
		int dtr, err;

		err = uart_line_ctrl_get(dev, UART_LINE_CTRL_DTR, &dtr);
		if (err == -ENOSYS || err == -ENOTSUP) {
			return true;
		}

		return dtr;
	}

	return true;
}

static void dtr_timer_handler(struct k_timer *timer)
{
	struct shell_uart_int_driven *sh_uart = k_timer_user_data_get(timer);

	if (!uart_dtr_check(sh_uart->common.dev)) {
		return;
	}

	/* DTR is active, stop timer and start TX */
	k_timer_stop(timer);
	uart_irq_tx_enable(sh_uart->common.dev);
}

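/* Interrupt-driven TX: move data from the TX ring buffer into the UART FIFO.
 * If DTR is not asserted, the TX interrupt is disabled and the DTR timer
 * re-checks the line every 100 ms before transmission resumes.
 */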
static void uart_tx_handle(const struct device *dev, struct shell_uart_int_driven *sh_uart)
{
	uint32_t len;
	const uint8_t *data;

	if (!uart_dtr_check(dev)) {
		/* Wait for DTR signal before sending anything to output. */
		uart_irq_tx_disable(dev);
		k_timer_start(&sh_uart->dtr_timer, K_MSEC(100), K_MSEC(100));
		return;
	}

	len = ring_buf_get_claim(&sh_uart->tx_ringbuf, (uint8_t **)&data,
				 sh_uart->tx_ringbuf.size);
	if (len) {
		int err;

		len = uart_fifo_fill(dev, data, len);
		err = ring_buf_get_finish(&sh_uart->tx_ringbuf, len);
		__ASSERT_NO_MSG(err == 0);
		ARG_UNUSED(err);
	} else {
		uart_irq_tx_disable(dev);
		sh_uart->tx_busy = 0;
	}

	sh_uart->common.handler(SHELL_TRANSPORT_EVT_TX_RDY, sh_uart->common.context);
}

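/* UART interrupt callback dispatching RX and TX handling. */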
static void uart_callback(const struct device *dev, void *user_data)
{
	struct shell_uart_int_driven *sh_uart = (struct shell_uart_int_driven *)user_data;

	uart_irq_update(dev);

	if (uart_irq_rx_ready(dev)) {
		uart_rx_handle(dev, sh_uart);
	}

	if (uart_irq_tx_ready(dev)) {
		uart_tx_handle(dev, sh_uart);
	}
}

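/* Initialize the interrupt-driven backend: RX/TX ring buffers, IRQ callback,
 * RX interrupt and, when DTR checking is enabled, the DTR polling timer.
 */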
static void irq_init(struct shell_uart_int_driven *sh_uart)
{
	const struct device *dev = sh_uart->common.dev;

	ring_buf_init(&sh_uart->rx_ringbuf, CONFIG_SHELL_BACKEND_SERIAL_RX_RING_BUFFER_SIZE,
		      sh_uart->rx_buf);
	ring_buf_init(&sh_uart->tx_ringbuf, CONFIG_SHELL_BACKEND_SERIAL_TX_RING_BUFFER_SIZE,
		      sh_uart->tx_buf);
	sh_uart->tx_busy = 0;
	uart_irq_callback_user_data_set(dev, uart_callback, (void *)sh_uart);
	uart_irq_rx_enable(dev);

	if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_CHECK_DTR)) {
		k_timer_init(&sh_uart->dtr_timer, dtr_timer_handler, NULL);
		k_timer_user_data_set(&sh_uart->dtr_timer, (void *)sh_uart);
	}
}

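/* Enable asynchronous reception with a 10 ms inactivity timeout
 * (the uart_rx_enable() timeout argument is given in microseconds).
 */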
static int rx_enable(const struct device *dev, uint8_t *buf, size_t len)
{
	return uart_rx_enable(dev, buf, len, 10000);
}

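/* Initialize the async backend: configure the uart_async_rx helper over the
 * backend's RX buffer pool, register the event callback and start reception
 * with the first requested buffer.
 */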
static void async_init(struct shell_uart_async *sh_uart)
{
	const struct device *dev = sh_uart->common.dev;
	struct uart_async_rx *async_rx = &sh_uart->async_rx;
	int err;

	sh_uart->async_rx_config = (struct uart_async_rx_config){
		.buffer = sh_uart->rx_data,
		.length = ASYNC_RX_BUF_SIZE,
		.buf_cnt = CONFIG_SHELL_BACKEND_SERIAL_ASYNC_RX_BUFFER_COUNT,
	};

	k_sem_init(&sh_uart->tx_sem, 0, 1);

	err = uart_async_rx_init(async_rx, &sh_uart->async_rx_config);
	(void)err;
	__ASSERT_NO_MSG(err == 0);

	uint8_t *buf = uart_async_rx_buf_req(async_rx);

	err = uart_callback_set(dev, async_callback, (void *)sh_uart);
	(void)err;
	__ASSERT_NO_MSG(err == 0);

	err = rx_enable(dev, buf, uart_async_rx_get_buf_len(async_rx));
	(void)err;
	__ASSERT_NO_MSG(err == 0);
}

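/* Polling backend: periodic k_timer handler that polls characters into the
 * RX ring buffer and notifies the shell thread for each byte received.
 */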
static void polling_rx_timeout_handler(struct k_timer *timer)
{
	uint8_t c;
	struct shell_uart_polling *sh_uart = k_timer_user_data_get(timer);

	while ((ring_buf_space_get(&sh_uart->rx_ringbuf) > 0) &&
	       (uart_poll_in(sh_uart->common.dev, &c) == 0)) {
		ring_buf_put(&sh_uart->rx_ringbuf, &c, 1);
		sh_uart->common.handler(SHELL_TRANSPORT_EVT_RX_RDY, sh_uart->common.context);
	}
}

static void polling_init(struct shell_uart_polling *sh_uart)
{
	k_timer_init(&sh_uart->rx_timer, polling_rx_timeout_handler, NULL);
	k_timer_user_data_set(&sh_uart->rx_timer, (void *)sh_uart);
	k_timer_start(&sh_uart->rx_timer, RX_POLL_PERIOD, RX_POLL_PERIOD);

	ring_buf_init(&sh_uart->rx_ringbuf, CONFIG_SHELL_BACKEND_SERIAL_RX_RING_BUFFER_SIZE,
		      sh_uart->rx_buf);
}

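/* Shell transport init: store the device, event handler and context, request
 * the device via PM device runtime and initialize the selected backend
 * variant (async, interrupt-driven or polling).
 */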
static int init(const struct shell_transport *transport,
		const void *config,
		shell_transport_handler_t evt_handler,
		void *context)
{
	struct shell_uart_common *common = (struct shell_uart_common *)transport->ctx;
	int ret;

	common->dev = (const struct device *)config;
	common->handler = evt_handler;
	common->context = context;

#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
	common->smp.buf_pool = &smp_shell_rx_pool;
	k_fifo_init(&common->smp.buf_ready);
#endif

	ret = pm_device_runtime_get(common->dev);
	if (ret < 0) {
		return ret;
	}

	if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_ASYNC)) {
		async_init((struct shell_uart_async *)transport->ctx);
	} else if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_INTERRUPT_DRIVEN)) {
		irq_init((struct shell_uart_int_driven *)transport->ctx);
	} else {
		polling_init((struct shell_uart_polling *)transport->ctx);
	}

	return 0;
}

static void irq_uninit(struct shell_uart_int_driven *sh_uart)
{
	const struct device *dev = sh_uart->common.dev;

	k_timer_stop(&sh_uart->dtr_timer);
	uart_irq_tx_disable(dev);
	uart_irq_rx_disable(dev);
}

static void async_uninit(struct shell_uart_async *sh_uart)
{
}

static void polling_uninit(struct shell_uart_polling *sh_uart)
{
	k_timer_stop(&sh_uart->rx_timer);
}

static int uninit(const struct shell_transport *transport)
{
	struct shell_uart_common *common = (struct shell_uart_common *)transport->ctx;

	if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_ASYNC)) {
		async_uninit((struct shell_uart_async *)transport->ctx);
	} else if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_INTERRUPT_DRIVEN)) {
		irq_uninit((struct shell_uart_int_driven *)transport->ctx);
	} else {
		polling_uninit((struct shell_uart_polling *)transport->ctx);
	}

	return pm_device_runtime_put(common->dev);
}

static int enable(const struct shell_transport *transport, bool blocking_tx)
{
	struct shell_uart_common *sh_uart = (struct shell_uart_common *)transport->ctx;

	sh_uart->blocking_tx =
		blocking_tx || IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_FORCE_TX_BLOCKING_MODE);

	if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_INTERRUPT_DRIVEN) && blocking_tx) {
		uart_irq_tx_disable(sh_uart->dev);
	}

	return 0;
}

static int polling_write(struct shell_uart_common *sh_uart,
			 const void *data, size_t length, size_t *cnt)
{
	const uint8_t *data8 = (const uint8_t *)data;

	for (size_t i = 0; i < length; i++) {
		uart_poll_out(sh_uart->dev, data8[i]);
	}

	*cnt = length;

	sh_uart->handler(SHELL_TRANSPORT_EVT_TX_RDY, sh_uart->context);

	return 0;
}

static int irq_write(struct shell_uart_int_driven *sh_uart,
		 const void *data, size_t length, size_t *cnt)
{
	*cnt = ring_buf_put(&sh_uart->tx_ringbuf, data, length);

	if (atomic_set(&sh_uart->tx_busy, 1) == 0) {
		uart_irq_tx_enable(sh_uart->common.dev);
	}

	return 0;
}

static int async_write(struct shell_uart_async *sh_uart,
		       const void *data, size_t length, size_t *cnt)
{
	int err;

	err = uart_tx(sh_uart->common.dev, data, length, SYS_FOREVER_US);
	if (err < 0) {
		*cnt = 0;
		return err;
	}

	err = k_sem_take(&sh_uart->tx_sem, K_FOREVER);
	*cnt = length;

	sh_uart->common.handler(SHELL_TRANSPORT_EVT_TX_RDY, sh_uart->common.context);

	return err;
}

static int write_uart(const struct shell_transport *transport,
		      const void *data, size_t length, size_t *cnt)
{
	struct shell_uart_common *sh_uart = (struct shell_uart_common *)transport->ctx;

	if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_POLLING) || sh_uart->blocking_tx) {
		return polling_write(sh_uart, data, length, cnt);
	} else if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_INTERRUPT_DRIVEN)) {
		return irq_write((struct shell_uart_int_driven *)transport->ctx, data, length, cnt);
	} else {
		return async_write((struct shell_uart_async *)transport->ctx, data, length, cnt);
	}
}

static int irq_read(struct shell_uart_int_driven *sh_uart,
		    void *data, size_t length, size_t *cnt)
{
	*cnt = ring_buf_get(&sh_uart->rx_ringbuf, data, length);

	return 0;
}

static int polling_read(struct shell_uart_polling *sh_uart,
			void *data, size_t length, size_t *cnt)
{
	*cnt = ring_buf_get(&sh_uart->rx_ringbuf, data, length);

	return 0;
}

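/* Async backend read: claim received data from the uart_async_rx helper,
 * filter out SMP frame bytes when mcumgr is enabled, then consume the data.
 * If a buffer request is pending and a buffer became free, hand it back to
 * the driver, re-enabling RX if the driver already disabled it.
 */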
static int async_read(struct shell_uart_async *sh_uart,
		      void *data, size_t length, size_t *cnt)
{
	uint8_t *buf;
	size_t blen;
	struct uart_async_rx *async_rx = &sh_uart->async_rx;

	blen = uart_async_rx_data_claim(async_rx, &buf, length);
#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
	struct smp_shell_data *const smp = &sh_uart->common.smp;
	size_t sh_cnt = 0;

	for (size_t i = 0; i < blen; i++) {
		if (smp_shell_rx_bytes(smp, &buf[i], 1) == 0) {
			((uint8_t *)data)[sh_cnt++] = buf[i];
		}
	}
#else
	size_t sh_cnt = blen;

	memcpy(data, buf, blen);
#endif
	bool buf_available = uart_async_rx_data_consume(async_rx, sh_cnt);
	*cnt = sh_cnt;

	if (sh_uart->pending_rx_req && buf_available) {
		uint8_t *buf = uart_async_rx_buf_req(async_rx);
		size_t len = uart_async_rx_get_buf_len(async_rx);
		int err;

		__ASSERT_NO_MSG(buf != NULL);
		atomic_dec(&sh_uart->pending_rx_req);
		err = uart_rx_buf_rsp(sh_uart->common.dev, buf, len);
		/* If it is too late and RX is disabled then re-enable it. */
		if (err < 0) {
			if (err == -EACCES) {
				sh_uart->pending_rx_req = 0;
				err = rx_enable(sh_uart->common.dev, buf, len);
			} else {
				return err;
			}
		}
	}

	return 0;
}

static int read_uart(const struct shell_transport *transport,
		     void *data, size_t length, size_t *cnt)
{
	if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_INTERRUPT_DRIVEN)) {
		return irq_read((struct shell_uart_int_driven *)transport->ctx, data, length, cnt);
	} else if (IS_ENABLED(CONFIG_SHELL_BACKEND_SERIAL_API_ASYNC)) {
		return async_read((struct shell_uart_async *)transport->ctx, data, length, cnt);
	} else {
		return polling_read((struct shell_uart_polling *)transport->ctx, data, length, cnt);
	}
}

#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
static void update(const struct shell_transport *transport)
{
	/*
	 * This is dependent on the fact that `struct shell_uart_common`
	 * is always the first member, regardless of the UART configuration
	 */
	struct shell_uart_common *sh_uart = (struct shell_uart_common *)transport->ctx;

	smp_shell_process(&sh_uart->smp);
}
#endif /* CONFIG_MCUMGR_TRANSPORT_SHELL */

const struct shell_transport_api shell_uart_transport_api = {
	.init = init,
	.uninit = uninit,
	.enable = enable,
	.write = write_uart,
	.read = read_uart,
#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
	.update = update,
#endif /* CONFIG_MCUMGR_TRANSPORT_SHELL */
};

SHELL_UART_DEFINE(shell_transport_uart);
SHELL_DEFINE(shell_uart, CONFIG_SHELL_PROMPT_UART, &shell_transport_uart,
	     CONFIG_SHELL_BACKEND_SERIAL_LOG_MESSAGE_QUEUE_SIZE,
	     CONFIG_SHELL_BACKEND_SERIAL_LOG_MESSAGE_QUEUE_TIMEOUT,
	     SHELL_FLAG_OLF_CRLF);

#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
struct smp_shell_data *shell_uart_smp_shell_data_get_ptr(void)
{
	struct shell_uart_common *common = (struct shell_uart_common *)shell_transport_uart.ctx;

	return &common->smp;
}
#endif

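/* SYS_INIT hook: bind the backend to the chosen zephyr,shell-uart device and
 * initialize the shell instance, optionally enabling it as a log backend.
 */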
static int enable_shell_uart(void)
{
	const struct device *const dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_shell_uart));
	bool log_backend = CONFIG_SHELL_BACKEND_SERIAL_LOG_LEVEL > 0;
	uint32_t level =
		(CONFIG_SHELL_BACKEND_SERIAL_LOG_LEVEL > LOG_LEVEL_DBG) ?
		CONFIG_LOG_MAX_LEVEL : CONFIG_SHELL_BACKEND_SERIAL_LOG_LEVEL;
	static const struct shell_backend_config_flags cfg_flags =
					SHELL_DEFAULT_BACKEND_CONFIG_FLAGS;

	if (!device_is_ready(dev)) {
		return -ENODEV;
	}

	if (IS_ENABLED(CONFIG_MCUMGR_TRANSPORT_SHELL)) {
		smp_shell_init();
	}

	shell_init(&shell_uart, dev, cfg_flags, log_backend, level);

	return 0;
}

SYS_INIT(enable_shell_uart, POST_KERNEL,
	 CONFIG_SHELL_BACKEND_SERIAL_INIT_PRIORITY);

const struct shell *shell_backend_uart_get_ptr(void)
{
	return &shell_uart;
}