1 /*
2  * Copyright Runtime.io 2018. All rights reserved.
3  * Copyright (c) 2021-2022 Nordic Semiconductor ASA
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <assert.h>
9 #include <zephyr/kernel.h>
10 #include <zephyr/device.h>
11 #include <zephyr/net/buf.h>
12 #include <zephyr/mgmt/mcumgr/mgmt/mgmt.h>
13 #include <zephyr/mgmt/mcumgr/smp/smp.h>
14 #include <zephyr/mgmt/mcumgr/transport/smp.h>
15 
16 #include <mgmt/mcumgr/transport/smp_reassembly.h>
17 
18 #include <zephyr/logging/log.h>
19 LOG_MODULE_REGISTER(mcumgr_smp, CONFIG_MCUMGR_TRANSPORT_LOG_LEVEL);
20 
21 /* To be able to unit test some callers some functions need to be
22  * demoted to allow overriding them.
23  */
24 #ifdef CONFIG_ZTEST
25 #define WEAK __weak
26 #else
27 #define WEAK
28 #endif
29 
30 K_THREAD_STACK_DEFINE(smp_work_queue_stack, CONFIG_MCUMGR_TRANSPORT_WORKQUEUE_STACK_SIZE);
31 
32 static struct k_work_q smp_work_queue;
33 
34 static const struct k_work_queue_config smp_work_queue_config = {
35 	.name = "mcumgr smp"
36 };
37 
38 NET_BUF_POOL_DEFINE(pkt_pool, CONFIG_MCUMGR_TRANSPORT_NETBUF_COUNT,
39 		    CONFIG_MCUMGR_TRANSPORT_NETBUF_SIZE,
40 		    CONFIG_MCUMGR_TRANSPORT_NETBUF_USER_DATA_SIZE, NULL);
41 
smp_packet_alloc(void)42 struct net_buf *smp_packet_alloc(void)
43 {
44 	return net_buf_alloc(&pkt_pool, K_NO_WAIT);
45 }
46 
/**
 * @brief Releases an SMP packet buffer back to the pool.
 *
 * Drops one reference on @p nb; the buffer returns to @c pkt_pool when the
 * last reference is released.
 *
 * @param nb	Buffer to release; must not be NULL.
 */
void smp_packet_free(struct net_buf *nb)
{
	net_buf_unref(nb);
}
51 
52 /**
53  * @brief Allocates a response buffer.
54  *
55  * If a source buf is provided, its user data is copied into the new buffer.
56  *
57  * @param req		An optional source buffer to copy user data from.
58  * @param arg		The streamer providing the callback.
59  *
60  * @return	Newly-allocated buffer on success
61  *		NULL on failure.
62  */
smp_alloc_rsp(const void * req,void * arg)63 void *smp_alloc_rsp(const void *req, void *arg)
64 {
65 	const struct net_buf *req_nb;
66 	struct net_buf *rsp_nb;
67 	struct smp_transport *smpt = arg;
68 
69 	req_nb = req;
70 
71 	rsp_nb = smp_packet_alloc();
72 	if (rsp_nb == NULL) {
73 		return NULL;
74 	}
75 
76 	if (smpt->functions.ud_copy) {
77 		smpt->functions.ud_copy(rsp_nb, req_nb);
78 	} else {
79 		memcpy(net_buf_user_data(rsp_nb),
80 		       net_buf_user_data((void *)req_nb),
81 		       req_nb->user_data_size);
82 	}
83 
84 	return rsp_nb;
85 }
86 
smp_free_buf(void * buf,void * arg)87 void smp_free_buf(void *buf, void *arg)
88 {
89 	struct smp_transport *smpt = arg;
90 
91 	if (!buf) {
92 		return;
93 	}
94 
95 	if (smpt->functions.ud_free) {
96 		smpt->functions.ud_free(net_buf_user_data((struct net_buf *)buf));
97 	}
98 
99 	smp_packet_free(buf);
100 }
101 
102 /**
103  * Processes a single SMP packet and sends the corresponding response(s).
104  */
105 static int
smp_process_packet(struct smp_transport * smpt,struct net_buf * nb)106 smp_process_packet(struct smp_transport *smpt, struct net_buf *nb)
107 {
108 	struct cbor_nb_reader reader;
109 	struct cbor_nb_writer writer;
110 	struct smp_streamer streamer;
111 	int rc;
112 
113 	streamer = (struct smp_streamer) {
114 		.reader = &reader,
115 		.writer = &writer,
116 		.smpt = smpt,
117 	};
118 
119 	rc = smp_process_request_packet(&streamer, nb);
120 	return rc;
121 }
122 
123 /**
124  * Processes all received SNP request packets.
125  */
126 static void
smp_handle_reqs(struct k_work * work)127 smp_handle_reqs(struct k_work *work)
128 {
129 	struct smp_transport *smpt;
130 	struct net_buf *nb;
131 
132 	smpt = (void *)work;
133 
134 	while ((nb = net_buf_get(&smpt->fifo, K_NO_WAIT)) != NULL) {
135 		smp_process_packet(smpt, nb);
136 	}
137 }
138 
smp_transport_init(struct smp_transport * smpt)139 int smp_transport_init(struct smp_transport *smpt)
140 {
141 	__ASSERT((smpt->functions.output != NULL),
142 		 "Required transport output function pointer cannot be NULL");
143 
144 	if (smpt->functions.output == NULL) {
145 		return -EINVAL;
146 	}
147 
148 #ifdef CONFIG_MCUMGR_TRANSPORT_REASSEMBLY
149 	smp_reassembly_init(smpt);
150 #endif
151 
152 	k_work_init(&smpt->work, smp_handle_reqs);
153 	k_fifo_init(&smpt->fifo);
154 
155 	return 0;
156 }
157 
/**
 * @brief Enqueues an incoming SMP request packet for processing.
 *
 * This function always consumes the supplied net_buf.
 *
 * @param smpt                  The transport to use to send the corresponding
 *                                  response(s).
 * @param nb                    The request packet to process.
 */
WEAK void
smp_rx_req(struct smp_transport *smpt, struct net_buf *nb)
{
	/* Queue the packet first, then kick the work item; the handler
	 * drains the FIFO, so a redundant submit while already queued is
	 * harmless.
	 */
	net_buf_put(&smpt->fifo, nb);
	k_work_submit_to_queue(&smp_work_queue, &smpt->work);
}
173 
/* Filters the transport's pending request FIFO, freeing every queued packet
 * that the transport's query_valid_check callback rejects and keeping the
 * rest in their original order. Re-submits the work item if any packets
 * remain afterwards. `arg` is passed through opaquely to the callback.
 */
void smp_rx_remove_invalid(struct smp_transport *zst, void *arg)
{
	struct net_buf *nb;
	struct k_fifo temp_fifo;

	if (zst->functions.query_valid_check == NULL) {
		/* No check function registered, abort check */
		return;
	}

	/* Cancel current work-queue if ongoing */
	if (k_work_busy_get(&zst->work) & (K_WORK_RUNNING | K_WORK_QUEUED)) {
		k_work_cancel(&zst->work);
	}

	/* Run callback function and remove all buffers that are no longer needed. Store those
	 * that are in a temporary FIFO
	 */
	k_fifo_init(&temp_fifo);

	while ((nb = net_buf_get(&zst->fifo, K_NO_WAIT)) != NULL) {
		if (!zst->functions.query_valid_check(nb, arg)) {
			/* Rejected: release via smp_free_buf so the transport's
			 * ud_free cleanup runs before the buffer is returned.
			 */
			smp_free_buf(nb, zst);
		} else {
			net_buf_put(&temp_fifo, nb);
		}
	}

	/* Re-insert the remaining queued operations into the original FIFO */
	while ((nb = net_buf_get(&temp_fifo, K_NO_WAIT)) != NULL) {
		net_buf_put(&zst->fifo, nb);
	}

	/* If at least one entry remains, queue the workqueue for running */
	if (!k_fifo_is_empty(&zst->fifo)) {
		k_work_submit_to_queue(&smp_work_queue, &zst->work);
	}
}
212 
smp_rx_clear(struct smp_transport * zst)213 void smp_rx_clear(struct smp_transport *zst)
214 {
215 	struct net_buf *nb;
216 
217 	/* Cancel current work-queue if ongoing */
218 	if (k_work_busy_get(&zst->work) & (K_WORK_RUNNING | K_WORK_QUEUED)) {
219 		k_work_cancel(&zst->work);
220 	}
221 
222 	/* Drain the FIFO of all entries without re-adding any */
223 	while ((nb = net_buf_get(&zst->fifo, K_NO_WAIT)) != NULL) {
224 		smp_free_buf(nb, zst);
225 	}
226 }
227 
smp_init(void)228 static int smp_init(void)
229 {
230 	k_work_queue_init(&smp_work_queue);
231 
232 	k_work_queue_start(&smp_work_queue, smp_work_queue_stack,
233 			   K_THREAD_STACK_SIZEOF(smp_work_queue_stack),
234 			   CONFIG_MCUMGR_TRANSPORT_WORKQUEUE_THREAD_PRIO, &smp_work_queue_config);
235 
236 	return 0;
237 }
238 
239 SYS_INIT(smp_init, APPLICATION, CONFIG_APPLICATION_INIT_PRIORITY);
240