/*
 * Copyright (c) 2017 Intel Corporation
 * Copyright (c) 2020 Lingao Meng
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <string.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/byteorder.h>

#include <zephyr/net/buf.h>
#include <zephyr/bluetooth/bluetooth.h>
#include <zephyr/bluetooth/conn.h>
#include <zephyr/bluetooth/mesh.h>

#include "mesh.h"
#include "net.h"
#include "rpl.h"
#include "settings.h"

#define LOG_LEVEL CONFIG_BT_MESH_RPL_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(bt_mesh_rpl);

/* Replay Protection List information for persistent storage. */
struct rpl_val {
	uint32_t seq:24,
	      old_iv:1;
};

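/* One RPL slot per tracked source address, plus a bitmap marking which
 * entries have changes pending persistent storage.
 */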
static struct bt_mesh_rpl replay_list[CONFIG_BT_MESH_CRPL];
static ATOMIC_DEFINE(store, CONFIG_BT_MESH_CRPL);

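/* Flags for deferred RPL operations, processed in bt_mesh_rpl_pending_store(). */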
enum {
	PENDING_CLEAR,
	PENDING_RESET,
	RPL_FLAGS_COUNT,
};
static ATOMIC_DEFINE(rpl_flags, RPL_FLAGS_COUNT);

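/* Index of an entry within replay_list. */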
static inline int rpl_idx(const struct bt_mesh_rpl *rpl)
{
	return rpl - &replay_list[0];
}

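/* Delete the entry's record from persistent storage and clear its
 * pending-store bit. The RAM entry itself is left untouched.
 */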
static void clear_rpl(struct bt_mesh_rpl *rpl)
{
	int err;
	char path[18];

	if (!rpl->src) {
		return;
	}

	atomic_clear_bit(store, rpl_idx(rpl));

	snprintk(path, sizeof(path), "bt/mesh/RPL/%x", rpl->src);
	err = settings_delete(path);
	if (err) {
		LOG_ERR("Failed to clear RPL");
	} else {
		LOG_DBG("Cleared RPL");
	}
}

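/* Mark an entry as pending storage and schedule the settings work item,
 * unless storage is deferred (negative CONFIG_BT_MESH_RPL_STORE_TIMEOUT)
 * and force is not set.
 */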
static void schedule_rpl_store(struct bt_mesh_rpl *entry, bool force)
{
	atomic_set_bit(store, rpl_idx(entry));

	if (force
#ifdef CONFIG_BT_MESH_RPL_STORE_TIMEOUT
	    || CONFIG_BT_MESH_RPL_STORE_TIMEOUT >= 0
#endif
	    ) {
		bt_mesh_settings_store_schedule(BT_MESH_SETTINGS_RPL_PENDING);
	}
}

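/* Record the SRC, SeqNum and IV flag of a received message in the given RPL
 * entry, and schedule it for persistent storage when settings are enabled.
 */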
void bt_mesh_rpl_update(struct bt_mesh_rpl *rpl,
		struct bt_mesh_net_rx *rx)
{
	/* If this is the first message on the new IV index, reset the segment
	 * bitmap to zero to avoid invalid combinations of IV index and seg.
	 */
	if (rpl->old_iv && !rx->old_iv) {
		rpl->seg = 0;
	}

	rpl->src = rx->ctx.addr;
	rpl->seq = rx->seq;
	rpl->old_iv = rx->old_iv;

	if (IS_ENABLED(CONFIG_BT_SETTINGS)) {
		schedule_rpl_store(rpl, false);
	}
}

/* Check the Replay Protection List for a replay attempt. Returns true if the
 * message is a replay (or the list is full) and must be dropped. If a non-NULL
 * match parameter is given, the matching RPL slot is returned but not updated
 * yet; this prevents storing data for messages that are later rejected by
 * upper logic (access, transport commands) and is also needed when receiving
 * segmented messages. If match is NULL, the RPL is updated immediately (used
 * for proxy configuration).
 */
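/* A minimal usage sketch (the surrounding receive handling is illustrative,
 * not taken from this file):
 *
 *	struct bt_mesh_rpl *rpl = NULL;
 *
 *	if (bt_mesh_rpl_check(rx, &rpl)) {
 *		return;		// replay or RPL full: drop the message
 *	}
 *
 *	// ... upper-layer processing that may still reject the message ...
 *
 *	if (rpl) {
 *		bt_mesh_rpl_update(rpl, rx);	// accept: record SRC/SeqNum
 *	}
 */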
bool bt_mesh_rpl_check(struct bt_mesh_net_rx *rx, struct bt_mesh_rpl **match)
{
	struct bt_mesh_rpl *rpl;
	int i;

	/* Don't bother checking messages from ourselves */
	if (rx->net_if == BT_MESH_NET_IF_LOCAL) {
		return false;
	}

	/* The RPL is used only for the local node */
	if (!rx->local_match) {
		return false;
	}

	for (i = 0; i < ARRAY_SIZE(replay_list); i++) {
		rpl = &replay_list[i];

		/* Empty slot */
		if (!rpl->src) {
			goto match;
		}

		/* Existing slot for given address */
		if (rpl->src == rx->ctx.addr) {
			if (!rpl->old_iv &&
			    atomic_test_bit(rpl_flags, PENDING_RESET) &&
			    !atomic_test_bit(store, i)) {
				/* Until the RPL reset has finished, an entry with old_iv == false
				 * and without the "store" bit set will be removed and can
				 * therefore be reused. If such an entry is reused, the "store"
				 * bit will be set and the entry won't be removed.
				 */
				goto match;
			}

			if (rx->old_iv && !rpl->old_iv) {
				return true;
			}

			if ((!rx->old_iv && rpl->old_iv) ||
			    rpl->seq < rx->seq) {
				goto match;
			} else {
				return true;
			}
		}
	}

	LOG_ERR("RPL is full!");
	return true;

match:
	if (match) {
		*match = rpl;
	} else {
		bt_mesh_rpl_update(rpl, rx);
	}

	return false;
}

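/* Clear the entire replay protection list. Without settings support the list
 * is wiped immediately; otherwise clearing of the stored entries is deferred
 * to bt_mesh_rpl_pending_store().
 */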
void bt_mesh_rpl_clear(void)
{
	LOG_DBG("");

	if (!IS_ENABLED(CONFIG_BT_SETTINGS)) {
		(void)memset(replay_list, 0, sizeof(replay_list));
		return;
	}

	atomic_set_bit(rpl_flags, PENDING_CLEAR);

	bt_mesh_settings_store_schedule(BT_MESH_SETTINGS_RPL_PENDING);
}

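/* Find the RPL entry for the given source address, or NULL if none exists. */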
static struct bt_mesh_rpl *bt_mesh_rpl_find(uint16_t src)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(replay_list); i++) {
		if (replay_list[i].src == src) {
			return &replay_list[i];
		}
	}

	return NULL;
}

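/* Take the first free slot in replay_list for the given source address, or
 * return NULL if the list is full.
 */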
static struct bt_mesh_rpl *bt_mesh_rpl_alloc(uint16_t src)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(replay_list); i++) {
		if (!replay_list[i].src) {
			replay_list[i].src = src;
			return &replay_list[i];
		}
	}

	return NULL;
}

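/* Handle an IV Index update for the RPL. With settings support, removal of
 * stale entries from storage is deferred to bt_mesh_rpl_pending_store() via
 * the PENDING_RESET flag; otherwise the list is compacted in place right away.
 */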
void bt_mesh_rpl_reset(void)
{
	/* Discard "old old" IV Index entries from RPL and flag
	 * any other ones (which are valid) as old.
	 */
	if (IS_ENABLED(CONFIG_BT_SETTINGS)) {
		int i;

		for (i = 0; i < ARRAY_SIZE(replay_list); i++) {
			struct bt_mesh_rpl *rpl = &replay_list[i];

			if (!rpl->src) {
				continue;
			}

			/* Entries with "store" bit set will be stored, other entries will be
			 * removed.
			 */
			atomic_set_bit_to(store, i, !rpl->old_iv);
			rpl->old_iv = !rpl->old_iv;
		}

		if (i != 0) {
			atomic_set_bit(rpl_flags, PENDING_RESET);
			bt_mesh_settings_store_schedule(BT_MESH_SETTINGS_RPL_PENDING);
		}
	} else {
		int shift = 0;
		int last = 0;

		for (int i = 0; i < ARRAY_SIZE(replay_list); i++) {
			struct bt_mesh_rpl *rpl = &replay_list[i];

			if (rpl->src) {
				if (rpl->old_iv) {
					(void)memset(rpl, 0, sizeof(*rpl));

					shift++;
				} else {
					rpl->old_iv = true;

					if (shift > 0) {
						replay_list[i - shift] = *rpl;
					}
				}

				last = i;
			}
		}

		(void)memset(&replay_list[last - shift + 1], 0, sizeof(struct bt_mesh_rpl) * shift);
	}
}

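/* Settings load handler for the "RPL" subtree. The key is the source address
 * in hexadecimal and the value is a struct rpl_val; an empty value deletes
 * the corresponding entry.
 */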
static int rpl_set(const char *name, size_t len_rd,
		   settings_read_cb read_cb, void *cb_arg)
{
	struct bt_mesh_rpl *entry;
	struct rpl_val rpl;
	int err;
	uint16_t src;

	if (!name) {
		LOG_ERR("Insufficient number of arguments");
		return -ENOENT;
	}

	src = strtol(name, NULL, 16);
	entry = bt_mesh_rpl_find(src);

	if (len_rd == 0) {
		LOG_DBG("val (null)");
		if (entry) {
			(void)memset(entry, 0, sizeof(*entry));
		} else {
			LOG_WRN("Unable to find RPL entry for 0x%04x", src);
		}

		return 0;
	}

	if (!entry) {
		entry = bt_mesh_rpl_alloc(src);
		if (!entry) {
			LOG_ERR("Unable to allocate RPL entry for 0x%04x", src);
			return -ENOMEM;
		}
	}

	err = bt_mesh_settings_set(read_cb, cb_arg, &rpl, sizeof(rpl));
	if (err) {
		LOG_ERR("Failed to set RPL");
		return err;
	}

	entry->seq = rpl.seq;
	entry->old_iv = rpl.old_iv;

	LOG_DBG("RPL entry for 0x%04x: Seq 0x%06x old_iv %u", entry->src, entry->seq,
		entry->old_iv);

	return 0;
}

BT_MESH_SETTINGS_DEFINE(rpl, "RPL", rpl_set);

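/* Write a single RPL entry to persistent storage under "bt/mesh/RPL/<src>". */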
static void store_rpl(struct bt_mesh_rpl *entry)
{
	struct rpl_val rpl = {0};
	char path[18];
	int err;

	if (!entry->src) {
		return;
	}

	LOG_DBG("src 0x%04x seq 0x%06x old_iv %u", entry->src, entry->seq, entry->old_iv);

	rpl.seq = entry->seq;
	rpl.old_iv = entry->old_iv;

	snprintk(path, sizeof(path), "bt/mesh/RPL/%x", entry->src);

	err = settings_save_one(path, &rpl, sizeof(rpl));
	if (err) {
		LOG_ERR("Failed to store RPL %s value", path);
	} else {
		LOG_DBG("Stored RPL %s value", path);
	}
}

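/* Storage work handler: flush pending clear/reset/store operations for a
 * single unicast address or, for BT_MESH_ADDR_ALL_NODES, for the whole list,
 * compacting the list as entries are removed.
 */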
void bt_mesh_rpl_pending_store(uint16_t addr)
{
	int shift = 0;
	int last = 0;
	bool clr;
	bool rst;

	if (!IS_ENABLED(CONFIG_BT_SETTINGS) ||
	    (!BT_MESH_ADDR_IS_UNICAST(addr) &&
	     addr != BT_MESH_ADDR_ALL_NODES)) {
		return;
	}

	if (addr == BT_MESH_ADDR_ALL_NODES) {
		bt_mesh_settings_store_cancel(BT_MESH_SETTINGS_RPL_PENDING);
	}

	clr = atomic_test_and_clear_bit(rpl_flags, PENDING_CLEAR);
	rst = atomic_test_bit(rpl_flags, PENDING_RESET);

	for (int i = 0; i < ARRAY_SIZE(replay_list); i++) {
		struct bt_mesh_rpl *rpl = &replay_list[i];

		if (addr != BT_MESH_ADDR_ALL_NODES && addr != rpl->src) {
			continue;
		}

		if (clr) {
			clear_rpl(rpl);
			shift++;
		} else if (atomic_test_and_clear_bit(store, i)) {
			if (shift > 0) {
				replay_list[i - shift] = *rpl;
			}

			store_rpl(&replay_list[i - shift]);
		} else if (rst) {
			clear_rpl(rpl);

			/* Check if this entry was re-used during removal. If so, shift it as well.
			 * Otherwise, increment shift counter.
			 */
			if (atomic_test_and_clear_bit(store, i)) {
				replay_list[i - shift] = *rpl;
				atomic_set_bit(store, i - shift);
			} else {
				shift++;
			}
		}

		last = i;

		if (addr != BT_MESH_ADDR_ALL_NODES) {
			break;
		}
	}

	atomic_clear_bit(rpl_flags, PENDING_RESET);

	if (addr == BT_MESH_ADDR_ALL_NODES) {
		(void)memset(&replay_list[last - shift + 1], 0, sizeof(struct bt_mesh_rpl) * shift);
	}
}