/*
 * Copyright (c) 2021 Nordic Semiconductor
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include "mesh_test.h"
#include "mesh/mesh.h"
#include "mesh/net.h"
#include "mesh/rpl.h"
#include "mesh/transport.h"

#define LOG_MODULE_NAME test_rpc

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_MODULE_NAME, LOG_LEVEL_INF);

#define WAIT_TIME 60 /* seconds */
#define TEST_DATA_WAITING_TIME 5 /* seconds */
#define TEST_DATA_SIZE 20

static const struct bt_mesh_test_cfg tx_cfg = {
	.addr = 0x0001,
	.dev_key = { 0x01 },
};
static const struct bt_mesh_test_cfg rx_cfg = {
	.addr = 0x0002,
	.dev_key = { 0x02 },
};

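/* Payload buffer and bookkeeping shared between the send callbacks and the
 * receive callback defined below.
 */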
static uint8_t test_data[TEST_DATA_SIZE];
static uint8_t rx_cnt;
static bool is_tx_succeeded;

static void test_tx_init(void)
{
	bt_mesh_test_cfg_set(&tx_cfg, WAIT_TIME);
}

static void test_rx_init(void)
{
	bt_mesh_test_cfg_set(&rx_cfg, WAIT_TIME);
}

static void tx_started(uint16_t dur, int err, void *data)
{
	if (err) {
		FAIL("Couldn't start sending (err: %d)", err);
	}

	LOG_INF("Sending started");
}

static void tx_ended(int err, void *data)
{
	struct k_sem *sem = data;

	if (err) {
		is_tx_succeeded = false;
		LOG_INF("Sending failed (%d)", err);
	} else {
		is_tx_succeeded = true;
		LOG_INF("Sending succeeded");
	}

	k_sem_give(sem);
}

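/* Receive callback registered with bt_mesh_test_ra_cb_setup(): rebuilds the
 * expected payload pattern (every byte equal to the current message count) and
 * compares it against the received data.
 */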
static void rx_ended(uint8_t *data, size_t len)
{
	memset(test_data, rx_cnt++, sizeof(test_data));

	if (memcmp(test_data, data, len)) {
		FAIL("Unexpected rx data");
	}

	LOG_INF("Receiving succeeded");
}

static void tx_sar_conf(void)
{
#ifdef CONFIG_BT_MESH_V1d1
	/* Reconfigure SAR Transmitter state so that the transport layer doesn't
	 * retransmit.
	 */
	struct bt_mesh_sar_tx tx_set = {
		.seg_int_step = CONFIG_BT_MESH_SAR_TX_SEG_INT_STEP,
		.unicast_retrans_count = 0,
		.unicast_retrans_without_prog_count = 0,
		.unicast_retrans_int_step = CONFIG_BT_MESH_SAR_TX_UNICAST_RETRANS_INT_STEP,
		.unicast_retrans_int_inc = CONFIG_BT_MESH_SAR_TX_UNICAST_RETRANS_INT_INC,
		.multicast_retrans_count = CONFIG_BT_MESH_SAR_TX_MULTICAST_RETRANS_COUNT,
		.multicast_retrans_int = CONFIG_BT_MESH_SAR_TX_MULTICAST_RETRANS_INT,
	};

#if defined(CONFIG_BT_MESH_SAR_CFG)
	bt_mesh_test_sar_conf_set(&tx_set, NULL);
#else
	bt_mesh.sar_tx = tx_set;
#endif
#endif
}

static void rx_sar_conf(void)
{
#ifdef CONFIG_BT_MESH_V1d1
	/* Reconfigure SAR Receiver state so that the transport layer generates
	 * Segment Acks as rarely as possible.
	 */
	struct bt_mesh_sar_rx rx_set = {
		.seg_thresh = 0x1f,
		.ack_delay_inc = 0x7,
		.discard_timeout = CONFIG_BT_MESH_SAR_RX_DISCARD_TIMEOUT,
		.rx_seg_int_step = 0xf,
		.ack_retrans_count = CONFIG_BT_MESH_SAR_RX_ACK_RETRANS_COUNT,
	};

#if defined(CONFIG_BT_MESH_SAR_CFG)
	bt_mesh_test_sar_conf_set(NULL, &rx_set);
#else
	bt_mesh.sar_rx = rx_set;
#endif
#endif
}

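/* Immediate replay attack, TX side: send three messages, then rewind the local
 * SeqNum (bt_mesh.seq) and send the same payloads again with reused sequence
 * numbers. The replayed transfers are expected to fail, presumably because the
 * receiver's Replay Protection List drops them and no acknowledgment comes back.
 */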
static void test_tx_immediate_replay_attack(void)
{
	bt_mesh_test_setup();
	tx_sar_conf();

	static const struct bt_mesh_send_cb send_cb = {
		.start = tx_started,
		.end = tx_ended,
	};
	struct k_sem sem;

	k_sem_init(&sem, 0, 1);

	uint32_t seq = bt_mesh.seq;

	for (int i = 0; i < 3; i++) {
		is_tx_succeeded = false;

		memset(test_data, i, sizeof(test_data));
		ASSERT_OK(bt_mesh_test_send_ra(rx_cfg.addr, test_data,
			sizeof(test_data), &send_cb, &sem));

		if (k_sem_take(&sem, K_SECONDS(TEST_DATA_WAITING_TIME))) {
			LOG_ERR("Send timed out");
		}

		ASSERT_TRUE(is_tx_succeeded);
	}

	bt_mesh.seq = seq;

	for (int i = 0; i < 3; i++) {
		is_tx_succeeded = true;

		memset(test_data, i, sizeof(test_data));
		ASSERT_OK(bt_mesh_test_send_ra(rx_cfg.addr, test_data,
			sizeof(test_data), &send_cb, &sem));

		if (k_sem_take(&sem, K_SECONDS(TEST_DATA_WAITING_TIME))) {
			LOG_ERR("Send timed out");
		}

		ASSERT_TRUE(!is_tx_succeeded);
	}

	PASS();
}

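/* Immediate replay attack, RX side: wait long enough for both TX rounds and
 * verify that only the three original messages arrived, i.e. the replayed
 * copies were dropped.
 */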
static void test_rx_immediate_replay_attack(void)
{
	bt_mesh_test_setup();
	rx_sar_conf();
	bt_mesh_test_ra_cb_setup(rx_ended);

	k_sleep(K_SECONDS(6 * TEST_DATA_WAITING_TIME));

	ASSERT_TRUE_MSG(rx_cnt == 3, "Device didn't receive expected data\n");

	PASS();
}

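/* Replay attack after a power cycle, TX side. The assertions below encode the
 * expected outcome: the first three transfers must fail and the following
 * three must succeed; presumably the node comes back from the power cycle with
 * a stale SeqNum that the receiver's RPL rejects until it has advanced far
 * enough.
 */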
static void test_tx_power_replay_attack(void)
{
	bt_mesh_test_setup();
	tx_sar_conf();

	static const struct bt_mesh_send_cb send_cb = {
		.start = tx_started,
		.end = tx_ended,
	};
	struct k_sem sem;

	k_sem_init(&sem, 0, 1);

	for (int i = 0; i < 3; i++) {
		is_tx_succeeded = true;

		memset(test_data, i, sizeof(test_data));
		ASSERT_OK(bt_mesh_test_send_ra(rx_cfg.addr, test_data,
			sizeof(test_data), &send_cb, &sem));

		if (k_sem_take(&sem, K_SECONDS(TEST_DATA_WAITING_TIME))) {
			LOG_ERR("Send timed out");
		}

		ASSERT_TRUE(!is_tx_succeeded);
	}

	for (int i = 0; i < 3; i++) {
		is_tx_succeeded = false;

		memset(test_data, i, sizeof(test_data));
		ASSERT_OK(bt_mesh_test_send_ra(rx_cfg.addr, test_data,
			sizeof(test_data), &send_cb, &sem));

		if (k_sem_take(&sem, K_SECONDS(TEST_DATA_WAITING_TIME))) {
			LOG_ERR("Send timed out");
		}

		ASSERT_TRUE(is_tx_succeeded);
	}

	PASS();
}

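/* Replay attack after a power cycle, RX side: as in the immediate case, only
 * the three legitimate messages are expected to be delivered.
 */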
static void test_rx_power_replay_attack(void)
{
	bt_mesh_test_setup();
	rx_sar_conf();
	bt_mesh_test_ra_cb_setup(rx_ended);

	k_sleep(K_SECONDS(6 * TEST_DATA_WAITING_TIME));

	ASSERT_TRUE_MSG(rx_cnt == 3, "Device didn't receive expected data\n");

	PASS();
}

static void send_end_cb(int err, void *cb_data)
{
	struct k_sem *sem = cb_data;

	ASSERT_EQUAL(err, 0);
	k_sem_give(sem);
}

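/* Send a test message with opcode TEST_MSG_OP_1 and no payload from the given
 * source address to the given destination, and block until the transport end
 * callback fires or a 10-second timeout expires.
 */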
static bool msg_send(uint16_t src, uint16_t dst)
{
	static struct bt_mesh_send_cb cb = {
		.end = send_end_cb,
	};
	struct bt_mesh_msg_ctx ctx = {
		.net_idx = 0,
		.app_idx = 0,
		.addr = dst,
		.send_rel = false,
		.send_ttl = BT_MESH_TTL_DEFAULT,
	};
	struct bt_mesh_net_tx tx = {
		.ctx = &ctx,
		.src = src,
	};
	struct k_sem sem;
	int err;

	k_sem_init(&sem, 0, 1);
	BT_MESH_MODEL_BUF_DEFINE(msg, TEST_MSG_OP_1, 0);

	bt_mesh_model_msg_init(&msg, TEST_MSG_OP_1);

	err = bt_mesh_trans_send(&tx, &msg, &cb, &sem);
	if (err) {
		LOG_ERR("Failed to send message (err %d)", err);
		return false;
	}

	err = k_sem_take(&sem, K_SECONDS(10));
	if (err) {
		LOG_ERR("Send timed out (err %d)", err);
		return false;
	}

	return true;
}

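/* Wait up to 10 seconds for an incoming test message and verify that it comes
 * from the expected source address.
 */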
static bool msg_recv(uint16_t expected_addr)
{
	struct bt_mesh_test_msg msg;
	int err;

	err = bt_mesh_test_recv_msg(&msg, K_SECONDS(10));
	if (err) {
		LOG_ERR("Failed to receive message from %u (err %d)", expected_addr, err);
		return false;
	}

	LOG_DBG("Received msg from %u", msg.ctx.addr);
	ASSERT_EQUAL(expected_addr, msg.ctx.addr);

	return true;
}

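/* Toggle the IV Update state using the IV Update test mode. The callers rely
 * on the return value reflecting the new state: true when entering IV Update
 * in Progress, false when the update completes.
 */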
static bool ivi_update_toggle(void)
{
	bool res;

	bt_mesh_iv_update_test(true);
	res = bt_mesh_iv_update();
	bt_mesh_iv_update_test(false);

	return res;
}

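/* RPL fragmentation, RX side: receive from three source addresses, then run
 * two IV Update cycles while only the even addresses keep sending. After the
 * second update the odd-address RPL entry should be gone, fragmenting the
 * list, which is verified directly with bt_mesh_rpl_check().
 */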
/* 1-second delays have been added to prevent the tail of the previous RX
 * transaction from interfering with the beginning of the new TX transaction.
 */
static void test_rx_rpl_frag(void)
{
	bt_mesh_test_setup();

	k_sleep(K_SECONDS(10));

	/* Wait for 3 messages from different sources. */
	for (int i = 0; i < 3; i++) {
		ASSERT_TRUE(msg_recv(100 + i));
	}

	k_sleep(K_SECONDS(1));

	/* Ask tx node to proceed to next test step. */
	ASSERT_TRUE(msg_send(rx_cfg.addr, tx_cfg.addr));

	/* Start IVI Update. This will set old_iv for all entries in RPL to 1. */
	ASSERT_TRUE(ivi_update_toggle());

	/* Receive messages from even nodes with new IVI. RPL entry with odd address will stay
	 * with old IVI.
	 */
	ASSERT_TRUE(msg_recv(100));
	ASSERT_TRUE(msg_recv(102));

	k_sleep(K_SECONDS(1));

	/* Ask tx node to proceed to next test step. */
	ASSERT_TRUE(msg_send(rx_cfg.addr, tx_cfg.addr));

	/* Complete IVI Update. */
	ASSERT_FALSE(ivi_update_toggle());

	/* Bump SeqNum in RPL for even addresses. */
	ASSERT_TRUE(msg_recv(100));
	ASSERT_TRUE(msg_recv(102));

	k_sleep(K_SECONDS(1));

	/* Start IVI Update again. */
	/* RPL entry with odd address should be removed causing fragmentation in RPL. old_iv flag
	 * for even entries will be set to 1.
	 */
	ASSERT_TRUE(ivi_update_toggle());

	/* Ask tx node to proceed to next test step. */
	ASSERT_TRUE(msg_send(rx_cfg.addr, tx_cfg.addr));

	/* Complete IVI Update. */
	ASSERT_FALSE(ivi_update_toggle());

	/* Odd address entry should have been removed keeping even addresses accessible. */
	struct bt_mesh_rpl *rpl = NULL;
	struct bt_mesh_net_rx rx = {
		.old_iv = 1,
		.seq = 0,
		.ctx.addr = 100,
		.local_match = 1,
	};
	ASSERT_TRUE(bt_mesh_rpl_check(&rx, &rpl));
	rx.ctx.addr = 101;
	ASSERT_FALSE(bt_mesh_rpl_check(&rx, &rpl));
	rx.ctx.addr = 102;
	ASSERT_TRUE(bt_mesh_rpl_check(&rx, &rpl));

	/* Let the settings store RPL. */
	k_sleep(K_SECONDS(CONFIG_BT_MESH_RPL_STORE_TIMEOUT));

	PASS();
}

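/* RPL fragmentation, TX side: mirrors test_rx_rpl_frag() on the sending side.
 * Messages are sent from source addresses 100-102, then only from the even
 * addresses (100 and 102) across two IV Update cycles, keeping the odd address
 * on the old IV index.
 */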
/* 1-second delays have been added to prevent the tail of the previous RX
 * transaction from interfering with the beginning of the new TX transaction.
 */
static void test_tx_rpl_frag(void)
{
	bt_mesh_test_setup();

	k_sleep(K_SECONDS(10));

	/* Send a message from 3 different source addresses. */
	for (size_t i = 0; i < 3; i++) {
		ASSERT_TRUE(msg_send(100 + i, rx_cfg.addr));
	}

	/* Wait for the rx node. */
	ASSERT_TRUE(msg_recv(rx_cfg.addr));

	k_sleep(K_SECONDS(1));

	/* Start IVI Update. */
	ASSERT_TRUE(ivi_update_toggle());

	/* Send msg from elem 1 and 3 with new IVI. 2nd elem should have old IVI. */
	ASSERT_TRUE(msg_send(100, rx_cfg.addr));
	ASSERT_TRUE(msg_send(102, rx_cfg.addr));

	/* Wait for the rx node. */
	ASSERT_TRUE(msg_recv(rx_cfg.addr));

	k_sleep(K_SECONDS(1));

	/* Complete IVI Update. */
	ASSERT_FALSE(ivi_update_toggle());

	/* Send message from even addresses with new IVI keeping odd address with old IVI. */
	ASSERT_TRUE(msg_send(100, rx_cfg.addr));
	ASSERT_TRUE(msg_send(102, rx_cfg.addr));

	/* Start IVI Update again to be in sync with rx node. */
	ASSERT_TRUE(ivi_update_toggle());

	/* Wait for rx node. */
	ASSERT_TRUE(msg_recv(rx_cfg.addr));

	/* Complete IVI Update. */
	ASSERT_FALSE(ivi_update_toggle());

	PASS();
}

static void test_rx_reboot_after_defrag(void)
{
	bt_mesh_test_setup();

	/* Test that RPL entries are restored correctly after defrag and reboot. */
	struct bt_mesh_rpl *rpl = NULL;
	struct bt_mesh_net_rx rx = {
		.old_iv = 1,
		.seq = 0,
		.ctx.addr = 100,
		.local_match = 1,
	};
	ASSERT_TRUE(bt_mesh_rpl_check(&rx, &rpl));
	rx.ctx.addr = 101;
	ASSERT_FALSE(bt_mesh_rpl_check(&rx, &rpl));
	rx.ctx.addr = 102;
	ASSERT_TRUE(bt_mesh_rpl_check(&rx, &rpl));

	PASS();
}

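/* Each entry is registered as "rpc_<role>_<name>" and hooks up the role's
 * config init function, the common timeout handler, and the test body.
 */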
#define TEST_CASE(role, name, description)                     \
	{                                                      \
		.test_id = "rpc_" #role "_" #name,             \
		.test_descr = description,                     \
		.test_post_init_f = test_##role##_init,        \
		.test_tick_f = bt_mesh_test_timeout,           \
		.test_main_f = test_##role##_##name,           \
	}

static const struct bst_test_instance test_rpc[] = {
	TEST_CASE(tx, immediate_replay_attack, "RPC: perform replay attack immediately"),
	TEST_CASE(tx, power_replay_attack,     "RPC: perform replay attack after power cycle"),
	TEST_CASE(tx, rpl_frag, "RPC: Send messages after double IVI Update"),

	TEST_CASE(rx, immediate_replay_attack, "RPC: device under immediate replay attack"),
	TEST_CASE(rx, power_replay_attack,     "RPC: device under power cycle replay attack"),
	TEST_CASE(rx, rpl_frag, "RPC: Test RPL fragmentation after double IVI Update"),
	TEST_CASE(rx, reboot_after_defrag, "RPC: Test RPL after defrag and reboot"),
	BSTEST_END_MARKER
};

struct bst_test_list *test_rpc_install(struct bst_test_list *tests)
{
	tests = bst_add_tests(tests, test_rpc);
	return tests;
}