/*
 * Copyright (c) 2020 Demant
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <string.h>
#include <zephyr/types.h>
#include <zephyr/ztest.h>

#include <stdio.h>
#include <stdlib.h>

#define CONFIG_BT_CTLR_FILTER_ACCEPT_LIST 1
#define CONFIG_BT_CTLR_PRIVACY 1
#define CONFIG_BT_CTLR_RL_SIZE 8
#define CONFIG_BT_CTLR_FAL_SIZE 8
#define CONFIG_BT_CTLR_RPA_CACHE_SIZE 4
#define CONFIG_BT_CTLR_TRPA_CACHE_SIZE 4
#define CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY 1
#define CONFIG_BT_LOG_LEVEL 1

#include "ll_sw/ull_filter.c"

/*
 * Unit test of the SW deferred privacy data structures and related methods.
 * Tests the prpa and trpa cache functions (prpa_cache_add, prpa_cache_clear,
 * prpa_cache_find and trpa_cache_add, trpa_cache_clear, trpa_cache_find).
 */

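/*
 * Illustrative sketch (not part of the test build): the helpers below assume
 * that prpa_cache_add()/trpa_cache_add() behave like small ring buffers in
 * which the write index is advanced before the new entry is stored. Under
 * that assumption, the first address added after a clear lands at index 1,
 * and index 0 is only written once the cache wraps around. A hypothetical,
 * stand-alone model of the prpa variant could look like this (model_newest,
 * model_cache and model_cache_add are made-up names used only for
 * illustration; the real implementation lives in ull_filter.c):
 *
 *	static uint8_t model_newest;
 *	static bt_addr_t model_cache[CONFIG_BT_CTLR_RPA_CACHE_SIZE];
 *
 *	static void model_cache_add(const bt_addr_t *addr)
 *	{
 *		// Advance first, then overwrite the oldest slot.
 *		model_newest = (model_newest + 1) %
 *			       CONFIG_BT_CTLR_RPA_CACHE_SIZE;
 *		bt_addr_copy(&model_cache[model_newest], addr);
 *	}
 */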
#define BT_ADDR_INIT(P0, P1, P2, P3, P4, P5)                                   \
	(&(bt_addr_t){ { P0, P1, P2, P3, P4, P5 } })

void helper_privacy_clear(void)
{
	zassert_equal(newest_prpa, 0, "");
	for (uint8_t i = 0; i < CONFIG_BT_CTLR_RPA_CACHE_SIZE; i++) {
		zassert_equal(prpa_cache[i].taken, 0U, "");
	}
	zassert_equal(newest_trpa, 0, "");
	for (uint8_t i = 0; i < CONFIG_BT_CTLR_TRPA_CACHE_SIZE; i++) {
		zassert_equal(trpa_cache[i].rl_idx, FILTER_IDX_NONE, "");
	}
}

/*
 * Add five addresses to the prpa cache with the write position offset by
 * `skew` entries and verify that each is found at the expected slot,
 * including the wrap-around that evicts the oldest entry (a1).
 */
void helper_prpa_add(int skew)
{
	bt_addr_t a1, a2, a3, a4, a5;
	uint8_t pos, ex_pos;

	bt_addr_copy(&a1, BT_ADDR_INIT(0x12, 0x13, 0x14, 0x15, 0x16, 0x17));
	bt_addr_copy(&a2, BT_ADDR_INIT(0x22, 0x23, 0x24, 0x25, 0x26, 0x27));
	bt_addr_copy(&a3, BT_ADDR_INIT(0x32, 0x33, 0x34, 0x35, 0x36, 0x37));
	bt_addr_copy(&a4, BT_ADDR_INIT(0x42, 0x43, 0x44, 0x45, 0x46, 0x47));
	bt_addr_copy(&a5, BT_ADDR_INIT(0x52, 0x53, 0x54, 0x55, 0x56, 0x57));

	prpa_cache_add(&a1);
	pos = prpa_cache_find(&a1);
	ex_pos = (1 + skew) % CONFIG_BT_CTLR_RPA_CACHE_SIZE;
	zassert_equal(pos, ex_pos, "%d == %d", pos, ex_pos);

	prpa_cache_add(&a2);
	pos = prpa_cache_find(&a2);
	ex_pos = (2 + skew) % CONFIG_BT_CTLR_RPA_CACHE_SIZE;
	zassert_equal(pos, ex_pos, "");

	prpa_cache_add(&a3);
	pos = prpa_cache_find(&a3);
	ex_pos = (3 + skew) % CONFIG_BT_CTLR_RPA_CACHE_SIZE;
	zassert_equal(pos, ex_pos, "");

	/* Adding this should cause wrap around */
	prpa_cache_add(&a4);
	pos = prpa_cache_find(&a4);
	ex_pos = (4 + skew) % CONFIG_BT_CTLR_RPA_CACHE_SIZE;
	zassert_equal(pos, ex_pos, "");

	/* Adding this should cause a1 to be dropped */
	prpa_cache_add(&a5);
	pos = prpa_cache_find(&a5);
	ex_pos = (1 + skew) % CONFIG_BT_CTLR_RPA_CACHE_SIZE;
	zassert_equal(pos, ex_pos, "");

	/* Check that a1 can no longer be found */
	pos = prpa_cache_find(&a1);
	zassert_equal(pos, FILTER_IDX_NONE, "");
}

/*
 * Same as helper_prpa_add(), but for the trpa cache, where each entry is
 * also associated with a resolving list index.
 */
void helper_trpa_add(int skew)
{
	bt_addr_t a1, a2, a3, a4, a5;
	uint8_t pos, ex_pos;

	bt_addr_copy(&a1, BT_ADDR_INIT(0x12, 0x13, 0x14, 0x15, 0x16, 0x17));
	bt_addr_copy(&a2, BT_ADDR_INIT(0x22, 0x23, 0x24, 0x25, 0x26, 0x27));
	bt_addr_copy(&a3, BT_ADDR_INIT(0x32, 0x33, 0x34, 0x35, 0x36, 0x37));
	bt_addr_copy(&a4, BT_ADDR_INIT(0x42, 0x43, 0x44, 0x45, 0x46, 0x47));
	bt_addr_copy(&a5, BT_ADDR_INIT(0x52, 0x53, 0x54, 0x55, 0x56, 0x57));

	trpa_cache_add(&a1, 0);
	pos = trpa_cache_find(&a1, 0);
	ex_pos = (1 + skew) % CONFIG_BT_CTLR_TRPA_CACHE_SIZE;
	zassert_equal(pos, ex_pos, "%d == %d", pos, ex_pos);

	trpa_cache_add(&a2, 1);
	pos = trpa_cache_find(&a2, 1);
	ex_pos = (2 + skew) % CONFIG_BT_CTLR_TRPA_CACHE_SIZE;
	zassert_equal(pos, ex_pos, "");

	trpa_cache_add(&a3, 2);
	pos = trpa_cache_find(&a3, 2);
	ex_pos = (3 + skew) % CONFIG_BT_CTLR_TRPA_CACHE_SIZE;
	zassert_equal(pos, ex_pos, "");

	/* Adding this should cause wrap around */
	trpa_cache_add(&a4, 3);
	pos = trpa_cache_find(&a4, 3);
	ex_pos = (4 + skew) % CONFIG_BT_CTLR_TRPA_CACHE_SIZE;
	zassert_equal(pos, ex_pos, "");

	/* Adding this should cause a1 to be dropped */
	trpa_cache_add(&a5, 4);
	pos = trpa_cache_find(&a5, 4);
	ex_pos = (1 + skew) % CONFIG_BT_CTLR_TRPA_CACHE_SIZE;
	zassert_equal(pos, ex_pos, "");

	/* Check that a1 can no longer be found */
	pos = trpa_cache_find(&a1, 0);
	zassert_equal(pos, FILTER_IDX_NONE, "");
}

static void before(void *data)
{
	ARG_UNUSED(data);

	/* Run before each test - clear the caches so we start fresh each time. */
	prpa_cache_clear();
	trpa_cache_clear();
}

ZTEST_SUITE(test_ctrl_sw_privacy_unit, NULL, NULL, before, NULL, NULL);

ZTEST(test_ctrl_sw_privacy_unit, test_privacy_clear)
{
	helper_privacy_clear();
}

ZTEST(test_ctrl_sw_privacy_unit, test_privacy_add)
{
	helper_prpa_add(0);
	helper_trpa_add(0);
}

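/*
 * Stress variant of the add tests: before running the shared add/find
 * helpers, pre-load `skew` dummy entries so that the cache write position
 * starts offset by `skew`. The helpers then check every expected slot
 * modulo the cache size, and the cache is cleared again before moving on
 * to the next offset.
 */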
ZTEST(test_ctrl_sw_privacy_unit, test_privacy_add_stress)
{
	bt_addr_t ar;

	for (uint8_t skew = 0; skew < CONFIG_BT_CTLR_RPA_CACHE_SIZE; skew++) {
		for (uint8_t i = 0; i < skew; i++) {
			bt_addr_copy(&ar,
				     BT_ADDR_INIT(0xde, 0xad, 0xbe,
						  0xef, 0xaa, 0xff));
			prpa_cache_add(&ar);
		}
		helper_prpa_add(skew);
		prpa_cache_clear();
	}

	for (uint8_t skew = 0; skew < CONFIG_BT_CTLR_TRPA_CACHE_SIZE; skew++) {
		for (uint8_t i = 0; i < skew; i++) {
			bt_addr_copy(&ar,
				     BT_ADDR_INIT(0xde, 0xad, 0xbe,
						  0xef, 0xaa, 0xff));
			trpa_cache_add(&ar, 0);
		}
		helper_trpa_add(skew);
		trpa_cache_clear();
	}
}