1 /*
2 * Copyright (c) 2017-2019 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <string.h>
8
9 #include <zephyr/kernel.h>
10 #include <soc.h>
11 #include <zephyr/bluetooth/hci_types.h>
12 #include <zephyr/sys/byteorder.h>
13
14 #include "hal/cpu.h"
15 #include "hal/ccm.h"
16
17 #include "util/util.h"
18 #include "util/mem.h"
19 #include "util/memq.h"
20 #include "util/mayfly.h"
21 #include "util/dbuf.h"
22
23 #include "pdu_df.h"
24 #include "lll/pdu_vendor.h"
25 #include "pdu.h"
26
27 #include "lll.h"
28 #include "lll/lll_adv_types.h"
29 #include "lll_adv.h"
30 #include "lll/lll_adv_pdu.h"
31 #include "lll_scan.h"
32 #include "lll/lll_df_types.h"
33 #include "lll_conn.h"
34 #include "lll_filter.h"
35
36 #include "ll_sw/ull_tx_queue.h"
37
38 #include "ull_adv_types.h"
39 #include "ull_scan_types.h"
40 #include "ull_conn_types.h"
41 #include "ull_filter.h"
42
43 #include "ull_internal.h"
44 #include "ull_adv_internal.h"
45 #include "ull_scan_internal.h"
46 #include "ull_conn_internal.h"
47
48 #include "ll.h"
49
50 #include "hal/debug.h"
51
52 #define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL
53 #include <zephyr/logging/log.h>
54 LOG_MODULE_REGISTER(bt_ctlr_ull_filter);
55
56 #define ADDR_TYPE_ANON 0xFF
57
58 #if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
59 /* Hardware Filter Accept List */
60 static struct lll_filter fal_filter;
61
62 #if defined(CONFIG_BT_CTLR_PRIVACY)
63 #include "common/rpa.h"
64
65 /* Filter Accept List peer list */
66 static struct lll_fal fal[CONFIG_BT_CTLR_FAL_SIZE];
67
68 /* Resolving list */
69 static struct lll_resolve_list rl[CONFIG_BT_CTLR_RL_SIZE];
70 static uint8_t rl_enable;
71
72 #if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
73 /* Cache of known unknown peer RPAs */
74 static uint8_t newest_prpa;
75 static struct lll_prpa_cache prpa_cache[CONFIG_BT_CTLR_RPA_CACHE_SIZE];
76
77 /* Cache of known unknown target RPAs */
78 static uint8_t newest_trpa;
79 static struct lll_trpa_cache trpa_cache[CONFIG_BT_CTLR_TRPA_CACHE_SIZE];
80
81 struct prpa_resolve_work {
82 struct k_work prpa_work;
83 bt_addr_t rpa;
84 resolve_callback_t cb;
85 };
86
87 struct target_resolve_work {
88 struct k_work target_work;
89 bt_addr_t rpa;
90 uint8_t idx;
91 resolve_callback_t cb;
92 };
93 #endif /* CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY */
94
95 static uint8_t peer_irks[CONFIG_BT_CTLR_RL_SIZE][IRK_SIZE];
96 static uint8_t peer_irk_rl_ids[CONFIG_BT_CTLR_RL_SIZE];
97 static uint8_t peer_irk_count;
98
99 static bt_addr_t local_rpas[CONFIG_BT_CTLR_RL_SIZE];
100
101 #if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
102 static struct prpa_resolve_work resolve_work;
103 static struct target_resolve_work t_work;
104
105 BUILD_ASSERT(ARRAY_SIZE(prpa_cache) < FILTER_IDX_NONE);
106 BUILD_ASSERT(ARRAY_SIZE(trpa_cache) < FILTER_IDX_NONE);
107 #endif /* CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY */
108 BUILD_ASSERT(ARRAY_SIZE(fal) < FILTER_IDX_NONE);
109 BUILD_ASSERT(ARRAY_SIZE(rl) < FILTER_IDX_NONE);
110
111 /* Hardware filter for the resolving list */
112 static struct lll_filter rl_filter;
113
114 #define DEFAULT_RPA_TIMEOUT_MS (900 * 1000)
115 static uint32_t rpa_timeout_ms;
116 static int64_t rpa_last_ms;
117
118 static struct k_work_delayable rpa_work;
119
120 #define LIST_MATCH(list, i, type, addr) (list[i].taken && \
121 (list[i].id_addr_type == (type & 0x1)) && \
122 !memcmp(list[i].id_addr.val, addr, BDADDR_SIZE))
123
124 static void fal_clear(void);
125 static uint8_t fal_find(uint8_t addr_type, const uint8_t *const addr,
126 uint8_t *const free_idx);
127 static uint32_t fal_add(bt_addr_le_t *id_addr);
128 static uint32_t fal_remove(bt_addr_le_t *id_addr);
129 static void fal_update(void);
130
131 static void rl_clear(void);
132 static void rl_update(void);
133 static int rl_access_check(bool check_ar);
134
135 #if defined(CONFIG_BT_BROADCASTER)
136 static void rpa_adv_refresh(struct ll_adv_set *adv);
137 #endif
138 static void rpa_timeout(struct k_work *work);
139 static void rpa_refresh_start(void);
140 static void rpa_refresh_stop(void);
141 #else /* !CONFIG_BT_CTLR_PRIVACY */
142 static uint32_t filter_add(struct lll_filter *filter, uint8_t addr_type,
143 uint8_t *bdaddr);
144 static uint32_t filter_remove(struct lll_filter *filter, uint8_t addr_type,
145 uint8_t *bdaddr);
146 #endif /* !CONFIG_BT_CTLR_PRIVACY */
147
148 static uint32_t filter_find(const struct lll_filter *const filter,
149 uint8_t addr_type, const uint8_t *const bdaddr);
150 static void filter_insert(struct lll_filter *const filter, int index,
151 uint8_t addr_type, const uint8_t *const bdaddr);
152 static void filter_clear(struct lll_filter *filter);
153
154 #if defined(CONFIG_BT_CTLR_PRIVACY) && \
155 defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
156 static void conn_rpa_update(uint8_t rl_idx);
157 #endif /* CONFIG_BT_CTLR_PRIVACY && CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */
158
159 #if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
160 static void prpa_cache_clear(void);
161 static uint8_t prpa_cache_find(bt_addr_t *prpa_cache_addr);
162 static void prpa_cache_add(bt_addr_t *prpa_cache_addr);
163 static uint8_t prpa_cache_try_resolve(bt_addr_t *rpa);
164 static void prpa_cache_resolve(struct k_work *work);
165 static void target_resolve(struct k_work *work);
166 static void trpa_cache_clear(void);
167 static uint8_t trpa_cache_find(bt_addr_t *prpa_cache_addr, uint8_t rl_idx);
168 static void trpa_cache_add(bt_addr_t *prpa_cache_addr, uint8_t rl_idx);
169 #endif /* CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY */
170 #endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
171
172 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
173 #define PAL_ADDR_MATCH(type, addr) \
174 (pal[i].taken && \
175 (pal[i].id_addr_type == (type & 0x1)) && \
176 !memcmp(pal[i].id_addr.val, addr, BDADDR_SIZE))
177
178 #define PAL_MATCH(type, addr, sid) \
179 (PAL_ADDR_MATCH(type, addr) && \
180 (pal[i].sid == sid))
181
182 /* Periodic Advertising Accept List */
183 #define PAL_SIZE CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST_SIZE
184 static struct lll_pal pal[PAL_SIZE];
185
186 static void pal_clear(void);
187 #if defined(CONFIG_BT_CTLR_PRIVACY)
188 static uint8_t pal_addr_find(const uint8_t addr_type,
189 const uint8_t *const addr);
190 #endif /* CONFIG_BT_CTLR_PRIVACY */
191 static uint8_t pal_find(const uint8_t addr_type, const uint8_t *const addr,
192 const uint8_t sid, uint8_t *const free_idx);
193 static uint32_t pal_add(const bt_addr_le_t *const id_addr, const uint8_t sid);
194 static uint32_t pal_remove(const bt_addr_le_t *const id_addr,
195 const uint8_t sid);
196 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */
197
198 #if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
ll_fal_size_get(void)199 uint8_t ll_fal_size_get(void)
200 {
201 return CONFIG_BT_CTLR_FAL_SIZE;
202 }
203
/* HCI LE Clear Filter Accept List.
 *
 * Disallowed while the list is in active use: by the advertiser (any
 * advertising filter policy set) or by the scanner (accept-list scan
 * filter policy bit enabled).
 */
uint8_t ll_fal_clear(void)
{
#if defined(CONFIG_BT_BROADCASTER)
	/* Advertiser currently relies on the accept list */
	if (ull_adv_filter_pol_get(0)) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_OBSERVER)
	/* Scan filter policy bit 0: accept list in use for scanning */
	if (ull_scan_filter_pol_get(0) & 0x1) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_CTLR_PRIVACY)
	/* With privacy, clear the peer list (also drops rl cross-refs) */
	fal_clear();
#else
	/* Without privacy, the hardware filter is the only FAL state */
	filter_clear(&fal_filter);
#endif /* CONFIG_BT_CTLR_PRIVACY */

	return 0;
}
226
/* HCI LE Add Device To Filter Accept List.
 *
 * Disallowed while the list is in active use by the advertiser or
 * scanner. Anonymous advertisements (special address type 0xFF) have
 * no address to store; accept the command as a no-op.
 */
uint8_t ll_fal_add(bt_addr_le_t *addr)
{
#if defined(CONFIG_BT_BROADCASTER)
	if (ull_adv_filter_pol_get(0)) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_OBSERVER)
	if (ull_scan_filter_pol_get(0) & 0x1) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
#endif /* CONFIG_BT_OBSERVER */

	/* Anonymous entries are accepted but not stored */
	if (addr->type == ADDR_TYPE_ANON) {
		return 0;
	}

#if defined(CONFIG_BT_CTLR_PRIVACY)
	return fal_add(addr);
#else
	return filter_add(&fal_filter, addr->type, addr->a.val);
#endif /* CONFIG_BT_CTLR_PRIVACY */
}
251
/* HCI LE Remove Device From Filter Accept List.
 *
 * Disallowed while the list is in active use by the advertiser or
 * scanner. Anonymous entries (0xFF) are never stored, so removal is a
 * successful no-op.
 */
uint8_t ll_fal_remove(bt_addr_le_t *addr)
{
#if defined(CONFIG_BT_BROADCASTER)
	if (ull_adv_filter_pol_get(0)) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_OBSERVER)
	if (ull_scan_filter_pol_get(0) & 0x1) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
#endif /* CONFIG_BT_OBSERVER */

	if (addr->type == ADDR_TYPE_ANON) {
		return 0;
	}

#if defined(CONFIG_BT_CTLR_PRIVACY)
	return fal_remove(addr);
#else
	return filter_remove(&fal_filter, addr->type, addr->a.val);
#endif /* CONFIG_BT_CTLR_PRIVACY */
}
276
277 #if defined(CONFIG_BT_CTLR_PRIVACY)
/* Copy out the identity address and its type for a resolving list
 * entry. The index must reference a valid, occupied entry.
 */
void ll_rl_id_addr_get(uint8_t rl_idx, uint8_t *id_addr_type, uint8_t *id_addr)
{
	LL_ASSERT(rl_idx < CONFIG_BT_CTLR_RL_SIZE);
	LL_ASSERT(rl[rl_idx].taken);

	(void)memcpy(id_addr, rl[rl_idx].id_addr.val, BDADDR_SIZE);
	*id_addr_type = rl[rl_idx].id_addr_type;
}
286
ll_rl_size_get(void)287 uint8_t ll_rl_size_get(void)
288 {
289 return CONFIG_BT_CTLR_RL_SIZE;
290 }
291
ll_rl_clear(void)292 uint8_t ll_rl_clear(void)
293 {
294 if (!rl_access_check(false)) {
295 return BT_HCI_ERR_CMD_DISALLOWED;
296 }
297
298 rl_clear();
299
300 return 0;
301 }
302
/* HCI LE Add Device To Resolving List.
 *
 * Stores the identity address plus peer/local IRKs, cross-references
 * the entry with the Filter Accept List and (if enabled) the Periodic
 * Advertiser List, and defaults the entry to Network Privacy mode.
 *
 * @param id_addr Peer identity address (public/static random).
 * @param pirk    Peer IRK; all-zero means "no peer IRK".
 * @param lirk    Local IRK; all-zero means "no local IRK".
 *
 * @return 0, BT_HCI_ERR_CMD_DISALLOWED, BT_HCI_ERR_INVALID_PARAM on
 *         duplicate, or BT_HCI_ERR_MEM_CAPACITY_EXCEEDED when full.
 */
uint8_t ll_rl_add(bt_addr_le_t *id_addr, const uint8_t pirk[IRK_SIZE],
		  const uint8_t lirk[IRK_SIZE])
{
	uint8_t i, j;

	if (!rl_access_check(false)) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	i = ull_filter_rl_find(id_addr->type, id_addr->a.val, &j);

	/* Duplicate check */
	if (i < ARRAY_SIZE(rl)) {
		return BT_HCI_ERR_INVALID_PARAM;
	} else if (j >= ARRAY_SIZE(rl)) {
		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	/* Device not found but empty slot found */
	i = j;

	bt_addr_copy(&rl[i].id_addr, &id_addr->a);
	rl[i].id_addr_type = id_addr->type & 0x1;
	/* An all-zero IRK counts as "not present" */
	rl[i].pirk = mem_nz((uint8_t *)pirk, IRK_SIZE);
	rl[i].lirk = mem_nz((uint8_t *)lirk, IRK_SIZE);
	if (rl[i].pirk) {
		/* cross-reference: peer IRKs live in a compact array for
		 * the resolving hardware; remember the mapping both ways.
		 */
		rl[i].pirk_idx = peer_irk_count;
		peer_irk_rl_ids[peer_irk_count] = i;
		/* AAR requires big-endian IRKs */
		sys_memcpy_swap(peer_irks[peer_irk_count++], pirk, IRK_SIZE);
#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
		/* a new key was added, invalidate the known/unknown list */
		prpa_cache_clear();
		trpa_cache_clear();
#endif
	}
	if (rl[i].lirk) {
		(void)memcpy(rl[i].local_irk, lirk, IRK_SIZE);
		/* No local RPA generated yet for this entry */
		rl[i].local_rpa = NULL;
	}
	memset(rl[i].curr_rpa.val, 0x00, sizeof(rl[i].curr_rpa));
	rl[i].rpas_ready = 0U;
#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
	memset(rl[i].target_rpa.val, 0x00, sizeof(rl[i].target_rpa));
#endif
	/* Default to Network Privacy */
	rl[i].dev = 0U;
	/* Add reference to a Filter Accept List entry */
	j = fal_find(id_addr->type, id_addr->a.val, NULL);
	if (j < ARRAY_SIZE(fal)) {
		fal[j].rl_idx = i;
		rl[i].fal = 1U;
	} else {
		rl[i].fal = 0U;
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
	/* Add reference to a periodic list entry; pal is stored 1-based
	 * so that 0 can mean "not in the periodic list".
	 */
	j = pal_addr_find(id_addr->type, id_addr->a.val);
	if (j < ARRAY_SIZE(pal)) {
		pal[j].rl_idx = i;
		rl[i].pal = j + 1U;
	} else {
		rl[i].pal = 0U;
	}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */

	/* Mark occupied last, after the entry is fully populated */
	rl[i].taken = 1U;

	return 0;
}
375
/* HCI LE Remove Device From Resolving List.
 *
 * Frees the entry, compacts the peer IRK array (swap-with-last) and
 * drops cross-references held by the Filter Accept List and Periodic
 * Advertiser List.
 *
 * @return 0, BT_HCI_ERR_CMD_DISALLOWED, or BT_HCI_ERR_UNKNOWN_CONN_ID
 *         when the identity address is not in the list.
 */
uint8_t ll_rl_remove(bt_addr_le_t *id_addr)
{
	uint8_t i;

	if (!rl_access_check(false)) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* find the device and mark it as empty */
	i = ull_filter_rl_find(id_addr->type, id_addr->a.val, NULL);
	if (i < ARRAY_SIZE(rl)) {
		uint8_t j, k;

		if (rl[i].pirk) {
			/* Swap with last item so the peer IRK array stays
			 * contiguous for the resolving hardware.
			 */
			uint8_t pi = rl[i].pirk_idx, pj = peer_irk_count - 1;

			if (pj && pi != pj) {
				(void)memcpy(peer_irks[pi], peer_irks[pj],
					     IRK_SIZE);
				/* Re-point the rl entry that owned the moved
				 * IRK to its new slot.
				 */
				for (k = 0U;
				     k < CONFIG_BT_CTLR_RL_SIZE;
				     k++) {

					if (rl[k].taken && rl[k].pirk &&
					    rl[k].pirk_idx == pj) {
						rl[k].pirk_idx = pi;
						peer_irk_rl_ids[pi] = k;
						break;
					}
				}
			}
			peer_irk_count--;
		}

		/* Check if referenced by a Filter Accept List entry */
		j = fal_find(id_addr->type, id_addr->a.val, NULL);
		if (j < ARRAY_SIZE(fal)) {
			fal[j].rl_idx = FILTER_IDX_NONE;
		}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
		/* Check if referenced by a periodic list entry */
		j = pal_addr_find(id_addr->type, id_addr->a.val);
		if (j < ARRAY_SIZE(pal)) {
			pal[j].rl_idx = FILTER_IDX_NONE;
		}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */

		rl[i].taken = 0U;

		return 0;
	}

	return BT_HCI_ERR_UNKNOWN_CONN_ID;
}
432
/* Record the current peer RPA for a resolving list entry.
 *
 * Called with either an identity address (id_addr non-NULL; the entry
 * is looked up) or a known resolving list index. Only stores addresses
 * whose two most significant bits mark them as resolvable private
 * addresses (0b01 in the top bits of the last octet).
 */
void ll_rl_crpa_set(uint8_t id_addr_type, uint8_t *id_addr, uint8_t rl_idx,
		    uint8_t *crpa)
{
	/* RPA check: top two bits of the MSB must be 0b01 */
	if ((crpa[5] & 0xc0) == 0x40) {

		if (id_addr) {
			/* find the device and return its RPA */
			rl_idx = ull_filter_rl_find(id_addr_type, id_addr,
						    NULL);
		}

		if (rl_idx < ARRAY_SIZE(rl) && rl[rl_idx].taken) {
			(void)memcpy(rl[rl_idx].curr_rpa.val, crpa,
				     sizeof(bt_addr_t));
#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
			/* Keep per-connection peer RPA copies in sync */
			conn_rpa_update(rl_idx);
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN) */
		}
	}
}
453
ll_rl_crpa_get(bt_addr_le_t * id_addr,bt_addr_t * crpa)454 uint8_t ll_rl_crpa_get(bt_addr_le_t *id_addr, bt_addr_t *crpa)
455 {
456 uint8_t i;
457
458 /* find the device and return its RPA */
459 i = ull_filter_rl_find(id_addr->type, id_addr->a.val, NULL);
460 if (i < ARRAY_SIZE(rl) &&
461 mem_nz(rl[i].curr_rpa.val, sizeof(rl[i].curr_rpa.val))) {
462 bt_addr_copy(crpa, &rl[i].curr_rpa);
463 return 0;
464 }
465
466 return BT_HCI_ERR_UNKNOWN_CONN_ID;
467 }
468
ll_rl_lrpa_get(bt_addr_le_t * id_addr,bt_addr_t * lrpa)469 uint8_t ll_rl_lrpa_get(bt_addr_le_t *id_addr, bt_addr_t *lrpa)
470 {
471 uint8_t i;
472
473 /* find the device and return the local RPA */
474 i = ull_filter_rl_find(id_addr->type, id_addr->a.val, NULL);
475 if (i < ARRAY_SIZE(rl)) {
476 bt_addr_copy(lrpa, rl[i].local_rpa);
477 return 0;
478 }
479
480 return BT_HCI_ERR_UNKNOWN_CONN_ID;
481 }
482
/* HCI LE Set Address Resolution Enable.
 *
 * Disallowed while resolution may be in use by an active role.
 */
uint8_t ll_rl_enable(uint8_t enable)
{
	if (!rl_access_check(false)) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	if (enable == BT_HCI_ADDR_RES_DISABLE) {
		rl_enable = 0U;
	} else if (enable == BT_HCI_ADDR_RES_ENABLE) {
		rl_enable = 1U;
	} else {
		return BT_HCI_ERR_INVALID_PARAM;
	}

	return 0;
}
502
/* HCI LE Set RPA Timeout: store the timeout converted to milliseconds. */
void ll_rl_timeout_set(uint16_t timeout)
{
	rpa_timeout_ms = (uint32_t)timeout * 1000U;
}
507
/* HCI LE Set Privacy Mode.
 *
 * Selects Network (dev = 0) or Device (dev = 1) privacy for the
 * resolving list entry matching the identity address.
 */
uint8_t ll_priv_mode_set(bt_addr_le_t *id_addr, uint8_t mode)
{
	uint8_t idx;

	if (!rl_access_check(false)) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	idx = ull_filter_rl_find(id_addr->type, id_addr->a.val, NULL);
	if (idx >= ARRAY_SIZE(rl)) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	switch (mode) {
	case BT_HCI_LE_PRIVACY_MODE_NETWORK:
		rl[idx].dev = 0U;
		break;
	case BT_HCI_LE_PRIVACY_MODE_DEVICE:
		rl[idx].dev = 1U;
		break;
	default:
		return BT_HCI_ERR_INVALID_PARAM;
	}

	return 0;
}
535 #endif /* CONFIG_BT_CTLR_PRIVACY */
536 #endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
537
538 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
ll_pal_size_get(void)539 uint8_t ll_pal_size_get(void)
540 {
541 return PAL_SIZE;
542 }
543
ll_pal_clear(void)544 uint8_t ll_pal_clear(void)
545 {
546 /* FIXME: Check and fail if Periodic Advertising Create Sync is pending.
547 */
548
549 pal_clear();
550
551 return 0;
552 }
553
ll_pal_add(const bt_addr_le_t * const addr,const uint8_t sid)554 uint8_t ll_pal_add(const bt_addr_le_t *const addr, const uint8_t sid)
555 {
556 /* FIXME: Check and fail if Periodic Advertising Create Sync is pending.
557 */
558
559 if (addr->type == ADDR_TYPE_ANON) {
560 return 0;
561 }
562
563 return pal_add(addr, sid);
564 }
565
ll_pal_remove(const bt_addr_le_t * const addr,const uint8_t sid)566 uint8_t ll_pal_remove(const bt_addr_le_t *const addr, const uint8_t sid)
567 {
568 /* FIXME: Check and fail if Periodic Advertising Create Sync is pending.
569 */
570
571 if (addr->type == ADDR_TYPE_ANON) {
572 return 0;
573 }
574
575 return pal_remove(addr, sid);
576 }
577
/* Check whether an address (any SID) is in the Periodic Advertiser
 * List.
 *
 * NOTE: the PAL_ADDR_MATCH macro expands to an expression referencing
 * the loop variable `i` by name, so the loop variable must stay `i`.
 */
bool ull_filter_ull_pal_addr_match(const uint8_t addr_type,
				   const uint8_t *const addr)
{
	for (int i = 0; i < PAL_SIZE; i++) {
		if (PAL_ADDR_MATCH(addr_type, addr)) {
			return true;
		}
	}

	return false;
}
589
/* Check whether an address AND advertising SID pair is in the Periodic
 * Advertiser List.
 *
 * NOTE: the PAL_MATCH macro references the loop variable `i` by name,
 * so the loop variable must stay `i`.
 */
bool ull_filter_ull_pal_match(const uint8_t addr_type,
			      const uint8_t *const addr, const uint8_t sid)
{
	for (int i = 0; i < PAL_SIZE; i++) {
		if (PAL_MATCH(addr_type, addr, sid)) {
			return true;
		}
	}

	return false;
}
601
602 #if defined(CONFIG_BT_CTLR_PRIVACY)
/* If the resolving list entry is cross-referenced by a Periodic
 * Advertiser List entry, output that entry's identity address/type.
 *
 * @return true and fills addr_type/addr when listed, false otherwise.
 */
bool ull_filter_ull_pal_listed(const uint8_t rl_idx, uint8_t *const addr_type,
			       uint8_t *const addr)
{
	if (rl_idx >= ARRAY_SIZE(rl)) {
		return false;
	}

	LL_ASSERT(rl[rl_idx].taken);

	if (rl[rl_idx].pal) {
		/* rl[].pal is stored 1-based; 0 means "not listed" */
		uint8_t pal_idx = rl[rl_idx].pal - 1;

		*addr_type = pal[pal_idx].id_addr_type;
		(void)memcpy(addr, pal[pal_idx].id_addr.val, BDADDR_SIZE);

		return true;
	}

	return false;
}
623 #endif /* CONFIG_BT_CTLR_PRIVACY */
624 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */
625
/* Reset all filter/privacy state to power-on defaults.
 *
 * @param init true on first-time initialization (work items are
 *             initialized); false on re-reset (pending RPA refresh
 *             work is cancelled instead).
 */
void ull_filter_reset(bool init)
{
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
	pal_clear();
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */

#if defined(CONFIG_BT_CTLR_PRIVACY)
	fal_clear();

	rl_enable = 0U;
	rpa_timeout_ms = DEFAULT_RPA_TIMEOUT_MS;
	/* -1 marks "never refreshed" for ull_filter_rpa_update() */
	rpa_last_ms = -1;
	rl_clear();
#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
	prpa_cache_clear();
	trpa_cache_clear();
#endif
	if (init) {
		k_work_init_delayable(&rpa_work, rpa_timeout);
#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
		k_work_init(&(resolve_work.prpa_work), prpa_cache_resolve);
		k_work_init(&(t_work.target_work), target_resolve);
#endif
	} else {
		k_work_cancel_delayable(&rpa_work);
	}
#elif defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
	/* No privacy: only the hardware accept list filter to clear */
	filter_clear(&fal_filter);
#endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
}
656
657 #if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
/* Return the hardware filter: the Filter Accept List filter when
 * filter is true, else (privacy builds only) the resolving list filter.
 */
struct lll_filter *ull_filter_lll_get(bool filter)
{
#if defined(CONFIG_BT_CTLR_PRIVACY)
	return filter ? &fal_filter : &rl_filter;
#else
	/* Without privacy there is no rl filter to hand out */
	LL_ASSERT(filter);
	return &fal_filter;
#endif
}
670
/* Look up an address in a hardware filter; write the matching index
 * (or FILTER_IDX_NONE) to devmatch_id and return 1 on match, 0 on miss.
 */
uint8_t ull_filter_lll_fal_match(const struct lll_filter *const filter,
				 uint8_t addr_type, const uint8_t *const addr,
				 uint8_t *devmatch_id)
{
	uint8_t idx;

	idx = filter_find(filter, addr_type, addr);
	*devmatch_id = idx;

	return (idx != FILTER_IDX_NONE) ? 1U : 0U;
}
679
680 #if defined(CONFIG_BT_CTLR_PRIVACY)
/* Advertiser/scanner state change callback: start periodic RPA refresh
 * while any role is active (non-zero bitmap), stop it otherwise.
 */
void ull_filter_adv_scan_state_cb(uint8_t bm)
{
	if (bm == 0U) {
		rpa_refresh_stop();
		return;
	}

	rpa_refresh_start();
}
689
/* Rebuild the hardware filters when advertising filter policy changes.
 *
 * @param adv_fp Advertising filter policy (non-zero: accept list used).
 */
void ull_filter_adv_update(uint8_t adv_fp)
{
	/* Clear before populating filter */
	filter_clear(&fal_filter);

	/* enabling advertising: only (re)populate the FAL filter if the
	 * scanner is not also using the accept list (it owns it then)
	 */
	if (adv_fp &&
	    (!IS_ENABLED(CONFIG_BT_OBSERVER) ||
	     !(ull_scan_filter_pol_get(0) & 0x1))) {
		/* filter accept list not in use, update FAL */
		fal_update();
	}

	/* Clear before populating rl filter */
	filter_clear(&rl_filter);

	if (rl_enable &&
	    (!IS_ENABLED(CONFIG_BT_OBSERVER) || !ull_scan_is_enabled(0))) {
		/* rl not in use, update resolving list LUT */
		rl_update();
	}
}
712
/* Rebuild the hardware filters when the scan filter policy changes.
 *
 * @param scan_fp Scan filter policy (bit 0: accept list used).
 */
void ull_filter_scan_update(uint8_t scan_fp)
{
	/* Clear before populating filter */
	filter_clear(&fal_filter);

	/* enabling scanning: only (re)populate the FAL filter if the
	 * advertiser is not also using the accept list
	 */
	if ((scan_fp & 0x1) &&
	    (!IS_ENABLED(CONFIG_BT_BROADCASTER) ||
	     !ull_adv_filter_pol_get(0))) {
		/* Filter Accept List not in use, update FAL */
		fal_update();
	}

	/* Clear before populating rl filter */
	filter_clear(&rl_filter);

	if (rl_enable &&
	    (!IS_ENABLED(CONFIG_BT_BROADCASTER) || !ull_adv_is_enabled(0))) {
		/* rl not in use, update resolving list LUT */
		rl_update();
	}
}
735
/* Generate fresh RPAs for resolving list entries.
 *
 * @param timeout true when invoked from the RPA timeout work item; all
 *                entries are regenerated and enabled advertising sets
 *                get their PDUs refreshed. Otherwise only entries whose
 *                RPAs are not yet ready are generated (unless the
 *                timeout interval elapsed anyway).
 */
void ull_filter_rpa_update(bool timeout)
{
	uint8_t i;
	int err;
	int64_t now = k_uptime_get();
	/* Regenerate everything on explicit timeout, on first run
	 * (rpa_last_ms == -1), or when the timeout elapsed unnoticed.
	 */
	bool all = timeout || (rpa_last_ms == -1) ||
		   (now - rpa_last_ms >= rpa_timeout_ms);
	LOG_DBG("");

	for (i = 0U; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
		if ((rl[i].taken) && (all || !rl[i].rpas_ready)) {

			if (rl[i].pirk) {
				uint8_t irk[IRK_SIZE];

				/* TODO: move this swap to the driver level */
				sys_memcpy_swap(irk, peer_irks[rl[i].pirk_idx],
						IRK_SIZE);
				err = bt_rpa_create(irk, &rl[i].peer_rpa);
				LL_ASSERT(!err);
#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
				/* a new key was added,
				 * invalidate the known/unknown peer RPA cache
				 */
				prpa_cache_clear();
				trpa_cache_clear();
#endif
			}

			if (rl[i].lirk) {
				bt_addr_t rpa;

				err = bt_rpa_create(rl[i].local_irk, &rpa);
				LL_ASSERT(!err);
				/* pointer read/write assumed to be atomic
				 * so that if ISR fires the local_rpa pointer
				 * will always point to a valid full RPA
				 */
				rl[i].local_rpa = &rpa;
				bt_addr_copy(&local_rpas[i], &rpa);
				/* Re-point at the static copy before the
				 * stack temporary goes out of scope.
				 */
				rl[i].local_rpa = &local_rpas[i];
			}

			rl[i].rpas_ready = 1U;
		}
	}

	if (all) {
		rpa_last_ms = now;
	}

	if (timeout) {
#if defined(CONFIG_BT_BROADCASTER)
		uint8_t handle;

		/* Push the fresh AdvA/TargetA into every enabled set */
		for (handle = 0U; handle < BT_CTLR_ADV_SET; handle++) {
			struct ll_adv_set *adv;

			adv = ull_adv_is_enabled_get(handle);
			if (adv) {
				rpa_adv_refresh(adv);
			}
		}
#endif
	}
}
802
803 #if defined(CONFIG_BT_BROADCASTER)
/* Return the local RPA to use as AdvA for the given resolving list
 * entry, or NULL when the entry has no local IRK (or index is invalid).
 */
const uint8_t *ull_filter_adva_get(uint8_t rl_idx)
{
	/* AdvA */
	if ((rl_idx >= ARRAY_SIZE(rl)) || !rl[rl_idx].lirk) {
		return NULL;
	}

	LL_ASSERT(rl[rl_idx].rpas_ready);

	return rl[rl_idx].local_rpa->val;
}
814
/* Return the peer RPA to use as TargetA for the given resolving list
 * entry, or NULL when the entry has no peer IRK (or index is invalid).
 */
const uint8_t *ull_filter_tgta_get(uint8_t rl_idx)
{
	/* TargetA */
	if ((rl_idx >= ARRAY_SIZE(rl)) || !rl[rl_idx].pirk) {
		return NULL;
	}

	return rl[rl_idx].peer_rpa.val;
}
824 #endif /* CONFIG_BT_BROADCASTER */
825
/* Find a resolving list entry by identity address.
 *
 * @param id_addr_type Identity address type (only bit 0 is compared).
 * @param id_addr      Identity address (BDADDR_SIZE bytes).
 * @param free_idx     Optional out: index of the first free slot, or
 *                     FILTER_IDX_NONE if the list is full.
 *
 * @return Matching index, or FILTER_IDX_NONE when not found.
 */
uint8_t ull_filter_rl_find(uint8_t id_addr_type, uint8_t const *const id_addr,
			   uint8_t *const free_idx)
{
	uint8_t i;

	if (free_idx) {
		*free_idx = FILTER_IDX_NONE;
	}

	for (i = 0U; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
		if (LIST_MATCH(rl, i, id_addr_type, id_addr)) {
			return i;
		} else if (free_idx && !rl[i].taken &&
			   (*free_idx == FILTER_IDX_NONE)) {
			/* Remember only the first free slot seen */
			*free_idx = i;
		}
	}

	return FILTER_IDX_NONE;
}
846
ull_filter_lll_lrpa_used(uint8_t rl_idx)847 bool ull_filter_lll_lrpa_used(uint8_t rl_idx)
848 {
849 return rl_idx < ARRAY_SIZE(rl) && rl[rl_idx].lirk;
850 }
851
ull_filter_lll_lrpa_get(uint8_t rl_idx)852 bt_addr_t *ull_filter_lll_lrpa_get(uint8_t rl_idx)
853 {
854 if ((rl_idx >= ARRAY_SIZE(rl)) || !rl[rl_idx].lirk ||
855 !rl[rl_idx].rpas_ready) {
856 return NULL;
857 }
858
859 return rl[rl_idx].local_rpa;
860 }
861
/* Hand out the compact big-endian peer IRK array and its entry count. */
uint8_t *ull_filter_lll_irks_get(uint8_t *count)
{
	*count = peer_irk_count;

	return (uint8_t *)peer_irks;
}
867
/* Map a device-match id to a resolving list index.
 *
 * With filter set the id indexes the Filter Accept List (and its
 * cross-reference is returned, which may be FILTER_IDX_NONE); without
 * it, the id is itself a resolving list index.
 */
uint8_t ull_filter_lll_rl_idx(bool filter, uint8_t devmatch_id)
{
	uint8_t rl_idx;

	if (filter) {
		LL_ASSERT(devmatch_id < ARRAY_SIZE(fal));
		LL_ASSERT(fal[devmatch_id].taken);

		rl_idx = fal[devmatch_id].rl_idx;
	} else {
		LL_ASSERT(devmatch_id < ARRAY_SIZE(rl));

		rl_idx = devmatch_id;
		LL_ASSERT(rl[rl_idx].taken);
	}

	return rl_idx;
}
884
/* Map an IRK-match id (index into the compact peer IRK array) to its
 * resolving list index.
 */
uint8_t ull_filter_lll_rl_irk_idx(uint8_t irkmatch_id)
{
	uint8_t rl_idx;

	LL_ASSERT(irkmatch_id < peer_irk_count);

	rl_idx = peer_irk_rl_ids[irkmatch_id];
	LL_ASSERT(rl_idx < CONFIG_BT_CTLR_RL_SIZE);
	LL_ASSERT(rl[rl_idx].taken);

	return rl_idx;
}
896
ull_filter_lll_irk_in_fal(uint8_t rl_idx)897 bool ull_filter_lll_irk_in_fal(uint8_t rl_idx)
898 {
899 if (rl_idx >= ARRAY_SIZE(rl)) {
900 return false;
901 }
902
903 LL_ASSERT(rl[rl_idx].taken);
904
905 return rl[rl_idx].fal;
906 }
907
ull_filter_lll_fal_get(void)908 struct lll_fal *ull_filter_lll_fal_get(void)
909 {
910 return fal;
911 }
912
ull_filter_lll_resolve_list_get(void)913 struct lll_resolve_list *ull_filter_lll_resolve_list_get(void)
914 {
915 return rl;
916 }
917
/* Decide whether a peer identified by resolving list index may be
 * accepted given the address resolution state.
 *
 * @return true when resolution is disabled, the device is unknown, an
 *         IRK match already succeeded, or the entry either has no peer
 *         IRK or uses Device Privacy mode.
 */
bool ull_filter_lll_rl_idx_allowed(uint8_t irkmatch_ok, uint8_t rl_idx)
{
	/* If AR is disabled or we don't know the device or we matched an IRK
	 * then we're all set.
	 */
	if (!rl_enable || rl_idx >= ARRAY_SIZE(rl) || irkmatch_ok) {
		return true;
	}

	LL_ASSERT(rl_idx < CONFIG_BT_CTLR_RL_SIZE);
	LL_ASSERT(rl[rl_idx].taken);

	/* No peer IRK (nothing to resolve) or Device Privacy mode */
	return !rl[rl_idx].pirk || rl[rl_idx].dev;
}
932
ull_filter_lll_rl_addr_allowed(uint8_t id_addr_type,const uint8_t * id_addr,uint8_t * const rl_idx)933 bool ull_filter_lll_rl_addr_allowed(uint8_t id_addr_type,
934 const uint8_t *id_addr,
935 uint8_t *const rl_idx)
936 {
937 uint8_t i, j;
938
939 /* We matched an IRK then we're all set. No hw
940 * filters are used in this case.
941 */
942 if (*rl_idx != FILTER_IDX_NONE) {
943 return true;
944 }
945
946 for (i = 0U; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
947 if (rl[i].taken && (rl[i].id_addr_type == id_addr_type)) {
948 uint8_t *addr = rl[i].id_addr.val;
949
950 for (j = 0U; j < BDADDR_SIZE; j++) {
951 if (addr[j] != id_addr[j]) {
952 break;
953 }
954 }
955
956 if (j == BDADDR_SIZE) {
957 *rl_idx = i;
958 return !rl[i].pirk || rl[i].dev;
959 }
960 }
961 }
962
963 return true;
964 }
965
/* Try to resolve a target RPA against the local IRK of a resolving
 * list entry.
 *
 * @return true only if resolution is enabled, the entry exists with a
 *         local IRK, the address is a random (type != 0) resolvable
 *         private address (top bits 0b01), and the IRK matches.
 */
bool ull_filter_lll_rl_addr_resolve(uint8_t id_addr_type,
				    const uint8_t *id_addr, uint8_t rl_idx)
{
	/* Unable to resolve if AR is disabled, no RL entry or no local IRK */
	if (!rl_enable || rl_idx >= ARRAY_SIZE(rl) || !rl[rl_idx].lirk) {
		return false;
	}

	/* Random address type with RPA marker bits (0b01) in the MSB */
	if ((id_addr_type != 0U) && ((id_addr[5] & 0xc0) == 0x40)) {
		return bt_rpa_irk_matches(rl[rl_idx].local_irk,
					  (bt_addr_t *)id_addr);
	}

	return false;
}
981
ull_filter_lll_rl_enabled(void)982 bool ull_filter_lll_rl_enabled(void)
983 {
984 return rl_enable;
985 }
986
987 #if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
ull_filter_deferred_resolve(bt_addr_t * rpa,resolve_callback_t cb)988 uint8_t ull_filter_deferred_resolve(bt_addr_t *rpa, resolve_callback_t cb)
989 {
990 if (rl_enable) {
991 if (!k_work_is_pending(&(resolve_work.prpa_work))) {
992 /* copy input param to work variable */
993 (void)memcpy(resolve_work.rpa.val, rpa->val,
994 sizeof(bt_addr_t));
995 resolve_work.cb = cb;
996
997 k_work_submit(&(resolve_work.prpa_work));
998
999 return 1;
1000 }
1001 }
1002
1003 return 0;
1004 }
1005
/* Queue software resolution of a directed-advertising TargetA RPA
 * against the local IRK of resolving list entry rl_idx.
 *
 * @return 1 when the work was queued, 0 when resolution is disabled or
 *         a target resolution is already pending.
 */
uint8_t ull_filter_deferred_targeta_resolve(bt_addr_t *rpa, uint8_t rl_idx,
					    resolve_callback_t cb)
{
	if (!rl_enable || k_work_is_pending(&(t_work.target_work))) {
		return 0;
	}

	/* copy input param to work variable */
	(void)memcpy(t_work.rpa.val, rpa->val, sizeof(bt_addr_t));
	t_work.cb = cb;
	t_work.idx = rl_idx;

	k_work_submit(&(t_work.target_work));

	return 1;
}
1024 #endif /* CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY */
1025
fal_clear(void)1026 static void fal_clear(void)
1027 {
1028 for (int i = 0; i < CONFIG_BT_CTLR_FAL_SIZE; i++) {
1029 uint8_t j = fal[i].rl_idx;
1030
1031 if (j < ARRAY_SIZE(rl)) {
1032 rl[j].fal = 0U;
1033 }
1034 fal[i].taken = 0U;
1035 }
1036 }
1037
/* Find a Filter Accept List entry by identity address.
 *
 * @param addr_type Identity address type (only bit 0 is compared).
 * @param addr      Identity address (BDADDR_SIZE bytes).
 * @param free_idx  Optional out: first free slot, or FILTER_IDX_NONE.
 *
 * @return Matching index, or FILTER_IDX_NONE when not found.
 */
static uint8_t fal_find(uint8_t addr_type, const uint8_t *const addr,
			uint8_t *const free_idx)
{
	int i;

	if (free_idx) {
		*free_idx = FILTER_IDX_NONE;
	}

	for (i = 0; i < CONFIG_BT_CTLR_FAL_SIZE; i++) {
		if (LIST_MATCH(fal, i, addr_type, addr)) {
			return i;
		} else if (free_idx && !fal[i].taken &&
			   (*free_idx == FILTER_IDX_NONE)) {
			/* Remember only the first free slot seen */
			*free_idx = i;
		}
	}

	return FILTER_IDX_NONE;
}
1058
/* Add an identity address to the Filter Accept List (privacy variant),
 * cross-referencing the matching resolving list entry when present.
 *
 * @return 0 on success (adding a duplicate is a successful no-op), or
 *         BT_HCI_ERR_MEM_CAPACITY_EXCEEDED when the list is full.
 */
static uint32_t fal_add(bt_addr_le_t *id_addr)
{
	uint8_t i, j;

	i = fal_find(id_addr->type, id_addr->a.val, &j);

	/* Duplicate check */
	if (i < ARRAY_SIZE(fal)) {
		return 0;
	} else if (j >= ARRAY_SIZE(fal)) {
		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	/* Use the free slot found above */
	i = j;

	fal[i].id_addr_type = id_addr->type & 0x1;
	bt_addr_copy(&fal[i].id_addr, &id_addr->a);
	/* Get index to Resolving List if applicable */
	j = ull_filter_rl_find(id_addr->type, id_addr->a.val, NULL);
	if (j < ARRAY_SIZE(rl)) {
		fal[i].rl_idx = j;
		rl[j].fal = 1U;
	} else {
		fal[i].rl_idx = FILTER_IDX_NONE;
	}
	/* Mark occupied last, after the entry is fully populated */
	fal[i].taken = 1U;

	return 0;
}
1088
fal_remove(bt_addr_le_t * id_addr)1089 static uint32_t fal_remove(bt_addr_le_t *id_addr)
1090 {
1091 /* find the device and mark it as empty */
1092 uint8_t i = fal_find(id_addr->type, id_addr->a.val, NULL);
1093
1094 if (i < ARRAY_SIZE(fal)) {
1095 uint8_t j = fal[i].rl_idx;
1096
1097 if (j < ARRAY_SIZE(rl)) {
1098 rl[j].fal = 0U;
1099 }
1100 fal[i].taken = 0U;
1101
1102 return 0;
1103 }
1104
1105 return BT_HCI_ERR_UNKNOWN_CONN_ID;
1106 }
1107
/* Populate the hardware FAL filter from the peer list.
 *
 * Entries whose peer RPA is resolved in software/hardware via a peer
 * IRK (and are in Network Privacy mode) are skipped: they cannot be
 * matched by identity address on air.
 */
static void fal_update(void)
{
	uint8_t i;

	/* Populate filter from fal peers */
	for (i = 0U; i < CONFIG_BT_CTLR_FAL_SIZE; i++) {
		uint8_t j;

		if (!fal[i].taken) {
			continue;
		}

		j = fal[i].rl_idx;

		/* Insert unless resolution applies: AR enabled AND entry has
		 * a peer IRK AND is in Network Privacy mode.
		 */
		if (!rl_enable || j >= ARRAY_SIZE(rl) || !rl[j].pirk ||
		    rl[j].dev) {
			filter_insert(&fal_filter, i, fal[i].id_addr_type,
				      fal[i].id_addr.val);
		}
	}
}
1129
rl_update(void)1130 static void rl_update(void)
1131 {
1132 uint8_t i;
1133
1134 /* Populate filter from rl peers */
1135 for (i = 0U; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
1136 if (rl[i].taken) {
1137 filter_insert(&rl_filter, i, rl[i].id_addr_type,
1138 rl[i].id_addr.val);
1139 }
1140 }
1141 }
1142
1143 #if defined(CONFIG_BT_BROADCASTER)
/* Refresh the advertising address in an advertising set's PDU after its
 * RPA has been regenerated: allocate a new PDU double-buffer entry, copy
 * the previous PDU into it, rewrite the address fields and enqueue it.
 */
static void rpa_adv_refresh(struct ll_adv_set *adv)
{
	struct lll_adv_aux *lll_aux;
	struct pdu_adv *prev;
	struct lll_adv *lll;
	struct pdu_adv *pdu;
	uint8_t pri_idx;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	uint8_t sec_idx;
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	/* Only sets whose own address is generated from a public or random
	 * identity address need a refresh.
	 */
	if (adv->own_addr_type != BT_ADDR_LE_PUBLIC_ID &&
	    adv->own_addr_type != BT_ADDR_LE_RANDOM_ID) {
		return;
	}

	lll = &adv->lll;
	if (lll->rl_idx >= ARRAY_SIZE(rl)) {
		/* Set is not associated with a Resolving List entry */
		return;
	}


	pri_idx = UINT8_MAX; /* sentinel: primary PDU not allocated */
	lll_aux = NULL;
	pdu = NULL;
	prev = lll_adv_data_peek(lll);

	/* `if (false)` keeps the conditionally-compiled branches below
	 * chainable as `else if` regardless of enabled Kconfig options.
	 */
	if (false) {

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	} else if (prev->type == PDU_ADV_TYPE_EXT_IND) {
		struct pdu_adv_com_ext_adv *pri_com_hdr;
		struct pdu_adv_ext_hdr pri_hdr_flags;
		struct pdu_adv_ext_hdr *pri_hdr;

		/* Pick the primary PDU header flags */
		pri_com_hdr = (void *)&prev->adv_ext_ind;
		pri_hdr = (void *)pri_com_hdr->ext_hdr_adv_data;
		if (pri_com_hdr->ext_hdr_len) {
			pri_hdr_flags = *pri_hdr;
		} else {
			/* No extended header: treat all flags as clear */
			*(uint8_t *)&pri_hdr_flags = 0U;
		}

		/* AdvA, in primary or auxiliary PDU */
		if (pri_hdr_flags.adv_addr) {
			/* AdvA carried in the primary PDU itself */
			pdu = lll_adv_data_alloc(lll, &pri_idx);
			(void)memcpy(pdu, prev, (PDU_AC_LL_HEADER_SIZE +
						 prev->len));

#if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
		} else if (pri_hdr_flags.aux_ptr) {
			/* AdvA carried in the auxiliary PDU instead */
			struct pdu_adv_com_ext_adv *sec_com_hdr;
			struct pdu_adv_ext_hdr sec_hdr_flags;
			struct pdu_adv_ext_hdr *sec_hdr;
			struct pdu_adv *sec_pdu;

			lll_aux = lll->aux;
			sec_pdu = lll_adv_aux_data_peek(lll_aux);

			sec_com_hdr = (void *)&sec_pdu->adv_ext_ind;
			sec_hdr = (void *)sec_com_hdr->ext_hdr_adv_data;
			if (sec_com_hdr->ext_hdr_len) {
				sec_hdr_flags = *sec_hdr;
			} else {
				*(uint8_t *)&sec_hdr_flags = 0U;
			}

			if (sec_hdr_flags.adv_addr) {
				pdu = lll_adv_aux_data_alloc(lll_aux, &sec_idx);
				(void)memcpy(pdu, sec_pdu,
					     (PDU_AC_LL_HEADER_SIZE +
					      sec_pdu->len));
			}
#endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
		}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	} else {
		/* Legacy advertising PDU */
		pdu = lll_adv_data_alloc(lll, &pri_idx);
		(void)memcpy(pdu, prev, (PDU_AC_LL_HEADER_SIZE + prev->len));
	}

	if (pdu) {
		/* Rewrite the address fields in the copied PDU */
		ull_adv_pdu_update_addrs(adv, pdu);

		if (pri_idx != UINT8_MAX) {
			/* A new primary PDU was allocated above */
			lll_adv_data_enqueue(lll, pri_idx);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		} else {
			/* pdu came from the auxiliary double buffer; sec_idx
			 * was set in that same branch.
			 */
			lll_adv_aux_data_enqueue(lll_aux, sec_idx);
#endif /* CONFIG_BT_CTLR_ADV_EXT */

		}
	}
}
1242 #endif /* CONFIG_BT_BROADCASTER */
1243
rl_clear(void)1244 static void rl_clear(void)
1245 {
1246 for (uint8_t i = 0; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
1247 rl[i].taken = 0U;
1248 }
1249
1250 peer_irk_count = 0U;
1251 }
1252
rl_access_check(bool check_ar)1253 static int rl_access_check(bool check_ar)
1254 {
1255 if (check_ar) {
1256 /* If address resolution is disabled, allow immediately */
1257 if (!rl_enable) {
1258 return -1;
1259 }
1260 }
1261
1262 /* NOTE: Allowed when passive scanning, otherwise deny if advertising,
1263 * active scanning, initiating or periodic sync create is active.
1264 */
1265 return ((IS_ENABLED(CONFIG_BT_BROADCASTER) && ull_adv_is_enabled(0)) ||
1266 (IS_ENABLED(CONFIG_BT_OBSERVER) &&
1267 (ull_scan_is_enabled(0) & ~ULL_SCAN_IS_PASSIVE)))
1268 ? 0 : 1;
1269 }
1270
/* Delayed-work handler: regenerate the RPAs and re-arm the timer so the
 * addresses keep rotating every rpa_timeout_ms milliseconds.
 */
static void rpa_timeout(struct k_work *work)
{
	ull_filter_rpa_update(true);
	k_work_schedule(&rpa_work, K_MSEC(rpa_timeout_ms));
}
1276
/* Start periodic RPA refresh by scheduling the first timeout. */
static void rpa_refresh_start(void)
{
	LOG_DBG("");
	k_work_schedule(&rpa_work, K_MSEC(rpa_timeout_ms));
}
1282
/* Stop periodic RPA refresh; cancels any pending delayed work. */
static void rpa_refresh_stop(void)
{
	k_work_cancel_delayable(&rpa_work);
}
1287
1288 #else /* !CONFIG_BT_CTLR_PRIVACY */
1289
filter_add(struct lll_filter * filter,uint8_t addr_type,uint8_t * bdaddr)1290 static uint32_t filter_add(struct lll_filter *filter, uint8_t addr_type,
1291 uint8_t *bdaddr)
1292 {
1293 int index;
1294
1295 if (filter->enable_bitmask == LLL_FILTER_BITMASK_ALL) {
1296 return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
1297 }
1298
1299 for (index = 0;
1300 (filter->enable_bitmask & BIT(index));
1301 index++) {
1302 }
1303
1304 filter_insert(filter, index, addr_type, bdaddr);
1305 return 0;
1306 }
1307
filter_remove(struct lll_filter * filter,uint8_t addr_type,uint8_t * bdaddr)1308 static uint32_t filter_remove(struct lll_filter *filter, uint8_t addr_type,
1309 uint8_t *bdaddr)
1310 {
1311 int index;
1312
1313 index = filter_find(filter, addr_type, bdaddr);
1314 if (index == FILTER_IDX_NONE) {
1315 return BT_HCI_ERR_INVALID_PARAM;
1316 }
1317
1318 filter->enable_bitmask &= ~BIT(index);
1319 filter->addr_type_bitmask &= ~BIT(index);
1320
1321 return 0;
1322 }
1323 #endif /* !CONFIG_BT_CTLR_PRIVACY */
1324
filter_find(const struct lll_filter * const filter,uint8_t addr_type,const uint8_t * const bdaddr)1325 static uint32_t filter_find(const struct lll_filter *const filter,
1326 uint8_t addr_type, const uint8_t *const bdaddr)
1327 {
1328 int index;
1329
1330 if (!filter->enable_bitmask) {
1331 return FILTER_IDX_NONE;
1332 }
1333
1334 index = LLL_FILTER_SIZE;
1335 while (index--) {
1336 if ((filter->enable_bitmask & BIT(index)) &&
1337 (((filter->addr_type_bitmask >> index) & 0x01) ==
1338 (addr_type & 0x01)) &&
1339 !memcmp(filter->bdaddr[index], bdaddr, BDADDR_SIZE)) {
1340 return index;
1341 }
1342 }
1343
1344 return FILTER_IDX_NONE;
1345 }
1346
filter_insert(struct lll_filter * const filter,int index,uint8_t addr_type,const uint8_t * const bdaddr)1347 static void filter_insert(struct lll_filter *const filter, int index,
1348 uint8_t addr_type, const uint8_t *const bdaddr)
1349 {
1350 filter->enable_bitmask |= BIT(index);
1351 filter->addr_type_bitmask |= ((addr_type & 0x01) << index);
1352 (void)memcpy(&filter->bdaddr[index][0], bdaddr, BDADDR_SIZE);
1353 }
1354
/* Remove all entries from the given hardware filter. Clearing the two
 * bitmasks invalidates every slot; stored addresses are left in place.
 */
static void filter_clear(struct lll_filter *filter)
{
	filter->enable_bitmask = 0;
	filter->addr_type_bitmask = 0;
}
1360 #endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
1361
1362 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
pal_clear(void)1363 static void pal_clear(void)
1364 {
1365 for (int i = 0; i < PAL_SIZE; i++) {
1366
1367 #if defined(CONFIG_BT_CTLR_PRIVACY)
1368 uint8_t j = pal[i].rl_idx;
1369
1370 if (j < ARRAY_SIZE(pal)) {
1371 rl[j].pal = 0U;
1372 }
1373 #endif /* CONFIG_BT_CTLR_PRIVACY */
1374
1375 pal[i].taken = 0U;
1376 }
1377 }
1378
1379 #if defined(CONFIG_BT_CTLR_PRIVACY)
/* Find a Periodic Advertiser List entry by identity address only, with
 * the advertising SID ignored.
 * Returns the matching index, or FILTER_IDX_NONE when not present.
 *
 * NOTE: PAL_ADDR_MATCH expands in terms of the loop variable `i`.
 */
static uint8_t pal_addr_find(const uint8_t addr_type, const uint8_t *const addr)
{
	for (int i = 0; i < PAL_SIZE; i++) {
		if (PAL_ADDR_MATCH(addr_type, addr)) {
			return i;
		}
	}

	return FILTER_IDX_NONE;
}
1390 #endif /* CONFIG_BT_CTLR_PRIVACY */
1391
/* Find the Periodic Advertiser List entry matching address type, address
 * and advertising SID.
 *
 * If free_idx is non-NULL it receives the index of the first free entry
 * encountered during the scan (FILTER_IDX_NONE when the list is full),
 * so a caller can add a new entry immediately on a miss.
 *
 * Returns the matching index, or FILTER_IDX_NONE when not present.
 *
 * NOTE: PAL_MATCH expands in terms of the loop variable `i`.
 */
static uint8_t pal_find(const uint8_t addr_type, const uint8_t *const addr,
			const uint8_t sid, uint8_t *const free_idx)
{
	int i;

	if (free_idx) {
		*free_idx = FILTER_IDX_NONE;
	}

	for (i = 0; i < PAL_SIZE; i++) {
		if (PAL_MATCH(addr_type, addr, sid)) {
			return i;
		} else if (free_idx && !pal[i].taken &&
			   (*free_idx == FILTER_IDX_NONE)) {
			*free_idx = i;
		}
	}

	return FILTER_IDX_NONE;
}
1412
pal_add(const bt_addr_le_t * const id_addr,const uint8_t sid)1413 static uint32_t pal_add(const bt_addr_le_t *const id_addr, const uint8_t sid)
1414 {
1415 uint8_t i, j;
1416
1417 i = pal_find(id_addr->type, id_addr->a.val, sid, &j);
1418
1419 /* Duplicate check */
1420 if (i < PAL_SIZE) {
1421 return BT_HCI_ERR_INVALID_PARAM;
1422 } else if (j >= PAL_SIZE) {
1423 return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
1424 }
1425
1426 i = j;
1427
1428 pal[i].id_addr_type = id_addr->type & 0x1;
1429 bt_addr_copy(&pal[i].id_addr, &id_addr->a);
1430 pal[i].sid = sid;
1431
1432 #if defined(CONFIG_BT_CTLR_PRIVACY)
1433 /* Get index to Resolving List if applicable */
1434 j = ull_filter_rl_find(id_addr->type, id_addr->a.val, NULL);
1435 if (j < ARRAY_SIZE(rl)) {
1436 pal[i].rl_idx = j;
1437 rl[j].pal = i + 1U;
1438 } else {
1439 pal[i].rl_idx = FILTER_IDX_NONE;
1440 }
1441 #endif /* CONFIG_BT_CTLR_PRIVACY */
1442
1443 pal[i].taken = 1U;
1444
1445 return 0;
1446 }
1447
pal_remove(const bt_addr_le_t * const id_addr,const uint8_t sid)1448 static uint32_t pal_remove(const bt_addr_le_t *const id_addr, const uint8_t sid)
1449 {
1450 /* find the device and mark it as empty */
1451 uint8_t i = pal_find(id_addr->type, id_addr->a.val, sid, NULL);
1452
1453 if (i < PAL_SIZE) {
1454
1455 #if defined(CONFIG_BT_CTLR_PRIVACY)
1456 uint8_t j = pal[i].rl_idx;
1457
1458 if (j < ARRAY_SIZE(rl)) {
1459 rl[j].pal = 0U;
1460 }
1461 #endif /* CONFIG_BT_CTLR_PRIVACY */
1462
1463 pal[i].taken = 0U;
1464
1465 return 0;
1466 }
1467
1468 return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
1469 }
1470 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */
1471
1472 #if defined(CONFIG_BT_CTLR_PRIVACY) && \
1473 defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
conn_rpa_update(uint8_t rl_idx)1474 static void conn_rpa_update(uint8_t rl_idx)
1475 {
1476 uint16_t handle;
1477
1478 for (handle = 0U; handle < CONFIG_BT_MAX_CONN; handle++) {
1479 struct ll_conn *conn = ll_connected_get(handle);
1480
1481 /* The RPA of the connection matches the RPA that was just
1482 * resolved
1483 */
1484 if (conn && !memcmp(conn->peer_id_addr, rl[rl_idx].curr_rpa.val,
1485 BDADDR_SIZE)) {
1486 (void)memcpy(conn->peer_id_addr, rl[rl_idx].id_addr.val,
1487 BDADDR_SIZE);
1488 break;
1489 }
1490 }
1491 }
1492 #endif /* CONFIG_BT_CTLR_PRIVACY && CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */
1493
1494 #if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
/* Work-queue handler: resolve a target RPA against the local IRK of
 * Resolving List entry twork->idx, then report the result to the LLL
 * via a mayfly (rl index on success, FILTER_IDX_NONE on failure).
 */
static void target_resolve(struct k_work *work)
{
	uint8_t j, idx;
	bt_addr_t *search_rpa;
	struct target_resolve_work *twork;
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, 0, NULL};

	twork = CONTAINER_OF(work, struct target_resolve_work, target_work);
	idx = twork->idx;
	search_rpa = &(twork->rpa);

	if (rl[idx].taken && bt_addr_eq(&(rl[idx].target_rpa), search_rpa)) {
		/* Already the stored target RPA of this entry */
		j = idx;
	} else {
		uint8_t i;

		/* No match - so not in list; Need to see if we can resolve */

		i = trpa_cache_find(search_rpa, idx);
		if (i != FILTER_IDX_NONE) {
			/* Found a known unknown - do nothing */
			j = FILTER_IDX_NONE;
		} else if (bt_rpa_irk_matches(rl[idx].local_irk, search_rpa)) {
			/* Could resolve, store RPA */
			(void)memcpy(rl[idx].target_rpa.val, search_rpa->val,
				     sizeof(bt_addr_t));
			j = idx;
		} else if (rl[idx].taken) {
			/* No match - thus cannot resolve, we have an unknown
			 * so insert in known unknown list
			 */
			trpa_cache_add(search_rpa, idx);
			j = FILTER_IDX_NONE;
		} else {
			/* Could not resolve, and not in table */
			j = FILTER_IDX_NONE;
		}
	}

	/* Kick the callback in LLL (using the mayfly, tailchain it)
	 * Pass param FILTER_IDX_NONE if RPA can not be resolved,
	 * or index in cache if it can be resolved
	 */
	if (twork->cb) {
		mfy.fp = twork->cb;
		/* Result index is encoded directly in the pointer value */
		mfy.param = (void *) ((unsigned int) j);
		(void)mayfly_enqueue(TICKER_USER_ID_THREAD,
				     TICKER_USER_ID_LLL, 1, &mfy);
	}
}
1546
/* Try to resolve a peer RPA against each stored peer IRK.
 *
 * Returns the Resolving List index whose peer IRK matches, or
 * FILTER_IDX_NONE when no IRK resolves the address.
 */
static uint8_t prpa_cache_try_resolve(bt_addr_t *rpa)
{
	uint8_t irk[IRK_SIZE];

	for (uint8_t i = 0U; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
		if (!rl[i].taken || !rl[i].pirk) {
			continue;
		}

		/* peer_irks[] is stored in the opposite byte order to what
		 * bt_rpa_irk_matches() expects, hence the swap copy.
		 */
		sys_memcpy_swap(irk, peer_irks[rl[i].pirk_idx], IRK_SIZE);
		if (bt_rpa_irk_matches(irk, rpa)) {
			return i;
		}
	}

	return FILTER_IDX_NONE;
}
1564
/* Work-queue handler: attempt to resolve a peer RPA against the peer
 * IRKs in the Resolving List, update that entry's current RPA on
 * success, and report the result to the LLL via a mayfly.
 */
static void prpa_cache_resolve(struct k_work *work)
{
	uint8_t i, j;
	bt_addr_t *search_rpa;
	struct prpa_resolve_work *rwork;
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, 0, NULL};

	rwork = CONTAINER_OF(work, struct prpa_resolve_work, prpa_work);
	search_rpa = &(rwork->rpa);

	/* First check the cache of RPAs already known to be unresolvable */
	i = prpa_cache_find(search_rpa);

	if (i == FILTER_IDX_NONE) {
		/* No match - so not in known unknown list
		 * Need to see if we can resolve
		 */
		j = prpa_cache_try_resolve(search_rpa);

		if (j == FILTER_IDX_NONE) {
			/* No match - thus cannot resolve, we have an unknown
			 * so insert in known unknown list
			 */
			prpa_cache_add(search_rpa);
		} else {
			/* Address could be resolved, so update current RPA
			 * in list
			 */
			(void)memcpy(rl[j].curr_rpa.val, search_rpa->val,
				     sizeof(bt_addr_t));
#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
			conn_rpa_update(j);
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */
		}

	} else {
		/* Found a known unknown - do nothing */
		j = FILTER_IDX_NONE;
	}

	/* Kick the callback in LLL (using the mayfly, tailchain it)
	 * Pass param FILTER_IDX_NONE if RPA can not be resolved,
	 * or index in cache if it can be resolved
	 */
	if (rwork->cb) {
		mfy.fp = rwork->cb;
		mfy.param = (void *) ((unsigned int) j);
		(void)mayfly_enqueue(TICKER_USER_ID_THREAD,
				     TICKER_USER_ID_LLL, 1, &mfy);
	}
}
1616
prpa_cache_clear(void)1617 static void prpa_cache_clear(void)
1618 {
1619 /* Note the first element will not be in use before wrap around
1620 * is reached.
1621 * The first element in actual use will be at index 1.
1622 * There is no element waisted with this implementation, as
1623 * element 0 will eventually be allocated.
1624 */
1625 newest_prpa = 0U;
1626
1627 for (uint8_t i = 0; i < CONFIG_BT_CTLR_RPA_CACHE_SIZE; i++) {
1628 prpa_cache[i].taken = 0U;
1629 }
1630 }
1631
/* Insert a peer RPA into the ring cache, overwriting the oldest entry
 * once the buffer has wrapped.
 */
static void prpa_cache_add(bt_addr_t *rpa)
{
	/* Advance the write index first; it wraps around the cache size */
	newest_prpa = (newest_prpa + 1) % CONFIG_BT_CTLR_RPA_CACHE_SIZE;

	memcpy(prpa_cache[newest_prpa].rpa.val, rpa->val, sizeof(bt_addr_t));
	prpa_cache[newest_prpa].taken = 1U;
}
1640
/* Look a peer RPA up in the ring cache.
 * Returns the cache index, or FILTER_IDX_NONE when not cached.
 */
static uint8_t prpa_cache_find(bt_addr_t *rpa)
{
	for (uint8_t i = 0U; i < CONFIG_BT_CTLR_RPA_CACHE_SIZE; i++) {
		if (prpa_cache[i].taken &&
		    bt_addr_eq(&prpa_cache[i].rpa, rpa)) {
			return i;
		}
	}

	return FILTER_IDX_NONE;
}
1651
/* Expose the peer RPA cache to the lower link layer (LLL) code. */
const struct lll_prpa_cache *ull_filter_lll_prpa_cache_get(void)
{
	return prpa_cache;
}
1656
trpa_cache_clear(void)1657 static void trpa_cache_clear(void)
1658 {
1659 /* Note the first element will not be in use before wrap around
1660 * is reached.
1661 * The first element in actual use will be at index 1.
1662 * There is no element waisted with this implementation, as
1663 * element 0 will eventually be allocated.
1664 */
1665 newest_trpa = 0U;
1666
1667 for (uint8_t i = 0; i < CONFIG_BT_CTLR_TRPA_CACHE_SIZE; i++) {
1668 trpa_cache[i].rl_idx = FILTER_IDX_NONE;
1669 }
1670 }
1671
/* Insert a target RPA with its Resolving List index into the ring cache,
 * overwriting the oldest entry once the buffer has wrapped.
 */
static void trpa_cache_add(bt_addr_t *rpa, uint8_t rl_idx)
{
	/* Advance the write index first; it wraps around the cache size */
	newest_trpa = (newest_trpa + 1) % CONFIG_BT_CTLR_TRPA_CACHE_SIZE;

	memcpy(trpa_cache[newest_trpa].rpa.val, rpa->val, sizeof(bt_addr_t));
	trpa_cache[newest_trpa].rl_idx = rl_idx;
}
1680
/* Look a target RPA up in the ring cache, matching both the Resolving
 * List index and the address itself.
 * Returns the cache index, or FILTER_IDX_NONE when not cached.
 */
static uint8_t trpa_cache_find(bt_addr_t *rpa, uint8_t rl_idx)
{
	for (uint8_t i = 0U; i < CONFIG_BT_CTLR_TRPA_CACHE_SIZE; i++) {
		if ((trpa_cache[i].rl_idx == rl_idx) &&
		    bt_addr_eq(&trpa_cache[i].rpa, rpa)) {
			return i;
		}
	}

	return FILTER_IDX_NONE;
}
1691
/* Expose the target RPA cache to the lower link layer (LLL) code. */
const struct lll_trpa_cache *ull_filter_lll_trpa_cache_get(void)
{
	return trpa_cache;
}
1696
#endif /* CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY */
1698