/*
 * Copyright (c) 2017-2019 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <string.h>

#include <zephyr.h>
#include <soc.h>
#include <bluetooth/hci.h>
#include <sys/byteorder.h>

#include "hal/cpu.h"
#include "hal/ccm.h"

#include "util/util.h"
#include "util/mem.h"
#include "util/memq.h"
#include "util/mayfly.h"

#include "pdu.h"

#include "lll.h"
#include "lll/lll_adv_types.h"
#include "lll_adv.h"
#include "lll/lll_adv_pdu.h"
#include "lll_scan.h"
#include "lll/lll_df_types.h"
#include "lll_conn.h"
#include "lll_filter.h"

#include "ull_adv_types.h"
#include "ull_scan_types.h"
#include "ull_conn_types.h"
#include "ull_filter.h"

#include "ull_internal.h"
#include "ull_adv_internal.h"
#include "ull_scan_internal.h"
#include "ull_conn_internal.h"

#include "ll.h"

#define BT_DBG_ENABLED IS_ENABLED(CONFIG_BT_DEBUG_HCI_DRIVER)
#define LOG_MODULE_NAME bt_ctlr_ull_filter
#include "common/log.h"
#include "hal/debug.h"

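/* Address type 0xFF is used by the HCI LE Filter Accept List commands to
 * denote anonymous advertisements, i.e. advertisements that carry no
 * advertiser address.
 */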
#define ADDR_TYPE_ANON 0xFF

/* Hardware Filter Accept List */
static struct lll_filter fal_filter;
uint8_t fal_anon;

#define IRK_SIZE 16

#if defined(CONFIG_BT_CTLR_PRIVACY)
#include "common/rpa.h"

/* Filter Accept List peer list */
static struct lll_fal fal[FAL_SIZE];

static uint8_t rl_enable;

static struct lll_resolvelist rl[CONFIG_BT_CTLR_RL_SIZE];

#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
/* Cache of known unknown peer RPAs */
static uint8_t newest_prpa;
static struct prpa_cache_dev {
	uint8_t taken:1;
	bt_addr_t rpa;
} prpa_cache[CONFIG_BT_CTLR_RPA_CACHE_SIZE];

struct prpa_resolve_work {
	struct k_work prpa_work;
	bt_addr_t rpa;
	resolve_callback_t cb;
};

struct target_resolve_work {
	struct k_work target_work;
	bt_addr_t rpa;
	uint8_t idx;
	resolve_callback_t cb;
};
#endif /* CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY */

static uint8_t peer_irks[CONFIG_BT_CTLR_RL_SIZE][IRK_SIZE];
static uint8_t peer_irk_rl_ids[CONFIG_BT_CTLR_RL_SIZE];
static uint8_t peer_irk_count;

static bt_addr_t local_rpas[CONFIG_BT_CTLR_RL_SIZE];

#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
static struct prpa_resolve_work resolve_work;
static struct target_resolve_work t_work;

BUILD_ASSERT(ARRAY_SIZE(prpa_cache) < FILTER_IDX_NONE);
#endif /* CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY */
BUILD_ASSERT(ARRAY_SIZE(fal) < FILTER_IDX_NONE);
BUILD_ASSERT(ARRAY_SIZE(rl) < FILTER_IDX_NONE);

/* Hardware filter for the resolving list */
static struct lll_filter rl_filter;

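/* The specification default for the RPA timeout is 900 seconds (15
 * minutes); the host can change it at runtime, see ll_rl_timeout_set()
 * below.
 */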
#define DEFAULT_RPA_TIMEOUT_MS (900 * 1000)
static uint32_t rpa_timeout_ms;
static int64_t rpa_last_ms;

static struct k_work_delayable rpa_work;

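/* Match a 'taken' list entry against an identity address: the entry is a
 * hit when its address type LSB (public/random) and all six address bytes
 * are equal.
 */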
#define LIST_MATCH(list, i, type, addr) (list[i].taken && \
		    (list[i].id_addr_type == (type & 0x1)) && \
		    !memcmp(list[i].id_addr.val, addr, BDADDR_SIZE))

static void fal_clear(void);
static uint8_t fal_find(uint8_t addr_type, uint8_t *addr, uint8_t *free);
static uint32_t fal_add(bt_addr_le_t *id_addr);
static uint32_t fal_remove(bt_addr_le_t *id_addr);
static void fal_update(void);

static void rl_clear(void);
static void rl_update(void);
static int rl_access_check(bool check_ar);

#if defined(CONFIG_BT_BROADCASTER)
static void rpa_adv_refresh(struct ll_adv_set *adv);
#endif
static void rpa_timeout(struct k_work *work);
static void rpa_refresh_start(void);
static void rpa_refresh_stop(void);
#else /* !CONFIG_BT_CTLR_PRIVACY */
static uint32_t filter_add(struct lll_filter *filter, uint8_t addr_type,
			   uint8_t *bdaddr);
static uint32_t filter_remove(struct lll_filter *filter, uint8_t addr_type,
			      uint8_t *bdaddr);
#endif /* !CONFIG_BT_CTLR_PRIVACY */

static void filter_insert(struct lll_filter *filter, int index, uint8_t addr_type,
			  uint8_t *bdaddr);
static void filter_clear(struct lll_filter *filter);

#if defined(CONFIG_BT_CTLR_PRIVACY) && \
	defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
static void conn_rpa_update(uint8_t rl_idx);
#endif /* CONFIG_BT_CTLR_PRIVACY && CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */

#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
static void prpa_cache_clear(void);
static uint8_t prpa_cache_find(bt_addr_t *prpa_cache_addr);
static void prpa_cache_add(bt_addr_t *prpa_cache_addr);
static uint8_t prpa_cache_try_resolve(bt_addr_t *rpa);
static void prpa_cache_resolve(struct k_work *work);
static void target_resolve(struct k_work *work);
#endif /* CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY */

uint8_t ll_fal_size_get(void)
{
	return FAL_SIZE;
}

uint8_t ll_fal_clear(void)
{
#if defined(CONFIG_BT_BROADCASTER)
	if (ull_adv_filter_pol_get(0)) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_OBSERVER)
	if (ull_scan_filter_pol_get(0) & 0x1) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_CTLR_PRIVACY)
	fal_clear();
#else
	filter_clear(&fal_filter);
#endif /* CONFIG_BT_CTLR_PRIVACY */

	fal_anon = 0U;

	return 0;
}

uint8_t ll_fal_add(bt_addr_le_t *addr)
{
#if defined(CONFIG_BT_BROADCASTER)
	if (ull_adv_filter_pol_get(0)) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_OBSERVER)
	if (ull_scan_filter_pol_get(0) & 0x1) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
#endif /* CONFIG_BT_OBSERVER */

	if (addr->type == ADDR_TYPE_ANON) {
		fal_anon = 1U;
		return 0;
	}

#if defined(CONFIG_BT_CTLR_PRIVACY)
	return fal_add(addr);
#else
	return filter_add(&fal_filter, addr->type, addr->a.val);
#endif /* CONFIG_BT_CTLR_PRIVACY */
}

uint8_t ll_fal_remove(bt_addr_le_t *addr)
{
#if defined(CONFIG_BT_BROADCASTER)
	if (ull_adv_filter_pol_get(0)) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_OBSERVER)
	if (ull_scan_filter_pol_get(0) & 0x1) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
#endif /* CONFIG_BT_OBSERVER */

	if (addr->type == ADDR_TYPE_ANON) {
		fal_anon = 0U;
		return 0;
	}

#if defined(CONFIG_BT_CTLR_PRIVACY)
	return fal_remove(addr);
#else
	return filter_remove(&fal_filter, addr->type, addr->a.val);
#endif /* CONFIG_BT_CTLR_PRIVACY */
}

#if defined(CONFIG_BT_CTLR_PRIVACY)
void ll_rl_id_addr_get(uint8_t rl_idx, uint8_t *id_addr_type, uint8_t *id_addr)
{
	LL_ASSERT(rl_idx < CONFIG_BT_CTLR_RL_SIZE);
	LL_ASSERT(rl[rl_idx].taken);

	*id_addr_type = rl[rl_idx].id_addr_type;
	memcpy(id_addr, rl[rl_idx].id_addr.val, BDADDR_SIZE);
}

uint8_t ll_rl_size_get(void)
{
	return CONFIG_BT_CTLR_RL_SIZE;
}

uint8_t ll_rl_clear(void)
{
	if (!rl_access_check(false)) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	rl_clear();

	return 0;
}

uint8_t ll_rl_add(bt_addr_le_t *id_addr, const uint8_t pirk[IRK_SIZE],
		  const uint8_t lirk[IRK_SIZE])
{
	uint8_t i, j;

	if (!rl_access_check(false)) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	i = ull_filter_rl_find(id_addr->type, id_addr->a.val, &j);

	/* Duplicate check */
	if (i < ARRAY_SIZE(rl)) {
		return BT_HCI_ERR_INVALID_PARAM;
	} else if (j >= ARRAY_SIZE(rl)) {
		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	/* Device not found but empty slot found */
	i = j;

	bt_addr_copy(&rl[i].id_addr, &id_addr->a);
	rl[i].id_addr_type = id_addr->type & 0x1;
	rl[i].pirk = mem_nz((uint8_t *)pirk, IRK_SIZE);
	rl[i].lirk = mem_nz((uint8_t *)lirk, IRK_SIZE);
	if (rl[i].pirk) {
		/* cross-reference */
		rl[i].pirk_idx = peer_irk_count;
		peer_irk_rl_ids[peer_irk_count] = i;
		/* AAR requires big-endian IRKs */
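		/* Note: HCI delivers IRKs in little-endian byte order,
		 * while the hardware address resolver consumes them
		 * most-significant byte first, hence the swap on copy.
		 */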
		sys_memcpy_swap(peer_irks[peer_irk_count++], pirk, IRK_SIZE);
#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
		/* a new key was added, invalidate the known/unknown list */
		prpa_cache_clear();
#endif
	}
	if (rl[i].lirk) {
		memcpy(rl[i].local_irk, lirk, IRK_SIZE);
		rl[i].local_rpa = NULL;
	}
	memset(rl[i].curr_rpa.val, 0x00, sizeof(rl[i].curr_rpa));
	rl[i].rpas_ready = 0U;
#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
	memset(rl[i].target_rpa.val, 0x00, sizeof(rl[i].target_rpa));
#endif
	/* Default to Network Privacy */
	rl[i].dev = 0U;
	/* Add reference to a Filter Accept List entry */
	j = fal_find(id_addr->type, id_addr->a.val, NULL);
	if (j < ARRAY_SIZE(fal)) {
		fal[j].rl_idx = i;
		rl[i].fal = 1U;
	} else {
		rl[i].fal = 0U;
	}
	rl[i].taken = 1U;

	return 0;
}

uint8_t ll_rl_remove(bt_addr_le_t *id_addr)
{
	uint8_t i;

	if (!rl_access_check(false)) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* find the device and mark it as empty */
	i = ull_filter_rl_find(id_addr->type, id_addr->a.val, NULL);
	if (i < ARRAY_SIZE(rl)) {
		uint8_t j, k;

		if (rl[i].pirk) {
			/* Swap with last item */
			uint8_t pi = rl[i].pirk_idx, pj = peer_irk_count - 1;

			if (pj && pi != pj) {
				memcpy(peer_irks[pi], peer_irks[pj], IRK_SIZE);
				for (k = 0U;
				     k < CONFIG_BT_CTLR_RL_SIZE;
				     k++) {

					if (rl[k].taken && rl[k].pirk &&
					    rl[k].pirk_idx == pj) {
						rl[k].pirk_idx = pi;
						peer_irk_rl_ids[pi] = k;
						break;
					}
				}
			}
			peer_irk_count--;
		}

		/* Check if referenced by a Filter Accept List entry */
		j = fal_find(id_addr->type, id_addr->a.val, NULL);
		if (j < ARRAY_SIZE(fal)) {
			fal[j].rl_idx = FILTER_IDX_NONE;
		}
		rl[i].taken = 0U;
		return 0;
	}

	return BT_HCI_ERR_UNKNOWN_CONN_ID;
}

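/* A resolvable private address is identified by its two most significant
 * bits being 0b01, i.e. (addr[5] & 0xc0) == 0x40 for a little-endian
 * address; only such addresses are cached as the peer's current RPA.
 */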
void ll_rl_crpa_set(uint8_t id_addr_type, uint8_t *id_addr, uint8_t rl_idx, uint8_t *crpa)
{
	if ((crpa[5] & 0xc0) == 0x40) {

		if (id_addr) {
			/* find the device and return its RPA */
			rl_idx = ull_filter_rl_find(id_addr_type, id_addr, NULL);
		}

		if (rl_idx < ARRAY_SIZE(rl) && rl[rl_idx].taken) {
			memcpy(rl[rl_idx].curr_rpa.val, crpa,
			       sizeof(bt_addr_t));
#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
			conn_rpa_update(rl_idx);
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */
		}
	}
}

uint8_t ll_rl_crpa_get(bt_addr_le_t *id_addr, bt_addr_t *crpa)
{
	uint8_t i;

	/* find the device and return its RPA */
	i = ull_filter_rl_find(id_addr->type, id_addr->a.val, NULL);
	if (i < ARRAY_SIZE(rl) &&
	    mem_nz(rl[i].curr_rpa.val, sizeof(rl[i].curr_rpa.val))) {
		bt_addr_copy(crpa, &rl[i].curr_rpa);
		return 0;
	}

	return BT_HCI_ERR_UNKNOWN_CONN_ID;
}

uint8_t ll_rl_lrpa_get(bt_addr_le_t *id_addr, bt_addr_t *lrpa)
{
	uint8_t i;

	/* find the device and return the local RPA */
	i = ull_filter_rl_find(id_addr->type, id_addr->a.val, NULL);
	if (i < ARRAY_SIZE(rl)) {
		bt_addr_copy(lrpa, rl[i].local_rpa);
		return 0;
	}

	return BT_HCI_ERR_UNKNOWN_CONN_ID;
}

uint8_t ll_rl_enable(uint8_t enable)
{
	if (!rl_access_check(false)) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	switch (enable) {
	case BT_HCI_ADDR_RES_DISABLE:
		rl_enable = 0U;
		break;
	case BT_HCI_ADDR_RES_ENABLE:
		rl_enable = 1U;
		break;
	default:
		return BT_HCI_ERR_INVALID_PARAM;
	}

	return 0;
}

void ll_rl_timeout_set(uint16_t timeout)
{
	rpa_timeout_ms = timeout * 1000U;
}

uint8_t ll_priv_mode_set(bt_addr_le_t *id_addr, uint8_t mode)
{
	uint8_t i;

	if (!rl_access_check(false)) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* find the device and set its privacy mode */
	i = ull_filter_rl_find(id_addr->type, id_addr->a.val, NULL);
	if (i < ARRAY_SIZE(rl)) {
		switch (mode) {
		case BT_HCI_LE_PRIVACY_MODE_NETWORK:
			rl[i].dev = 0U;
			break;
		case BT_HCI_LE_PRIVACY_MODE_DEVICE:
			rl[i].dev = 1U;
			break;
		default:
			return BT_HCI_ERR_INVALID_PARAM;
		}
	} else {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	return 0;
}

void ull_filter_adv_scan_state_cb(uint8_t bm)
{
	if (bm) {
		rpa_refresh_start();
	} else {
		rpa_refresh_stop();
	}
}

void ull_filter_adv_update(uint8_t adv_fp)
{
	/* Clear before populating filter */
	filter_clear(&fal_filter);

	/* enabling advertising */
	if (adv_fp &&
	    (!IS_ENABLED(CONFIG_BT_OBSERVER) ||
	     !(ull_scan_filter_pol_get(0) & 0x1))) {
		/* filter accept list not in use, update FAL */
		fal_update();
	}

	/* Clear before populating rl filter */
	filter_clear(&rl_filter);

	if (rl_enable &&
	    (!IS_ENABLED(CONFIG_BT_OBSERVER) || !ull_scan_is_enabled(0))) {
		/* rl not in use, update resolving list LUT */
		rl_update();
	}
}

void ull_filter_scan_update(uint8_t scan_fp)
{
	/* Clear before populating filter */
	filter_clear(&fal_filter);

	/* enabling scanning */
	if ((scan_fp & 0x1) &&
	    (!IS_ENABLED(CONFIG_BT_BROADCASTER) ||
	     !ull_adv_filter_pol_get(0))) {
		/* Filter Accept List not in use, update FAL */
		fal_update();
	}

	/* Clear before populating rl filter */
	filter_clear(&rl_filter);

	if (rl_enable &&
	    (!IS_ENABLED(CONFIG_BT_BROADCASTER) || !ull_adv_is_enabled(0))) {
		/* rl not in use, update resolving list LUT */
		rl_update();
	}
}

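/* Regenerate RPAs for resolving list entries. With 'timeout' set (or when
 * the RPA timeout has elapsed since the last full run) every entry is
 * refreshed; otherwise only entries whose RPAs are not yet ready are
 * generated, e.g. entries added after the previous refresh.
 */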
void ull_filter_rpa_update(bool timeout)
{
	uint8_t i;
	int err;
	int64_t now = k_uptime_get();
	bool all = timeout || (rpa_last_ms == -1) ||
		   (now - rpa_last_ms >= rpa_timeout_ms);
	BT_DBG("");

	for (i = 0U; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
		if ((rl[i].taken) && (all || !rl[i].rpas_ready)) {

			if (rl[i].pirk) {
				uint8_t irk[IRK_SIZE];

				/* TODO: move this swap to the driver level */
				sys_memcpy_swap(irk, peer_irks[rl[i].pirk_idx],
						IRK_SIZE);
				err = bt_rpa_create(irk, &rl[i].peer_rpa);
				LL_ASSERT(!err);
#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
				/* a new key was added,
				 * invalidate the known/unknown peer RPA cache
				 */
				prpa_cache_clear();
#endif
			}

			if (rl[i].lirk) {
				bt_addr_t rpa;

				err = bt_rpa_create(rl[i].local_irk, &rpa);
				LL_ASSERT(!err);
				/* pointer read/write assumed to be atomic
				 * so that if ISR fires the local_rpa pointer
				 * will always point to a valid full RPA
				 */
				rl[i].local_rpa = &rpa;
				bt_addr_copy(&local_rpas[i], &rpa);
				rl[i].local_rpa = &local_rpas[i];
			}

			rl[i].rpas_ready = 1U;
		}
	}

	if (all) {
		rpa_last_ms = now;
	}

	if (timeout) {
#if defined(CONFIG_BT_BROADCASTER)
		struct ll_adv_set *adv;

		/* TODO: foreach adv set */
		adv = ull_adv_is_enabled_get(0);
		if (adv) {
			rpa_adv_refresh(adv);
		}
#endif
	}
}

#if defined(CONFIG_BT_BROADCASTER)
const uint8_t *ull_filter_adva_get(struct ll_adv_set *adv)
{
	uint8_t idx = adv->lll.rl_idx;

	/* AdvA */
	if (idx < ARRAY_SIZE(rl) && rl[idx].lirk) {
		LL_ASSERT(rl[idx].rpas_ready);
		return rl[idx].local_rpa->val;
	}

	return NULL;
}

const uint8_t *ull_filter_tgta_get(struct ll_adv_set *adv)
{
	uint8_t idx = adv->lll.rl_idx;

	/* TargetA */
	if (idx < ARRAY_SIZE(rl) && rl[idx].pirk) {
		return rl[idx].peer_rpa.val;
	}

	return NULL;
}
#endif /* CONFIG_BT_BROADCASTER */

uint8_t ull_filter_rl_find(uint8_t id_addr_type, uint8_t const *const id_addr,
			   uint8_t *const free)
{
	uint8_t i;

	if (free) {
		*free = FILTER_IDX_NONE;
	}

	for (i = 0U; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
		if (LIST_MATCH(rl, i, id_addr_type, id_addr)) {
			return i;
		} else if (free && !rl[i].taken && (*free == FILTER_IDX_NONE)) {
			*free = i;
		}
	}

	return FILTER_IDX_NONE;
}
#endif /* CONFIG_BT_CTLR_PRIVACY */

void ull_filter_reset(bool init)
{
	fal_anon = 0U;

#if defined(CONFIG_BT_CTLR_PRIVACY)
	fal_clear();

	rl_enable = 0U;
	rpa_timeout_ms = DEFAULT_RPA_TIMEOUT_MS;
	rpa_last_ms = -1;
	rl_clear();
#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
	prpa_cache_clear();
#endif
	if (init) {
		k_work_init_delayable(&rpa_work, rpa_timeout);
#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
		k_work_init(&(resolve_work.prpa_work), prpa_cache_resolve);
		k_work_init(&(t_work.target_work), target_resolve);
#endif
	} else {
		k_work_cancel_delayable(&rpa_work);
	}
#else
	filter_clear(&fal_filter);
#endif /* CONFIG_BT_CTLR_PRIVACY */
}

#if defined(CONFIG_BT_CTLR_PRIVACY)
bool ull_filter_lll_lrpa_used(uint8_t rl_idx)
{
	return rl_idx < ARRAY_SIZE(rl) && rl[rl_idx].lirk;
}

bt_addr_t *ull_filter_lll_lrpa_get(uint8_t rl_idx)
{
	if ((rl_idx >= ARRAY_SIZE(rl)) || !rl[rl_idx].lirk ||
	    !rl[rl_idx].rpas_ready) {
		return NULL;
	}

	return rl[rl_idx].local_rpa;
}

uint8_t *ull_filter_lll_irks_get(uint8_t *count)
{
	*count = peer_irk_count;
	return (uint8_t *)peer_irks;
}

uint8_t ull_filter_lll_rl_idx(bool filter, uint8_t devmatch_id)
{
	uint8_t i;

	if (filter) {
		LL_ASSERT(devmatch_id < ARRAY_SIZE(fal));
		LL_ASSERT(fal[devmatch_id].taken);
		i = fal[devmatch_id].rl_idx;
	} else {
		LL_ASSERT(devmatch_id < ARRAY_SIZE(rl));
		i = devmatch_id;
		LL_ASSERT(rl[i].taken);
	}

	return i;
}

uint8_t ull_filter_lll_rl_irk_idx(uint8_t irkmatch_id)
{
	uint8_t i;

	LL_ASSERT(irkmatch_id < peer_irk_count);
	i = peer_irk_rl_ids[irkmatch_id];
	LL_ASSERT(i < CONFIG_BT_CTLR_RL_SIZE);
	LL_ASSERT(rl[i].taken);

	return i;
}

bool ull_filter_lll_irk_in_fal(uint8_t rl_idx)
{
	if (rl_idx >= ARRAY_SIZE(rl)) {
		return false;
	}

	LL_ASSERT(rl[rl_idx].taken);

	return rl[rl_idx].fal;
}
#endif /* CONFIG_BT_CTLR_PRIVACY */

struct lll_filter *ull_filter_lll_get(bool filter)
{
#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (filter) {
		return &fal_filter;
	}
	return &rl_filter;
#else
	LL_ASSERT(filter);
	return &fal_filter;
#endif
}

#if defined(CONFIG_BT_CTLR_PRIVACY)
struct lll_fal *ull_filter_lll_fal_get(void)
{
	return fal;
}

struct lll_resolvelist *ull_filter_lll_resolvelist_get(void)
{
	return rl;
}

bool ull_filter_lll_rl_idx_allowed(uint8_t irkmatch_ok, uint8_t rl_idx)
{
	/* If AR is disabled or we don't know the device or we matched an IRK
	 * then we're all set.
	 */
	if (!rl_enable || rl_idx >= ARRAY_SIZE(rl) || irkmatch_ok) {
		return true;
	}

	LL_ASSERT(rl_idx < CONFIG_BT_CTLR_RL_SIZE);
	LL_ASSERT(rl[rl_idx].taken);

	return !rl[rl_idx].pirk || rl[rl_idx].dev;
}

bool ull_filter_lll_rl_addr_allowed(uint8_t id_addr_type, uint8_t *id_addr, uint8_t *rl_idx)
{
	uint8_t i, j;

	/* If we matched an IRK then we're all set. No hw
	 * filters are used in this case.
	 */
	if (*rl_idx != FILTER_IDX_NONE) {
		return true;
	}

	for (i = 0U; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
		if (rl[i].taken && (rl[i].id_addr_type == id_addr_type)) {
			uint8_t *addr = rl[i].id_addr.val;

			for (j = 0U; j < BDADDR_SIZE; j++) {
				if (addr[j] != id_addr[j]) {
					break;
				}
			}

			if (j == BDADDR_SIZE) {
				*rl_idx = i;
				return !rl[i].pirk || rl[i].dev;
			}
		}
	}

	return true;
}

bool ull_filter_lll_rl_addr_resolve(uint8_t id_addr_type, uint8_t *id_addr, uint8_t rl_idx)
{
	/* Unable to resolve if AR is disabled, no RL entry or no local IRK */
	if (!rl_enable || rl_idx >= ARRAY_SIZE(rl) || !rl[rl_idx].lirk) {
		return false;
	}

	if ((id_addr_type != 0U) && ((id_addr[5] & 0xc0) == 0x40)) {
		return bt_rpa_irk_matches(rl[rl_idx].local_irk,
					  (bt_addr_t *)id_addr);
	}

	return false;
}

bool ull_filter_lll_rl_enabled(void)
{
	return rl_enable;
}

#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
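/* Defer resolution of a peer RPA that could not be resolved in real time.
 * A minimal usage sketch (hypothetical caller and callback names, shown
 * here only for illustration):
 *
 *   static void resolve_done(void *param)
 *   {
 *       uint8_t rl_idx = (uint8_t)(uintptr_t)param;
 *
 *       if (rl_idx != FILTER_IDX_NONE) {
 *           // peer resolved against resolving list entry rl_idx
 *       }
 *   }
 *
 *   (void)ull_filter_deferred_resolve(&rpa, resolve_done);
 *
 * Returns 1 when the work item was queued, 0 when address resolution is
 * disabled or a previous request is still pending.
 */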
uint8_t ull_filter_deferred_resolve(bt_addr_t *rpa, resolve_callback_t cb)
{
	if (rl_enable) {
		if (!k_work_is_pending(&(resolve_work.prpa_work))) {
			/* copy input param to work variable */
			memcpy(resolve_work.rpa.val, rpa->val, sizeof(bt_addr_t));
			resolve_work.cb = cb;

			k_work_submit(&(resolve_work.prpa_work));

			return 1;
		}
	}

	return 0;
}

uint8_t ull_filter_deferred_targeta_resolve(bt_addr_t *rpa, uint8_t rl_idx,
					    resolve_callback_t cb)
{
	if (rl_enable) {
		if (!k_work_is_pending(&(t_work.target_work))) {
			/* copy input param to work variable */
			memcpy(t_work.rpa.val, rpa->val, sizeof(bt_addr_t));
			t_work.cb = cb;
			t_work.idx = rl_idx;

			k_work_submit(&(t_work.target_work));

			return 1;
		}
	}
	return 0;
}
#endif /* CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY */

static void fal_clear(void)
{
	for (int i = 0; i < FAL_SIZE; i++) {
		uint8_t j = fal[i].rl_idx;

		if (j < ARRAY_SIZE(rl)) {
			rl[j].fal = 0U;
		}
		fal[i].taken = 0U;
	}
}

static uint8_t fal_find(uint8_t addr_type, uint8_t *addr, uint8_t *free_idx)
{
	int i;

	if (free_idx) {
		*free_idx = FILTER_IDX_NONE;
	}

	for (i = 0; i < FAL_SIZE; i++) {
		if (LIST_MATCH(fal, i, addr_type, addr)) {
			return i;
		} else if (free_idx && !fal[i].taken &&
			   (*free_idx == FILTER_IDX_NONE)) {
			*free_idx = i;
		}
	}

	return FILTER_IDX_NONE;
}

static uint32_t fal_add(bt_addr_le_t *id_addr)
{
	uint8_t i, j;

	i = fal_find(id_addr->type, id_addr->a.val, &j);

	/* Duplicate check */
	if (i < ARRAY_SIZE(fal)) {
		return 0;
	} else if (j >= ARRAY_SIZE(fal)) {
		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	i = j;

	fal[i].id_addr_type = id_addr->type & 0x1;
	bt_addr_copy(&fal[i].id_addr, &id_addr->a);
	/* Get index to Resolving List if applicable */
	j = ull_filter_rl_find(id_addr->type, id_addr->a.val, NULL);
	if (j < ARRAY_SIZE(rl)) {
		fal[i].rl_idx = j;
		rl[j].fal = 1U;
	} else {
		fal[i].rl_idx = FILTER_IDX_NONE;
	}
	fal[i].taken = 1U;

	return 0;
}

static uint32_t fal_remove(bt_addr_le_t *id_addr)
{
	/* find the device and mark it as empty */
	uint8_t i = fal_find(id_addr->type, id_addr->a.val, NULL);

	if (i < ARRAY_SIZE(fal)) {
		uint8_t j = fal[i].rl_idx;

		if (j < ARRAY_SIZE(rl)) {
			rl[j].fal = 0U;
		}
		fal[i].taken = 0U;
		return 0;
	}

	return BT_HCI_ERR_UNKNOWN_CONN_ID;
}

static void fal_update(void)
{
	uint8_t i;

	/* Populate filter from fal peers */
	for (i = 0U; i < FAL_SIZE; i++) {
		uint8_t j;

		if (!fal[i].taken) {
			continue;
		}

		j = fal[i].rl_idx;

		if (!rl_enable || j >= ARRAY_SIZE(rl) || !rl[j].pirk ||
		    rl[j].dev) {
			filter_insert(&fal_filter, i, fal[i].id_addr_type,
				      fal[i].id_addr.val);
		}
	}
}

static void rl_update(void)
{
	uint8_t i;

	/* Populate filter from rl peers */
	for (i = 0U; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
		if (rl[i].taken) {
			filter_insert(&rl_filter, i, rl[i].id_addr_type,
				      rl[i].id_addr.val);
		}
	}
}

#if defined(CONFIG_BT_BROADCASTER)
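/* Re-generate the advertising PDU after an RPA timeout so that AdvA (and
 * TargetA, when directed) carry the freshly generated RPAs; the PDU is
 * double-buffered, so the copy is staged in the idle buffer and then
 * enqueued for the LLL to pick up.
 */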
static void rpa_adv_refresh(struct ll_adv_set *adv)
{
	struct pdu_adv *prev;
	struct pdu_adv *pdu;
	uint8_t idx;

	if (adv->own_addr_type != BT_ADDR_LE_PUBLIC_ID &&
	    adv->own_addr_type != BT_ADDR_LE_RANDOM_ID) {
		return;
	}

	if (adv->lll.rl_idx >= ARRAY_SIZE(rl)) {
		return;
	}

	prev = lll_adv_data_peek(&adv->lll);
	pdu = lll_adv_data_alloc(&adv->lll, &idx);

	memcpy(pdu, prev, PDU_AC_LL_HEADER_SIZE + prev->len);
	ull_adv_pdu_update_addrs(adv, pdu);

	lll_adv_data_enqueue(&adv->lll, idx);
}
#endif /* CONFIG_BT_BROADCASTER */

static void rl_clear(void)
{
	for (uint8_t i = 0; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
		rl[i].taken = 0U;
	}

	peer_irk_count = 0U;
}

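/* Check whether the resolving list may be modified right now. Returns a
 * non-zero value when access is allowed: -1 when address resolution is
 * disabled (and check_ar is set), 1 when no role that uses the list is
 * active; returns 0, i.e. deny, while advertising or active scanning is
 * running (passive scanning alone does not block access).
 */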
static int rl_access_check(bool check_ar)
{
	if (check_ar) {
		/* If address resolution is disabled, allow immediately */
		if (!rl_enable) {
			return -1;
		}
	}

	/* NOTE: Allowed when passive scanning, otherwise deny if advertising,
	 * active scanning, initiating or periodic sync create is active.
	 */
	return ((IS_ENABLED(CONFIG_BT_BROADCASTER) && ull_adv_is_enabled(0)) ||
		(IS_ENABLED(CONFIG_BT_OBSERVER) &&
		 (ull_scan_is_enabled(0) & ~ULL_SCAN_IS_PASSIVE)))
	       ? 0 : 1;
}

static void rpa_timeout(struct k_work *work)
{
	ull_filter_rpa_update(true);
	k_work_schedule(&rpa_work, K_MSEC(rpa_timeout_ms));
}

static void rpa_refresh_start(void)
{
	BT_DBG("");
	k_work_schedule(&rpa_work, K_MSEC(rpa_timeout_ms));
}

static void rpa_refresh_stop(void)
{
	k_work_cancel_delayable(&rpa_work);
}

#else /* !CONFIG_BT_CTLR_PRIVACY */

static uint32_t filter_add(struct lll_filter *filter, uint8_t addr_type,
			   uint8_t *bdaddr)
{
	int index;

	if (filter->enable_bitmask == LLL_FILTER_BITMASK_ALL) {
		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	for (index = 0;
	     (filter->enable_bitmask & BIT(index));
	     index++) {
	}

	filter_insert(filter, index, addr_type, bdaddr);
	return 0;
}

static uint32_t filter_remove(struct lll_filter *filter, uint8_t addr_type,
			      uint8_t *bdaddr)
{
	int index;

	if (!filter->enable_bitmask) {
		return BT_HCI_ERR_INVALID_PARAM;
	}

	index = FAL_SIZE;
	while (index--) {
		if ((filter->enable_bitmask & BIT(index)) &&
		    (((filter->addr_type_bitmask >> index) & 0x01) ==
		     (addr_type & 0x01)) &&
		    !memcmp(filter->bdaddr[index], bdaddr, BDADDR_SIZE)) {
			filter->enable_bitmask &= ~BIT(index);
			filter->addr_type_bitmask &= ~BIT(index);
			return 0;
		}
	}

	return BT_HCI_ERR_INVALID_PARAM;
}
#endif /* !CONFIG_BT_CTLR_PRIVACY */

static void filter_insert(struct lll_filter *filter, int index, uint8_t addr_type,
			  uint8_t *bdaddr)
{
	filter->enable_bitmask |= BIT(index);
	filter->addr_type_bitmask |= ((addr_type & 0x01) << index);
	memcpy(&filter->bdaddr[index][0], bdaddr, BDADDR_SIZE);
}
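
/* A worked example of the bitmask bookkeeping above, assuming an empty
 * filter: inserting a random-address (type 1) peer at index 0 and a
 * public-address (type 0) peer at index 1 yields
 *
 *   enable_bitmask    = 0b011  (slots 0 and 1 in use)
 *   addr_type_bitmask = 0b001  (slot 0 random, slot 1 public)
 *
 * so a slot matches when its enable bit is set, its addr_type bit equals
 * the LSB of the sought address type, and the six address bytes compare
 * equal.
 */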

static void filter_clear(struct lll_filter *filter)
{
	filter->enable_bitmask = 0;
	filter->addr_type_bitmask = 0;
}

#if defined(CONFIG_BT_CTLR_PRIVACY) && \
	defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
static void conn_rpa_update(uint8_t rl_idx)
{
	uint16_t handle;

	for (handle = 0U; handle < CONFIG_BT_MAX_CONN; handle++) {
		struct ll_conn *conn = ll_connected_get(handle);

		/* The RPA of the connection matches the RPA that was just
		 * resolved
		 */
		if (conn && !memcmp(conn->peer_id_addr, rl[rl_idx].curr_rpa.val,
				    BDADDR_SIZE)) {
			(void)memcpy(conn->peer_id_addr, rl[rl_idx].id_addr.val,
				     BDADDR_SIZE);
			break;
		}
	}
}
#endif /* CONFIG_BT_CTLR_PRIVACY && CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */

#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
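/* Resolve a directed advertisement's TargetA against the local IRK of a
 * single resolving list entry (the advertiser is already known via idx):
 * first check the cached target RPA, then attempt a fresh resolution,
 * and finally hand the verdict back to LLL through a mayfly.
 */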
static void target_resolve(struct k_work *work)
{
	uint8_t j, idx;
	bt_addr_t *search_rpa;
	struct target_resolve_work *twork;
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, 0, NULL};

	twork = CONTAINER_OF(work, struct target_resolve_work, target_work);
	idx = twork->idx;
	search_rpa = &(twork->rpa);

	if (rl[idx].taken && !bt_addr_cmp(&(rl[idx].target_rpa), search_rpa)) {
		j = idx;
	} else {
		/* No match, so not in the list; see if we can resolve */
		if (bt_rpa_irk_matches(rl[idx].local_irk, search_rpa)) {
			/* Could resolve, store RPA */
			memcpy(rl[idx].target_rpa.val, search_rpa->val,
			       sizeof(bt_addr_t));
			j = idx;
		} else {
			/* Could not resolve, and not in table */
			j = FILTER_IDX_NONE;
		}
	}

	/* Kick the callback in LLL (using the mayfly, tailchain it)
	 * Pass param FILTER_IDX_NONE if RPA cannot be resolved,
	 * or index in cache if it can be resolved
	 */
	if (twork->cb) {
		mfy.fp = twork->cb;
		mfy.param = (void *) ((unsigned int) j);
		(void)mayfly_enqueue(TICKER_USER_ID_THREAD,
				     TICKER_USER_ID_LLL, 1, &mfy);
	}
}

static uint8_t prpa_cache_try_resolve(bt_addr_t *rpa)
{
	uint8_t pi;
	uint8_t lpirk[IRK_SIZE];

	for (uint8_t i = 0U; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
		if (rl[i].taken && rl[i].pirk) {
			pi = rl[i].pirk_idx;
			sys_memcpy_swap(lpirk, peer_irks[pi], IRK_SIZE);
			if (bt_rpa_irk_matches(lpirk, rpa)) {
				return i;
			}
		}
	}

	return FILTER_IDX_NONE;
}

static void prpa_cache_resolve(struct k_work *work)
{
	uint8_t i, j;
	bt_addr_t *search_rpa;
	struct prpa_resolve_work *rwork;
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, 0, NULL};

	rwork = CONTAINER_OF(work, struct prpa_resolve_work, prpa_work);
	search_rpa = &(rwork->rpa);

	i = prpa_cache_find(search_rpa);

	if (i == FILTER_IDX_NONE) {
		/* No match, so not in the known unknown list;
		 * see if we can resolve
		 */
		j = prpa_cache_try_resolve(search_rpa);

		if (j == FILTER_IDX_NONE) {
			/* No match, thus cannot resolve; we have an unknown,
			 * so insert it in the known unknown list
			 */
			prpa_cache_add(search_rpa);
		} else {
			/* Address could be resolved, so update current RPA
			 * in list
			 */
			memcpy(rl[j].curr_rpa.val, search_rpa->val,
			       sizeof(bt_addr_t));
#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
			conn_rpa_update(j);
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */
		}

	} else {
		/* Found a known unknown - do nothing */
		j = FILTER_IDX_NONE;
	}

	/* Kick the callback in LLL (using the mayfly, tailchain it)
	 * Pass param FILTER_IDX_NONE if RPA cannot be resolved,
	 * or index in cache if it can be resolved
	 */
	if (rwork->cb) {
		mfy.fp = rwork->cb;
		mfy.param = (void *) ((unsigned int) j);
		(void)mayfly_enqueue(TICKER_USER_ID_THREAD,
				     TICKER_USER_ID_LLL, 1, &mfy);
	}
}

static void prpa_cache_clear(void)
{
	/* Note the first element will not be in use before wrap around
	 * is reached.
	 * The first element in actual use will be at index 1.
	 * There is no element wasted with this implementation, as
	 * element 0 will eventually be allocated.
	 */
	newest_prpa = 0U;

	for (uint8_t i = 0; i < CONFIG_BT_CTLR_RPA_CACHE_SIZE; i++) {
		prpa_cache[i].taken = 0U;
	}
}

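/* The cache is a fixed-size ring buffer: the write index advances before
 * each store, so once the cache is full the newest entry silently
 * overwrites the oldest one.
 */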
static void prpa_cache_add(bt_addr_t *rpa)
{
	newest_prpa = (newest_prpa + 1) % CONFIG_BT_CTLR_RPA_CACHE_SIZE;

	memcpy(prpa_cache[newest_prpa].rpa.val, rpa->val, sizeof(bt_addr_t));
	prpa_cache[newest_prpa].taken = 1U;
}

static uint8_t prpa_cache_find(bt_addr_t *rpa)
{
	for (uint8_t i = 0; i < CONFIG_BT_CTLR_RPA_CACHE_SIZE; i++) {
		if (prpa_cache[i].taken &&
		    !bt_addr_cmp(&(prpa_cache[i].rpa), rpa)) {
			return i;
		}
	}
	return FILTER_IDX_NONE;
}
#endif /* CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY */