/*
 * Copyright (c) 2017-2019 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <string.h>

#include <zephyr/kernel.h>
#include <soc.h>
#include <zephyr/bluetooth/hci_types.h>
#include <zephyr/sys/byteorder.h>

#include "hal/cpu.h"
#include "hal/ccm.h"

#include "util/util.h"
#include "util/mem.h"
#include "util/memq.h"
#include "util/mayfly.h"
#include "util/dbuf.h"

#include "pdu_df.h"
#include "lll/pdu_vendor.h"
#include "pdu.h"

#include "lll.h"
#include "lll/lll_adv_types.h"
#include "lll_adv.h"
#include "lll/lll_adv_pdu.h"
#include "lll_scan.h"
#include "lll/lll_df_types.h"
#include "lll_conn.h"
#include "lll_filter.h"

#include "ll_sw/ull_tx_queue.h"

#include "ull_adv_types.h"
#include "ull_scan_types.h"
#include "ull_conn_types.h"
#include "ull_filter.h"

#include "ull_internal.h"
#include "ull_adv_internal.h"
#include "ull_scan_internal.h"
#include "ull_conn_internal.h"

#include "ll.h"

#include "hal/debug.h"

#define LOG_LEVEL CONFIG_BT_HCI_DRIVER_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(bt_ctlr_ull_filter);

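/* HCI address type 0xFF denotes anonymous advertisements; the list add
 * and remove operations below treat anonymous entries as a successful
 * no-op.
 */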
#define ADDR_TYPE_ANON 0xFF

#if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
/* Hardware Filter Accept List */
static struct lll_filter fal_filter;

#if defined(CONFIG_BT_CTLR_PRIVACY)
#include "common/rpa.h"

/* Filter Accept List peer list */
static struct lll_fal fal[CONFIG_BT_CTLR_FAL_SIZE];

/* Resolving list */
static struct lll_resolve_list rl[CONFIG_BT_CTLR_RL_SIZE];
static uint8_t rl_enable;

#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
/* Cache of known unknown peer RPAs */
static uint8_t newest_prpa;
static struct lll_prpa_cache prpa_cache[CONFIG_BT_CTLR_RPA_CACHE_SIZE];

/* Cache of known unknown target RPAs */
static uint8_t newest_trpa;
static struct lll_trpa_cache trpa_cache[CONFIG_BT_CTLR_TRPA_CACHE_SIZE];

struct prpa_resolve_work {
	struct k_work prpa_work;
	bt_addr_t     rpa;
	resolve_callback_t cb;
};

struct target_resolve_work {
	struct k_work target_work;
	bt_addr_t rpa;
	uint8_t      idx;
	resolve_callback_t cb;
};
#endif /* CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY */

static uint8_t peer_irks[CONFIG_BT_CTLR_RL_SIZE][IRK_SIZE];
static uint8_t peer_irk_rl_ids[CONFIG_BT_CTLR_RL_SIZE];
static uint8_t peer_irk_count;

static bt_addr_t local_rpas[CONFIG_BT_CTLR_RL_SIZE];

#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
static struct prpa_resolve_work resolve_work;
static struct target_resolve_work t_work;

BUILD_ASSERT(ARRAY_SIZE(prpa_cache) < FILTER_IDX_NONE);
BUILD_ASSERT(ARRAY_SIZE(trpa_cache) < FILTER_IDX_NONE);
#endif /* CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY */
BUILD_ASSERT(ARRAY_SIZE(fal) < FILTER_IDX_NONE);
BUILD_ASSERT(ARRAY_SIZE(rl) < FILTER_IDX_NONE);

/* Hardware filter for the resolving list */
static struct lll_filter rl_filter;

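/* Core Specification default Resolvable Private Address timeout of
 * 900 seconds (15 minutes), expressed here in milliseconds.
 */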
#define DEFAULT_RPA_TIMEOUT_MS (900 * 1000)
static uint32_t rpa_timeout_ms;
static int64_t rpa_last_ms;

static struct k_work_delayable rpa_work;

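/* Match a list entry against an identity address: the entry must be in
 * use, its address type must agree in the public/random bit (only bit 0
 * of the type is compared), and all BDADDR_SIZE address bytes must match.
 */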
#define LIST_MATCH(list, i, type, addr) (list[i].taken && \
		    (list[i].id_addr_type == (type & 0x1)) && \
		    !memcmp(list[i].id_addr.val, addr, BDADDR_SIZE))

static void fal_clear(void);
static uint8_t fal_find(uint8_t addr_type, const uint8_t *const addr,
			uint8_t *const free_idx);
static uint32_t fal_add(bt_addr_le_t *id_addr);
static uint32_t fal_remove(bt_addr_le_t *id_addr);
static void fal_update(void);

static void rl_clear(void);
static void rl_update(void);
static int rl_access_check(bool check_ar);

#if defined(CONFIG_BT_BROADCASTER)
static void rpa_adv_refresh(struct ll_adv_set *adv);
#endif
static void rpa_timeout(struct k_work *work);
static void rpa_refresh_start(void);
static void rpa_refresh_stop(void);
#else /* !CONFIG_BT_CTLR_PRIVACY */
static uint32_t filter_add(struct lll_filter *filter, uint8_t addr_type,
			uint8_t *bdaddr);
static uint32_t filter_remove(struct lll_filter *filter, uint8_t addr_type,
			   uint8_t *bdaddr);
#endif /* !CONFIG_BT_CTLR_PRIVACY */

static uint32_t filter_find(const struct lll_filter *const filter,
			    uint8_t addr_type, const uint8_t *const bdaddr);
static void filter_insert(struct lll_filter *const filter, int index,
			  uint8_t addr_type, const uint8_t *const bdaddr);
static void filter_clear(struct lll_filter *filter);

#if defined(CONFIG_BT_CTLR_PRIVACY) && \
	defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
static void conn_rpa_update(uint8_t rl_idx);
#endif /* CONFIG_BT_CTLR_PRIVACY && CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */

#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
static void prpa_cache_clear(void);
static uint8_t prpa_cache_find(bt_addr_t *prpa_cache_addr);
static void prpa_cache_add(bt_addr_t *prpa_cache_addr);
static uint8_t prpa_cache_try_resolve(bt_addr_t *rpa);
static void prpa_cache_resolve(struct k_work *work);
static void target_resolve(struct k_work *work);
static void trpa_cache_clear(void);
static uint8_t trpa_cache_find(bt_addr_t *prpa_cache_addr, uint8_t rl_idx);
static void trpa_cache_add(bt_addr_t *prpa_cache_addr, uint8_t rl_idx);
#endif /* CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY */
#endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
#define PAL_ADDR_MATCH(type, addr) \
		(pal[i].taken && \
		 (pal[i].id_addr_type == (type & 0x1)) && \
		 !memcmp(pal[i].id_addr.val, addr, BDADDR_SIZE))

#define PAL_MATCH(type, addr, sid) \
		(PAL_ADDR_MATCH(type, addr) && \
		 (pal[i].sid == sid))

/* Periodic Advertising Accept List */
#define PAL_SIZE CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST_SIZE
static struct lll_pal pal[PAL_SIZE];

static void pal_clear(void);
#if defined(CONFIG_BT_CTLR_PRIVACY)
static uint8_t pal_addr_find(const uint8_t addr_type,
			     const uint8_t *const addr);
#endif /* CONFIG_BT_CTLR_PRIVACY */
static uint8_t pal_find(const uint8_t addr_type, const uint8_t *const addr,
			const uint8_t sid, uint8_t *const free_idx);
static uint32_t pal_add(const bt_addr_le_t *const id_addr, const uint8_t sid);
static uint32_t pal_remove(const bt_addr_le_t *const id_addr,
			   const uint8_t sid);
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */

#if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
uint8_t ll_fal_size_get(void)
{
	return CONFIG_BT_CTLR_FAL_SIZE;
}

uint8_t ll_fal_clear(void)
{
#if defined(CONFIG_BT_BROADCASTER)
	if (ull_adv_filter_pol_get(0)) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_OBSERVER)
	if (ull_scan_filter_pol_get(0) & 0x1) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
#endif /* CONFIG_BT_OBSERVER */

#if defined(CONFIG_BT_CTLR_PRIVACY)
	fal_clear();
#else
	filter_clear(&fal_filter);
#endif /* CONFIG_BT_CTLR_PRIVACY */

	return 0;
}

uint8_t ll_fal_add(bt_addr_le_t *addr)
{
#if defined(CONFIG_BT_BROADCASTER)
	if (ull_adv_filter_pol_get(0)) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_OBSERVER)
	if (ull_scan_filter_pol_get(0) & 0x1) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
#endif /* CONFIG_BT_OBSERVER */

	if (addr->type == ADDR_TYPE_ANON) {
		return 0;
	}

#if defined(CONFIG_BT_CTLR_PRIVACY)
	return fal_add(addr);
#else
	return filter_add(&fal_filter, addr->type, addr->a.val);
#endif /* CONFIG_BT_CTLR_PRIVACY */
}

uint8_t ll_fal_remove(bt_addr_le_t *addr)
{
#if defined(CONFIG_BT_BROADCASTER)
	if (ull_adv_filter_pol_get(0)) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_OBSERVER)
	if (ull_scan_filter_pol_get(0) & 0x1) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
#endif /* CONFIG_BT_OBSERVER */

	if (addr->type == ADDR_TYPE_ANON) {
		return 0;
	}

#if defined(CONFIG_BT_CTLR_PRIVACY)
	return fal_remove(addr);
#else
	return filter_remove(&fal_filter, addr->type, addr->a.val);
#endif /* CONFIG_BT_CTLR_PRIVACY */
}

#if defined(CONFIG_BT_CTLR_PRIVACY)
void ll_rl_id_addr_get(uint8_t rl_idx, uint8_t *id_addr_type, uint8_t *id_addr)
{
	LL_ASSERT(rl_idx < CONFIG_BT_CTLR_RL_SIZE);
	LL_ASSERT(rl[rl_idx].taken);

	*id_addr_type = rl[rl_idx].id_addr_type;
	(void)memcpy(id_addr, rl[rl_idx].id_addr.val, BDADDR_SIZE);
}

uint8_t ll_rl_size_get(void)
{
	return CONFIG_BT_CTLR_RL_SIZE;
}

uint8_t ll_rl_clear(void)
{
	if (!rl_access_check(false)) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	rl_clear();

	return 0;
}

uint8_t ll_rl_add(bt_addr_le_t *id_addr, const uint8_t pirk[IRK_SIZE],
	       const uint8_t lirk[IRK_SIZE])
{
	uint8_t i, j;

	if (!rl_access_check(false)) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	i = ull_filter_rl_find(id_addr->type, id_addr->a.val, &j);

	/* Duplicate check */
	if (i < ARRAY_SIZE(rl)) {
		return BT_HCI_ERR_INVALID_PARAM;
	} else if (j >= ARRAY_SIZE(rl)) {
		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	/* Device not found but empty slot found */
	i = j;

	bt_addr_copy(&rl[i].id_addr, &id_addr->a);
	rl[i].id_addr_type = id_addr->type & 0x1;
	rl[i].pirk = mem_nz((uint8_t *)pirk, IRK_SIZE);
	rl[i].lirk = mem_nz((uint8_t *)lirk, IRK_SIZE);
	if (rl[i].pirk) {
		/* cross-reference */
		rl[i].pirk_idx = peer_irk_count;
		peer_irk_rl_ids[peer_irk_count] = i;
		/* AAR requires big-endian IRKs */
		sys_memcpy_swap(peer_irks[peer_irk_count++], pirk, IRK_SIZE);
#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
		/* a new key was added, invalidate the known/unknown list */
		prpa_cache_clear();
		trpa_cache_clear();
#endif
	}
	if (rl[i].lirk) {
		(void)memcpy(rl[i].local_irk, lirk, IRK_SIZE);
		rl[i].local_rpa = NULL;
	}
	memset(rl[i].curr_rpa.val, 0x00, sizeof(rl[i].curr_rpa));
	rl[i].rpas_ready = 0U;
#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
	memset(rl[i].target_rpa.val, 0x00, sizeof(rl[i].target_rpa));
#endif
	/* Default to Network Privacy */
	rl[i].dev = 0U;
	/* Add reference to a Filter Accept List entry */
	j = fal_find(id_addr->type, id_addr->a.val, NULL);
	if (j < ARRAY_SIZE(fal)) {
		fal[j].rl_idx = i;
		rl[i].fal = 1U;
	} else {
		rl[i].fal = 0U;
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
	/* Add reference to a periodic list entry */
	j = pal_addr_find(id_addr->type, id_addr->a.val);
	if (j < ARRAY_SIZE(pal)) {
		pal[j].rl_idx = i;
		rl[i].pal = j + 1U;
	} else {
		rl[i].pal = 0U;
	}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */

	rl[i].taken = 1U;

	return 0;
}

uint8_t ll_rl_remove(bt_addr_le_t *id_addr)
{
	uint8_t i;

	if (!rl_access_check(false)) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* find the device and mark it as empty */
	i = ull_filter_rl_find(id_addr->type, id_addr->a.val, NULL);
	if (i < ARRAY_SIZE(rl)) {
		uint8_t j, k;

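		/* Keep peer_irks[] dense for the address resolver: move the
		 * last IRK into the slot being freed and patch the
		 * rl <-> peer_irks cross-references to match.
		 */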
		if (rl[i].pirk) {
			/* Swap with last item */
			uint8_t pi = rl[i].pirk_idx, pj = peer_irk_count - 1;

			if (pj && (pj < ARRAY_SIZE(peer_irks)) && (pi != pj)) {
				(void)memcpy(peer_irks[pi], peer_irks[pj],
					     IRK_SIZE);
				for (k = 0U;
				     k < CONFIG_BT_CTLR_RL_SIZE;
				     k++) {

					if (rl[k].taken && rl[k].pirk &&
					    rl[k].pirk_idx == pj) {
						rl[k].pirk_idx = pi;
						peer_irk_rl_ids[pi] = k;
						break;
					}
				}
			}
			peer_irk_count--;
		}

		/* Check if referenced by a Filter Accept List entry */
		j = fal_find(id_addr->type, id_addr->a.val, NULL);
		if (j < ARRAY_SIZE(fal)) {
			fal[j].rl_idx = FILTER_IDX_NONE;
		}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
		/* Check if referenced by a periodic list entry */
		j = pal_addr_find(id_addr->type, id_addr->a.val);
		if (j < ARRAY_SIZE(pal)) {
			pal[j].rl_idx = FILTER_IDX_NONE;
		}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */

		rl[i].taken = 0U;

		return 0;
	}

	return BT_HCI_ERR_UNKNOWN_CONN_ID;
}

void ll_rl_crpa_set(uint8_t id_addr_type, uint8_t *id_addr, uint8_t rl_idx,
		    uint8_t *crpa)
{
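	/* Only consider resolvable private addresses: an RPA has 0b01 in
	 * the two most significant bits of its most significant octet.
	 */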
	if ((crpa[5] & 0xc0) == 0x40) {

		if (id_addr) {
			/* find the device and return its RPA */
			rl_idx = ull_filter_rl_find(id_addr_type, id_addr,
						    NULL);
		}

		if (rl_idx < ARRAY_SIZE(rl) && rl[rl_idx].taken) {
			(void)memcpy(rl[rl_idx].curr_rpa.val, crpa,
				     sizeof(bt_addr_t));
#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
			conn_rpa_update(rl_idx);
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */
		}
	}
}

uint8_t ll_rl_crpa_get(bt_addr_le_t *id_addr, bt_addr_t *crpa)
{
	uint8_t i;

	/* find the device and return its RPA */
	i = ull_filter_rl_find(id_addr->type, id_addr->a.val, NULL);
	if (i < ARRAY_SIZE(rl) &&
	    mem_nz(rl[i].curr_rpa.val, sizeof(rl[i].curr_rpa.val))) {
		bt_addr_copy(crpa, &rl[i].curr_rpa);
		return 0;
	}

	return BT_HCI_ERR_UNKNOWN_CONN_ID;
}

uint8_t ll_rl_lrpa_get(bt_addr_le_t *id_addr, bt_addr_t *lrpa)
{
	uint8_t i;

	/* find the device and return the local RPA */
	i = ull_filter_rl_find(id_addr->type, id_addr->a.val, NULL);
	if (i < ARRAY_SIZE(rl)) {
		bt_addr_copy(lrpa, rl[i].local_rpa);
		return 0;
	}

	return BT_HCI_ERR_UNKNOWN_CONN_ID;
}

uint8_t ll_rl_enable(uint8_t enable)
{
	if (!rl_access_check(false)) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	switch (enable) {
	case BT_HCI_ADDR_RES_DISABLE:
		rl_enable = 0U;
		break;
	case BT_HCI_ADDR_RES_ENABLE:
		rl_enable = 1U;
		break;
	default:
		return BT_HCI_ERR_INVALID_PARAM;
	}

	return 0;
}

void ll_rl_timeout_set(uint16_t timeout)
{
	rpa_timeout_ms = timeout * 1000U;
}

uint8_t ll_priv_mode_set(bt_addr_le_t *id_addr, uint8_t mode)
{
	uint8_t i;

	if (!rl_access_check(false)) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* find the device and set its privacy mode */
	i = ull_filter_rl_find(id_addr->type, id_addr->a.val, NULL);
	if (i < ARRAY_SIZE(rl)) {
		switch (mode) {
		case BT_HCI_LE_PRIVACY_MODE_NETWORK:
			rl[i].dev = 0U;
			break;
		case BT_HCI_LE_PRIVACY_MODE_DEVICE:
			rl[i].dev = 1U;
			break;
		default:
			return BT_HCI_ERR_INVALID_PARAM;
		}
	} else {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	return 0;
}
#endif /* CONFIG_BT_CTLR_PRIVACY */
#endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
uint8_t ll_pal_size_get(void)
{
	return PAL_SIZE;
}

uint8_t ll_pal_clear(void)
{
	/* FIXME: Check and fail if Periodic Advertising Create Sync is pending.
	 */

	pal_clear();

	return 0;
}

uint8_t ll_pal_add(const bt_addr_le_t *const addr, const uint8_t sid)
{
	/* FIXME: Check and fail if Periodic Advertising Create Sync is pending.
	 */

	if (addr->type == ADDR_TYPE_ANON) {
		return 0;
	}

	return pal_add(addr, sid);
}

uint8_t ll_pal_remove(const bt_addr_le_t *const addr, const uint8_t sid)
{
	/* FIXME: Check and fail if Periodic Advertising Create Sync is pending.
	 */

	if (addr->type == ADDR_TYPE_ANON) {
		return 0;
	}

	return pal_remove(addr, sid);
}

bool ull_filter_ull_pal_addr_match(const uint8_t addr_type,
				   const uint8_t *const addr)
{
	for (int i = 0; i < PAL_SIZE; i++) {
		if (PAL_ADDR_MATCH(addr_type, addr)) {
			return true;
		}
	}

	return false;
}

bool ull_filter_ull_pal_match(const uint8_t addr_type,
			      const uint8_t *const addr, const uint8_t sid)
{
	for (int i = 0; i < PAL_SIZE; i++) {
		if (PAL_MATCH(addr_type, addr, sid)) {
			return true;
		}
	}

	return false;
}

#if defined(CONFIG_BT_CTLR_PRIVACY)
bool ull_filter_ull_pal_listed(const uint8_t rl_idx, uint8_t *const addr_type,
			      uint8_t *const addr)
{
	if (rl_idx >= ARRAY_SIZE(rl)) {
		return false;
	}

	LL_ASSERT(rl[rl_idx].taken);

	if (rl[rl_idx].pal) {
		uint8_t pal_idx = rl[rl_idx].pal - 1;

		*addr_type = pal[pal_idx].id_addr_type;
		(void)memcpy(addr, pal[pal_idx].id_addr.val, BDADDR_SIZE);

		return true;
	}

	return false;
}
#endif /* CONFIG_BT_CTLR_PRIVACY */
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */

void ull_filter_reset(bool init)
{
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
	pal_clear();
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */

#if defined(CONFIG_BT_CTLR_PRIVACY)
	fal_clear();

	rl_enable = 0U;
	rpa_timeout_ms = DEFAULT_RPA_TIMEOUT_MS;
	rpa_last_ms = -1;
	rl_clear();
#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
	prpa_cache_clear();
	trpa_cache_clear();
#endif
	if (init) {
		k_work_init_delayable(&rpa_work, rpa_timeout);
#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
		k_work_init(&(resolve_work.prpa_work), prpa_cache_resolve);
		k_work_init(&(t_work.target_work), target_resolve);
#endif
	} else {
		k_work_cancel_delayable(&rpa_work);
	}
#elif defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
	filter_clear(&fal_filter);
#endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
}

#if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
struct lll_filter *ull_filter_lll_get(bool filter)
{
#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (filter) {
		return &fal_filter;
	}
	return &rl_filter;
#else
	LL_ASSERT(filter);
	return &fal_filter;
#endif
}

uint8_t ull_filter_lll_fal_match(const struct lll_filter *const filter,
				 uint8_t addr_type, const uint8_t *const addr,
				 uint8_t *devmatch_id)
{
	*devmatch_id = filter_find(filter, addr_type, addr);

	return (*devmatch_id) == FILTER_IDX_NONE ? 0U : 1U;
}

#if defined(CONFIG_BT_CTLR_PRIVACY)
void ull_filter_adv_scan_state_cb(uint8_t bm)
{
	if (bm) {
		rpa_refresh_start();
	} else {
		rpa_refresh_stop();
	}
}

void ull_filter_adv_update(uint8_t adv_fp)
{
	/* Clear before populating filter */
	filter_clear(&fal_filter);

	/* enabling advertising */
	if (adv_fp &&
	    (!IS_ENABLED(CONFIG_BT_OBSERVER) ||
	     !(ull_scan_filter_pol_get(0) & 0x1))) {
		/* filter accept list not in use, update FAL */
		fal_update();
	}

	/* Clear before populating rl filter */
	filter_clear(&rl_filter);

	if (rl_enable &&
	    (!IS_ENABLED(CONFIG_BT_OBSERVER) || !ull_scan_is_enabled(0))) {
		/* rl not in use, update resolving list LUT */
		rl_update();
	}
}

void ull_filter_scan_update(uint8_t scan_fp)
{
	/* Clear before populating filter */
	filter_clear(&fal_filter);

	/* enabling scanning */
	if ((scan_fp & 0x1) &&
	    (!IS_ENABLED(CONFIG_BT_BROADCASTER) ||
	     !ull_adv_filter_pol_get(0))) {
		/* Filter Accept List not in use, update FAL */
		fal_update();
	}

	/* Clear before populating rl filter */
	filter_clear(&rl_filter);

	if (rl_enable &&
	    (!IS_ENABLED(CONFIG_BT_BROADCASTER) || !ull_adv_is_enabled(0))) {
		/* rl not in use, update resolving list LUT */
		rl_update();
	}
}

void ull_filter_rpa_update(bool timeout)
{
	uint8_t i;
	int err;
	int64_t now = k_uptime_get();
	bool all = timeout || (rpa_last_ms == -1) ||
		   (now - rpa_last_ms >= rpa_timeout_ms);
	LOG_DBG("");

	for (i = 0U; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
		if ((rl[i].taken) && (all || !rl[i].rpas_ready)) {

			if (rl[i].pirk) {
				uint8_t irk[IRK_SIZE];

				/* TODO: move this swap to the driver level */
				sys_memcpy_swap(irk, peer_irks[rl[i].pirk_idx],
						IRK_SIZE);
				err = bt_rpa_create(irk, &rl[i].peer_rpa);
				LL_ASSERT(!err);
#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
				/* a new key was added,
				 * invalidate the known/unknown peer RPA cache
				 */
				prpa_cache_clear();
				trpa_cache_clear();
#endif
			}

			if (rl[i].lirk) {
				bt_addr_t rpa;

				err = bt_rpa_create(rl[i].local_irk, &rpa);
				LL_ASSERT(!err);
				/* pointer read/write assumed to be atomic
				 * so that if ISR fires the local_rpa pointer
				 * will always point to a valid full RPA
				 */
				rl[i].local_rpa = &rpa;
				bt_addr_copy(&local_rpas[i], &rpa);
				rl[i].local_rpa = &local_rpas[i];
			}

			rl[i].rpas_ready = 1U;
		}
	}

	if (all) {
		rpa_last_ms = now;
	}

	if (timeout) {
#if defined(CONFIG_BT_BROADCASTER)
		uint8_t handle;

		for (handle = 0U; handle < BT_CTLR_ADV_SET; handle++) {
			struct ll_adv_set *adv;

			adv = ull_adv_is_enabled_get(handle);
			if (adv) {
				rpa_adv_refresh(adv);
			}
		}
#endif
	}
}

#if defined(CONFIG_BT_BROADCASTER)
const uint8_t *ull_filter_adva_get(uint8_t rl_idx)
{
	/* AdvA */
	if (rl_idx < ARRAY_SIZE(rl) && rl[rl_idx].lirk) {
		LL_ASSERT(rl[rl_idx].rpas_ready);
		return rl[rl_idx].local_rpa->val;
	}

	return NULL;
}

const uint8_t *ull_filter_tgta_get(uint8_t rl_idx)
{
	/* TargetA */
	if (rl_idx < ARRAY_SIZE(rl) && rl[rl_idx].pirk) {
		return rl[rl_idx].peer_rpa.val;
	}

	return NULL;
}
#endif /* CONFIG_BT_BROADCASTER */

uint8_t ull_filter_rl_find(uint8_t id_addr_type, uint8_t const *const id_addr,
			   uint8_t *const free_idx)
{
	uint8_t i;

	if (free_idx) {
		*free_idx = FILTER_IDX_NONE;
	}

	for (i = 0U; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
		if (LIST_MATCH(rl, i, id_addr_type, id_addr)) {
			return i;
		} else if (free_idx && !rl[i].taken &&
			   (*free_idx == FILTER_IDX_NONE)) {
			*free_idx = i;
		}
	}

	return FILTER_IDX_NONE;
}

bool ull_filter_lll_lrpa_used(uint8_t rl_idx)
{
	return rl_idx < ARRAY_SIZE(rl) && rl[rl_idx].lirk;
}

bt_addr_t *ull_filter_lll_lrpa_get(uint8_t rl_idx)
{
	if ((rl_idx >= ARRAY_SIZE(rl)) || !rl[rl_idx].lirk ||
	    !rl[rl_idx].rpas_ready) {
		return NULL;
	}

	return rl[rl_idx].local_rpa;
}

bt_addr_t *ull_filter_lll_id_addr_get(uint8_t rl_idx, uint8_t *id_addr_type)
{
	struct lll_resolve_list *rl_entry;

	if (rl_idx >= ARRAY_SIZE(rl)) {
		return NULL;
	}

	rl_entry = &rl[rl_idx];
	*id_addr_type = rl_entry->id_addr_type;

	return &rl_entry->id_addr;
}

uint8_t *ull_filter_lll_irks_get(uint8_t *count)
{
	*count = peer_irk_count;
	return (uint8_t *)peer_irks;
}

uint8_t ull_filter_lll_rl_idx(bool filter, uint8_t devmatch_id)
{
	uint8_t i;

	if (filter) {
		LL_ASSERT(devmatch_id < ARRAY_SIZE(fal));
		LL_ASSERT(fal[devmatch_id].taken);
		i = fal[devmatch_id].rl_idx;
	} else {
		LL_ASSERT(devmatch_id < ARRAY_SIZE(rl));
		i = devmatch_id;
		LL_ASSERT(rl[i].taken);
	}

	return i;
}

uint8_t ull_filter_lll_rl_irk_idx(uint8_t irkmatch_id)
{
	uint8_t i;

	LL_ASSERT(irkmatch_id < peer_irk_count);
	i = peer_irk_rl_ids[irkmatch_id];
	LL_ASSERT(i < CONFIG_BT_CTLR_RL_SIZE);
	LL_ASSERT(rl[i].taken);

	return i;
}

bool ull_filter_lll_irk_in_fal(uint8_t rl_idx)
{
	if (rl_idx >= ARRAY_SIZE(rl)) {
		return false;
	}

	LL_ASSERT(rl[rl_idx].taken);

	return rl[rl_idx].fal;
}

struct lll_fal *ull_filter_lll_fal_get(void)
{
	return fal;
}

struct lll_resolve_list *ull_filter_lll_resolve_list_get(void)
{
	return rl;
}

bool ull_filter_lll_rl_idx_allowed(uint8_t irkmatch_ok, uint8_t rl_idx)
{
	/* If AR is disabled or we don't know the device or we matched an IRK
	 * then we're all set.
	 */
	if (!rl_enable || rl_idx >= ARRAY_SIZE(rl) || irkmatch_ok) {
		return true;
	}

	LL_ASSERT(rl_idx < CONFIG_BT_CTLR_RL_SIZE);
	LL_ASSERT(rl[rl_idx].taken);

	return !rl[rl_idx].pirk || rl[rl_idx].dev;
}

bool ull_filter_lll_rl_addr_allowed(uint8_t id_addr_type,
				    const uint8_t *id_addr,
				    uint8_t *const rl_idx)
{
	uint8_t i, j;

	/* If we matched an IRK then we're all set. No HW
	 * filters are used in this case.
	 */
	if (*rl_idx != FILTER_IDX_NONE) {
		return true;
	}

	for (i = 0U; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
		if (rl[i].taken && (rl[i].id_addr_type == id_addr_type)) {
			uint8_t *addr = rl[i].id_addr.val;

			for (j = 0U; j < BDADDR_SIZE; j++) {
				if (addr[j] != id_addr[j]) {
					break;
				}
			}

			if (j == BDADDR_SIZE) {
				*rl_idx = i;
				return !rl[i].pirk || rl[i].dev;
			}
		}
	}

	return true;
}

bool ull_filter_lll_rl_addr_resolve(uint8_t id_addr_type,
				    const uint8_t *id_addr, uint8_t rl_idx)
{
	/* Unable to resolve if AR is disabled, no RL entry or no local IRK */
	if (!rl_enable || rl_idx >= ARRAY_SIZE(rl) || !rl[rl_idx].lirk) {
		return false;
	}

	if ((id_addr_type != 0U) && ((id_addr[5] & 0xc0) == 0x40)) {
		return bt_rpa_irk_matches(rl[rl_idx].local_irk,
					  (bt_addr_t *)id_addr);
	}

	return false;
}

bool ull_filter_lll_rl_enabled(void)
{
	return rl_enable;
}

#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
uint8_t ull_filter_deferred_resolve(bt_addr_t *rpa, resolve_callback_t cb)
{
	if (rl_enable) {
		if (!k_work_is_pending(&(resolve_work.prpa_work))) {
			/* copy input param to work variable */
			(void)memcpy(resolve_work.rpa.val, rpa->val,
				     sizeof(bt_addr_t));
			resolve_work.cb = cb;

			k_work_submit(&(resolve_work.prpa_work));

			return 1;
		}
	}

	return 0;
}

uint8_t ull_filter_deferred_targeta_resolve(bt_addr_t *rpa, uint8_t rl_idx,
					 resolve_callback_t cb)
{
	if (rl_enable) {
		if (!k_work_is_pending(&(t_work.target_work))) {
			/* copy input param to work variable */
			(void)memcpy(t_work.rpa.val, rpa->val,
				     sizeof(bt_addr_t));
			t_work.cb = cb;
			t_work.idx = rl_idx;

			k_work_submit(&(t_work.target_work));

			return 1;
		}
	}
	return 0;
}
#endif /* CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY */

static void fal_clear(void)
{
	for (int i = 0; i < CONFIG_BT_CTLR_FAL_SIZE; i++) {
		uint8_t j = fal[i].rl_idx;

		if (j < ARRAY_SIZE(rl)) {
			rl[j].fal = 0U;
		}
		fal[i].taken = 0U;
	}
}

static uint8_t fal_find(uint8_t addr_type, const uint8_t *const addr,
			uint8_t *const free_idx)
{
	int i;

	if (free_idx) {
		*free_idx = FILTER_IDX_NONE;
	}

	for (i = 0; i < CONFIG_BT_CTLR_FAL_SIZE; i++) {
		if (LIST_MATCH(fal, i, addr_type, addr)) {
			return i;
		} else if (free_idx && !fal[i].taken &&
			   (*free_idx == FILTER_IDX_NONE)) {
			*free_idx = i;
		}
	}

	return FILTER_IDX_NONE;
}

static uint32_t fal_add(bt_addr_le_t *id_addr)
{
	uint8_t i, j;

	i = fal_find(id_addr->type, id_addr->a.val, &j);

	/* Duplicate check */
	if (i < ARRAY_SIZE(fal)) {
		return 0;
	} else if (j >= ARRAY_SIZE(fal)) {
		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	i = j;

	fal[i].id_addr_type = id_addr->type & 0x1;
	bt_addr_copy(&fal[i].id_addr, &id_addr->a);
	/* Get index to Resolving List if applicable */
	j = ull_filter_rl_find(id_addr->type, id_addr->a.val, NULL);
	if (j < ARRAY_SIZE(rl)) {
		fal[i].rl_idx = j;
		rl[j].fal = 1U;
	} else {
		fal[i].rl_idx = FILTER_IDX_NONE;
	}
	fal[i].taken = 1U;

	return 0;
}

static uint32_t fal_remove(bt_addr_le_t *id_addr)
{
	/* find the device and mark it as empty */
	uint8_t i = fal_find(id_addr->type, id_addr->a.val, NULL);

	if (i < ARRAY_SIZE(fal)) {
		uint8_t j = fal[i].rl_idx;

		if (j < ARRAY_SIZE(rl)) {
			rl[j].fal = 0U;
		}
		fal[i].taken = 0U;

		return 0;
	}

	return BT_HCI_ERR_UNKNOWN_CONN_ID;
}

static void fal_update(void)
{
	uint8_t i;

	/* Populate filter from fal peers */
	for (i = 0U; i < CONFIG_BT_CTLR_FAL_SIZE; i++) {
		uint8_t j;

		if (!fal[i].taken) {
			continue;
		}

		j = fal[i].rl_idx;

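		/* Insert only entries that can be matched by address:
		 * resolution disabled, no resolving list entry, no peer IRK,
		 * or Device Privacy mode. Peers with a peer IRK under Network
		 * Privacy must be resolved, so they are excluded from the
		 * address-based accept list filter.
		 */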
		if (!rl_enable || j >= ARRAY_SIZE(rl) || !rl[j].pirk ||
		    rl[j].dev) {
			filter_insert(&fal_filter, i, fal[i].id_addr_type,
				      fal[i].id_addr.val);
		}
	}
}

static void rl_update(void)
{
	uint8_t i;

	/* Populate filter from rl peers */
	for (i = 0U; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
		if (rl[i].taken) {
			filter_insert(&rl_filter, i, rl[i].id_addr_type,
				      rl[i].id_addr.val);
		}
	}
}

#if defined(CONFIG_BT_BROADCASTER)
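/* Refresh AdvA (and TargetA, where present) in the advertising PDU after
 * an RPA rotation. The updated PDU is built in the idle half of the PDU
 * double buffer and then enqueued, so the LLL always transmits a
 * complete PDU.
 */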
static void rpa_adv_refresh(struct ll_adv_set *adv)
{
	struct lll_adv_aux *lll_aux;
	struct pdu_adv *prev;
	struct lll_adv *lll;
	struct pdu_adv *pdu;
	uint8_t pri_idx;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	uint8_t sec_idx = UINT8_MAX;
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	if (adv->own_addr_type != BT_HCI_OWN_ADDR_RPA_OR_PUBLIC &&
	    adv->own_addr_type != BT_HCI_OWN_ADDR_RPA_OR_RANDOM) {
		return;
	}

	lll = &adv->lll;
	if (lll->rl_idx >= ARRAY_SIZE(rl)) {
		return;
	}

	pri_idx = UINT8_MAX;
	lll_aux = NULL;
	pdu = NULL;
	prev = lll_adv_data_peek(lll);

	if (false) {

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	} else if (prev->type == PDU_ADV_TYPE_EXT_IND) {
		struct pdu_adv_com_ext_adv *pri_com_hdr;
		struct pdu_adv_ext_hdr pri_hdr_flags;
		struct pdu_adv_ext_hdr *pri_hdr;

		/* Pick the primary PDU header flags */
		pri_com_hdr = (void *)&prev->adv_ext_ind;
		pri_hdr = (void *)pri_com_hdr->ext_hdr_adv_data;
		if (pri_com_hdr->ext_hdr_len) {
			pri_hdr_flags = *pri_hdr;
		} else {
			*(uint8_t *)&pri_hdr_flags = 0U;
		}

		/* AdvA, in primary or auxiliary PDU */
		if (pri_hdr_flags.adv_addr) {
			pdu = lll_adv_data_alloc(lll, &pri_idx);
			(void)memcpy(pdu, prev, (PDU_AC_LL_HEADER_SIZE +
						 prev->len));

#if (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
		} else if (pri_hdr_flags.aux_ptr) {
			struct pdu_adv_com_ext_adv *sec_com_hdr;
			struct pdu_adv_ext_hdr sec_hdr_flags;
			struct pdu_adv_ext_hdr *sec_hdr;
			struct pdu_adv *sec_pdu;

			lll_aux = lll->aux;
			sec_pdu = lll_adv_aux_data_peek(lll_aux);

			sec_com_hdr = (void *)&sec_pdu->adv_ext_ind;
			sec_hdr = (void *)sec_com_hdr->ext_hdr_adv_data;
			if (sec_com_hdr->ext_hdr_len) {
				sec_hdr_flags = *sec_hdr;
			} else {
				*(uint8_t *)&sec_hdr_flags = 0U;
			}

			if (sec_hdr_flags.adv_addr) {
				pdu = lll_adv_aux_data_alloc(lll_aux, &sec_idx);
				(void)memcpy(pdu, sec_pdu,
					     (PDU_AC_LL_HEADER_SIZE +
					      sec_pdu->len));
			}
#endif /* (CONFIG_BT_CTLR_ADV_AUX_SET > 0) */
		}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	} else {
		pdu = lll_adv_data_alloc(lll, &pri_idx);
		(void)memcpy(pdu, prev, (PDU_AC_LL_HEADER_SIZE + prev->len));
	}

	if (pdu) {
		ull_adv_pdu_update_addrs(adv, pdu);

		if (pri_idx != UINT8_MAX) {
			lll_adv_data_enqueue(lll, pri_idx);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		} else {
			lll_adv_aux_data_enqueue(lll_aux, sec_idx);
#endif /* CONFIG_BT_CTLR_ADV_EXT */

		}
	}
}
#endif /* CONFIG_BT_BROADCASTER */

static void rl_clear(void)
{
	for (uint8_t i = 0; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
		rl[i].taken = 0U;
	}

	peer_irk_count = 0U;
}

static int rl_access_check(bool check_ar)
{
	if (check_ar) {
		/* If address resolution is disabled, allow immediately */
		if (!rl_enable) {
			return -1;
		}
	}

	/* NOTE: Allowed when passive scanning, otherwise deny if advertising,
	 *       active scanning, initiating or periodic sync create is active.
	 */
	return ((IS_ENABLED(CONFIG_BT_BROADCASTER) && ull_adv_is_enabled(0)) ||
		(IS_ENABLED(CONFIG_BT_OBSERVER) &&
		 (ull_scan_is_enabled(0) & ~ULL_SCAN_IS_PASSIVE)))
		? 0 : 1;
}

static void rpa_timeout(struct k_work *work)
{
	ull_filter_rpa_update(true);
	k_work_schedule(&rpa_work, K_MSEC(rpa_timeout_ms));
}

static void rpa_refresh_start(void)
{
	LOG_DBG("");
	k_work_schedule(&rpa_work, K_MSEC(rpa_timeout_ms));
}

static void rpa_refresh_stop(void)
{
	k_work_cancel_delayable(&rpa_work);
}

#else /* !CONFIG_BT_CTLR_PRIVACY */

static uint32_t filter_add(struct lll_filter *filter, uint8_t addr_type,
			uint8_t *bdaddr)
{
	int index;

	if (filter->enable_bitmask == LLL_FILTER_BITMASK_ALL) {
		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

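	/* Find the first clear bit; the capacity check above guarantees a
	 * free slot exists.
	 */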
	for (index = 0;
	     (filter->enable_bitmask & BIT(index));
	     index++) {
	}

	filter_insert(filter, index, addr_type, bdaddr);
	return 0;
}

static uint32_t filter_remove(struct lll_filter *filter, uint8_t addr_type,
			   uint8_t *bdaddr)
{
	int index;

	index = filter_find(filter, addr_type, bdaddr);
	if (index == FILTER_IDX_NONE) {
		return BT_HCI_ERR_INVALID_PARAM;
	}

	filter->enable_bitmask &= ~BIT(index);
	filter->addr_type_bitmask &= ~BIT(index);

	return 0;
}
#endif /* !CONFIG_BT_CTLR_PRIVACY */

static uint32_t filter_find(const struct lll_filter *const filter,
			    uint8_t addr_type, const uint8_t *const bdaddr)
{
	int index;

	if (!filter->enable_bitmask) {
		return FILTER_IDX_NONE;
	}

	index = LLL_FILTER_SIZE;
	while (index--) {
		if ((filter->enable_bitmask & BIT(index)) &&
		    (((filter->addr_type_bitmask >> index) & 0x01) ==
		     (addr_type & 0x01)) &&
		    !memcmp(filter->bdaddr[index], bdaddr, BDADDR_SIZE)) {
			return index;
		}
	}

	return FILTER_IDX_NONE;
}

static void filter_insert(struct lll_filter *const filter, int index,
			  uint8_t addr_type, const uint8_t *const bdaddr)
{
	filter->enable_bitmask |= BIT(index);
	filter->addr_type_bitmask |= ((addr_type & 0x01) << index);
	(void)memcpy(&filter->bdaddr[index][0], bdaddr, BDADDR_SIZE);
}

static void filter_clear(struct lll_filter *filter)
{
	filter->enable_bitmask = 0;
	filter->addr_type_bitmask = 0;
}
#endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST)
static void pal_clear(void)
{
	for (int i = 0; i < PAL_SIZE; i++) {

#if defined(CONFIG_BT_CTLR_PRIVACY)
		uint8_t j = pal[i].rl_idx;

		if (j < ARRAY_SIZE(rl)) {
			rl[j].pal = 0U;
		}
#endif /* CONFIG_BT_CTLR_PRIVACY */

		pal[i].taken = 0U;
	}
}

#if defined(CONFIG_BT_CTLR_PRIVACY)
static uint8_t pal_addr_find(const uint8_t addr_type, const uint8_t *const addr)
{
	for (int i = 0; i < PAL_SIZE; i++) {
		if (PAL_ADDR_MATCH(addr_type, addr)) {
			return i;
		}
	}

	return FILTER_IDX_NONE;
}
#endif /* CONFIG_BT_CTLR_PRIVACY */

static uint8_t pal_find(const uint8_t addr_type, const uint8_t *const addr,
			const uint8_t sid, uint8_t *const free_idx)
{
	int i;

	if (free_idx) {
		*free_idx = FILTER_IDX_NONE;
	}

	for (i = 0; i < PAL_SIZE; i++) {
		if (PAL_MATCH(addr_type, addr, sid)) {
			return i;
		} else if (free_idx && !pal[i].taken &&
			   (*free_idx == FILTER_IDX_NONE)) {
			*free_idx = i;
		}
	}

	return FILTER_IDX_NONE;
}

static uint32_t pal_add(const bt_addr_le_t *const id_addr, const uint8_t sid)
{
	uint8_t i, j;

	i = pal_find(id_addr->type, id_addr->a.val, sid, &j);

	/* Duplicate check */
	if (i < PAL_SIZE) {
		return BT_HCI_ERR_INVALID_PARAM;
	} else if (j >= PAL_SIZE) {
		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	i = j;

	pal[i].id_addr_type = id_addr->type & 0x1;
	bt_addr_copy(&pal[i].id_addr, &id_addr->a);
	pal[i].sid = sid;

#if defined(CONFIG_BT_CTLR_PRIVACY)
	/* Get index to Resolving List if applicable */
	j = ull_filter_rl_find(id_addr->type, id_addr->a.val, NULL);
	if (j < ARRAY_SIZE(rl)) {
		pal[i].rl_idx = j;
		rl[j].pal = i + 1U;
	} else {
		pal[i].rl_idx = FILTER_IDX_NONE;
	}
#endif /* CONFIG_BT_CTLR_PRIVACY */

	pal[i].taken = 1U;

	return 0;
}

static uint32_t pal_remove(const bt_addr_le_t *const id_addr, const uint8_t sid)
{
	/* find the device and mark it as empty */
	uint8_t i = pal_find(id_addr->type, id_addr->a.val, sid, NULL);

	if (i < PAL_SIZE) {

#if defined(CONFIG_BT_CTLR_PRIVACY)
		uint8_t j = pal[i].rl_idx;

		if (j < ARRAY_SIZE(rl)) {
			rl[j].pal = 0U;
		}
#endif /* CONFIG_BT_CTLR_PRIVACY */

		pal[i].taken = 0U;

		return 0;
	}

	return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST */

#if defined(CONFIG_BT_CTLR_PRIVACY) && \
	defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
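/* An RPA in the resolving list has just been resolved; if an existing
 * connection still stores that RPA as its peer address, replace it with
 * the peer's identity address.
 */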
static void conn_rpa_update(uint8_t rl_idx)
{
	uint16_t handle;

	for (handle = 0U; handle < CONFIG_BT_MAX_CONN; handle++) {
		struct ll_conn *conn = ll_connected_get(handle);

		/* The RPA of the connection matches the RPA that was just
		 * resolved
		 */
		if (conn && !memcmp(conn->peer_id_addr, rl[rl_idx].curr_rpa.val,
				    BDADDR_SIZE)) {
			(void)memcpy(conn->peer_id_addr, rl[rl_idx].id_addr.val,
				     BDADDR_SIZE);
			break;
		}
	}
}
#endif /* CONFIG_BT_CTLR_PRIVACY && CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */

#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
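/* Software-deferred privacy: without hardware address resolution, RPAs
 * are resolved in the system work queue. LLL hands an RPA over via
 * ull_filter_deferred_resolve()/ull_filter_deferred_targeta_resolve(),
 * results are cached below, and the callback is tail-chained back into
 * LLL through a mayfly.
 */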
static void target_resolve(struct k_work *work)
{
	uint8_t j, idx;
	bt_addr_t *search_rpa;
	struct target_resolve_work *twork;
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, 0, NULL};

	twork = CONTAINER_OF(work, struct target_resolve_work, target_work);
	idx = twork->idx;
	search_rpa = &(twork->rpa);

	if (rl[idx].taken && bt_addr_eq(&(rl[idx].target_rpa), search_rpa)) {
		j = idx;
	} else {
		uint8_t i;

		/* No match - so not in list; Need to see if we can resolve */

		i = trpa_cache_find(search_rpa, idx);
		if (i != FILTER_IDX_NONE) {
			/* Found a known unknown - do nothing */
			j = FILTER_IDX_NONE;
		} else if (bt_rpa_irk_matches(rl[idx].local_irk, search_rpa)) {
			/* Could resolve, store RPA */
			(void)memcpy(rl[idx].target_rpa.val, search_rpa->val,
				     sizeof(bt_addr_t));
			j = idx;
		} else if (rl[idx].taken) {
			/* No match - thus cannot resolve, we have an unknown
			 * so insert in known unknown list
			 */
			trpa_cache_add(search_rpa, idx);
			j = FILTER_IDX_NONE;
		} else {
			/* Could not resolve, and not in table */
			j = FILTER_IDX_NONE;
		}
	}

	/* Kick the callback in LLL (using the mayfly, tailchain it)
	 * Pass param FILTER_IDX_NONE if RPA can not be resolved,
	 * or index in cache if it can be resolved
	 */
	if (twork->cb) {
		mfy.fp = twork->cb;
		mfy.param = (void *) ((unsigned int) j);
		(void)mayfly_enqueue(TICKER_USER_ID_THREAD,
				     TICKER_USER_ID_LLL, 1, &mfy);
	}
}

static uint8_t prpa_cache_try_resolve(bt_addr_t *rpa)
{
	uint8_t pi;
	uint8_t lpirk[IRK_SIZE];

	for (uint8_t i = 0U; i < CONFIG_BT_CTLR_RL_SIZE; i++) {
		if (rl[i].taken && rl[i].pirk) {
			pi = rl[i].pirk_idx;
			sys_memcpy_swap(lpirk, peer_irks[pi], IRK_SIZE);
			if (bt_rpa_irk_matches(lpirk, rpa)) {
				return i;
			}
		}
	}

	return FILTER_IDX_NONE;
}

static void prpa_cache_resolve(struct k_work *work)
{
	uint8_t i, j;
	bt_addr_t *search_rpa;
	struct prpa_resolve_work *rwork;
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, 0, NULL};

	rwork = CONTAINER_OF(work, struct prpa_resolve_work, prpa_work);
	search_rpa = &(rwork->rpa);

	i = prpa_cache_find(search_rpa);

	if (i == FILTER_IDX_NONE) {
		/* No match - so not in known unknown list
		 * Need to see if we can resolve
		 */
		j = prpa_cache_try_resolve(search_rpa);

		if (j == FILTER_IDX_NONE) {
			/* No match - thus cannot resolve, we have an unknown
			 * so insert in known unknown list
			 */
			prpa_cache_add(search_rpa);
		} else {
			/* Address could be resolved, so update current RPA
			 * in list
			 */
			(void)memcpy(rl[j].curr_rpa.val, search_rpa->val,
				     sizeof(bt_addr_t));
#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
			conn_rpa_update(j);
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */
		}

	} else {
		/* Found a known unknown - do nothing */
		j = FILTER_IDX_NONE;
	}

	/* Kick the callback in LLL (using the mayfly, tailchain it)
	 * Pass param FILTER_IDX_NONE if RPA can not be resolved,
	 * or index in cache if it can be resolved
	 */
	if (rwork->cb) {
		mfy.fp = rwork->cb;
		mfy.param = (void *) ((unsigned int) j);
		(void)mayfly_enqueue(TICKER_USER_ID_THREAD,
				     TICKER_USER_ID_LLL, 1, &mfy);
	}
}

static void prpa_cache_clear(void)
{
	/* Note the first element will not be in use before wrap around
	 * is reached.
	 * The first element in actual use will be at index 1.
	 * No element is wasted with this implementation, as
	 * element 0 will eventually be allocated.
	 */
	newest_prpa = 0U;

	for (uint8_t i = 0; i < CONFIG_BT_CTLR_RPA_CACHE_SIZE; i++) {
		prpa_cache[i].taken = 0U;
	}
}

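/* The "known unknown" caches are circular buffers: the newest entry
 * overwrites the oldest once the cache is full. Caching RPAs that failed
 * to resolve avoids repeating the resolution attempt for every PDU
 * received from the same address.
 */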
static void prpa_cache_add(bt_addr_t *rpa)
{
	newest_prpa = (newest_prpa + 1) % CONFIG_BT_CTLR_RPA_CACHE_SIZE;

	(void)memcpy(prpa_cache[newest_prpa].rpa.val, rpa->val,
		     sizeof(bt_addr_t));
	prpa_cache[newest_prpa].taken = 1U;
}

static uint8_t prpa_cache_find(bt_addr_t *rpa)
{
	for (uint8_t i = 0; i < CONFIG_BT_CTLR_RPA_CACHE_SIZE; i++) {
		if (prpa_cache[i].taken &&
		    bt_addr_eq(&(prpa_cache[i].rpa), rpa)) {
			return i;
		}
	}
	return FILTER_IDX_NONE;
}

const struct lll_prpa_cache *ull_filter_lll_prpa_cache_get(void)
{
	return prpa_cache;
}

static void trpa_cache_clear(void)
{
	/* Note the first element will not be in use before wrap around
	 * is reached.
	 * The first element in actual use will be at index 1.
	 * No element is wasted with this implementation, as
	 * element 0 will eventually be allocated.
	 */
	newest_trpa = 0U;

	for (uint8_t i = 0; i < CONFIG_BT_CTLR_TRPA_CACHE_SIZE; i++) {
		trpa_cache[i].rl_idx = FILTER_IDX_NONE;
	}
}

static void trpa_cache_add(bt_addr_t *rpa, uint8_t rl_idx)
{
	newest_trpa = (newest_trpa + 1) % CONFIG_BT_CTLR_TRPA_CACHE_SIZE;

	(void)memcpy(trpa_cache[newest_trpa].rpa.val, rpa->val,
		     sizeof(bt_addr_t));
	trpa_cache[newest_trpa].rl_idx = rl_idx;
}

static uint8_t trpa_cache_find(bt_addr_t *rpa, uint8_t rl_idx)
{
	for (uint8_t i = 0; i < CONFIG_BT_CTLR_TRPA_CACHE_SIZE; i++) {
		if (trpa_cache[i].rl_idx == rl_idx &&
		    bt_addr_eq(&(trpa_cache[i].rpa), rpa)) {
			return i;
		}
	}
	return FILTER_IDX_NONE;
}

const struct lll_trpa_cache *ull_filter_lll_trpa_cache_get(void)
{
	return trpa_cache;
}

#endif /* CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY */