/*
 * Copyright (c) 2018-2021 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

#include <zephyr/bluetooth/hci_types.h>
#include <zephyr/sys/byteorder.h>

#include "hal/cpu.h"
#include "hal/ccm.h"
#include "hal/radio.h"
#include "hal/ticker.h"

#include "util/util.h"
#include "util/mem.h"
#include "util/memq.h"
#include "util/mfifo.h"
#include "util/mayfly.h"
#include "util/dbuf.h"

#include "ticker/ticker.h"

#include "pdu_df.h"
#include "pdu_vendor.h"
#include "pdu.h"

#include "lll.h"
#include "lll_vendor.h"
#include "lll_clock.h"
#include "lll_adv_types.h"
#include "lll_adv.h"
#include "lll_adv_pdu.h"
#include "lll_adv_aux.h"
#include "lll_adv_sync.h"
#include "lll_df_types.h"
#include "lll_conn.h"
#include "lll_chan.h"
#include "lll_filter.h"

#include "lll_internal.h"
#include "lll_tim_internal.h"
#include "lll_adv_internal.h"
#include "lll_prof_internal.h"
#include "lll_df_internal.h"

#include "hal/debug.h"

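/* Maximum time a thread context waits for LLL to return a stale PDU buffer;
 * the k_sem_take() callers below assert if this timeout expires, as that
 * implies buffers are leaking or LLL has stalled.
 */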
#define PDU_FREE_TIMEOUT K_SECONDS(5)

static int init_reset(void);
static void pdu_free_sem_give(void);

#if defined(CONFIG_BT_CTLR_ADV_EXT_PDU_EXTRA_DATA_MEMORY)
static inline void adv_extra_data_release(struct lll_adv_pdu *pdu, int idx);
static void *adv_extra_data_allocate(struct lll_adv_pdu *pdu, uint8_t last);
static int adv_extra_data_free(struct lll_adv_pdu *pdu, uint8_t last);
static void extra_data_free_sem_give(void);
#endif /* CONFIG_BT_CTLR_ADV_EXT_PDU_EXTRA_DATA_MEMORY */

static int prepare_cb(struct lll_prepare_param *p);
static int is_abort_cb(void *next, void *curr,
		       lll_prepare_cb_t *resume_cb);
static void abort_cb(struct lll_prepare_param *prepare_param, void *param);
static void isr_tx(void *param);
static void isr_rx(void *param);
static void isr_done(void *param);
static void isr_abort(void *param);
static struct pdu_adv *chan_prepare(struct lll_adv *lll);

static inline int isr_rx_pdu(struct lll_adv *lll,
			     uint8_t devmatch_ok, uint8_t devmatch_id,
			     uint8_t irkmatch_ok, uint8_t irkmatch_id,
			     uint8_t rssi_ready);
static bool isr_rx_sr_adva_check(uint8_t tx_addr, uint8_t *addr,
				 struct pdu_adv *sr);


static inline bool isr_rx_ci_tgta_check(struct lll_adv *lll,
					uint8_t rx_addr, uint8_t *tgt_addr,
					struct pdu_adv *ci, uint8_t rl_idx);
static inline bool isr_rx_ci_adva_check(uint8_t tx_addr, uint8_t *addr,
					struct pdu_adv *ci);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
#define PAYLOAD_BASED_FRAG_COUNT \
		DIV_ROUND_UP(CONFIG_BT_CTLR_ADV_DATA_LEN_MAX, \
			     PDU_AC_PAYLOAD_SIZE_MAX)
#define PAYLOAD_FRAG_COUNT \
		MAX(PAYLOAD_BASED_FRAG_COUNT, BT_CTLR_DF_PER_ADV_CTE_NUM_MAX)
#define BT_CTLR_ADV_AUX_SET  CONFIG_BT_CTLR_ADV_AUX_SET
#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
#define BT_CTLR_ADV_SYNC_SET CONFIG_BT_CTLR_ADV_SYNC_SET
#else /* !CONFIG_BT_CTLR_ADV_PERIODIC */
#define BT_CTLR_ADV_SYNC_SET 0
#endif /* !CONFIG_BT_CTLR_ADV_PERIODIC */
#else
#define PAYLOAD_BASED_FRAG_COUNT 1
#define PAYLOAD_FRAG_COUNT       (PAYLOAD_BASED_FRAG_COUNT)
#define BT_CTLR_ADV_AUX_SET  0
#define BT_CTLR_ADV_SYNC_SET 0
#endif
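
/* Example (illustrative values): assuming PDU_AC_PAYLOAD_SIZE_MAX is 255
 * bytes, CONFIG_BT_CTLR_ADV_DATA_LEN_MAX=1650 gives
 * PAYLOAD_BASED_FRAG_COUNT = DIV_ROUND_UP(1650, 255) = 7 chained fragments,
 * while a 31 byte maximum fits in a single fragment.
 */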

#define PDU_MEM_SIZE       PDU_ADV_MEM_SIZE

/* AD data and Scan Response Data each need 2 PDU buffers in the double buffer
 * implementation. Allocate 3 PDU buffers plus the
 * CONFIG_BT_CTLR_ADV_DATA_BUF_MAX defined buffer count as the minimum number
 * of buffers that meets the legacy advertising needs. Add 1 each for Extended
 * and Periodic Advertising; the extra buffers needed to double buffer these
 * are kept configurable, by increasing CONFIG_BT_CTLR_ADV_DATA_BUF_MAX.
 */
#define PDU_MEM_COUNT_MIN  (((BT_CTLR_ADV_SET) * 3) + \
			    ((BT_CTLR_ADV_AUX_SET) * \
			     PAYLOAD_BASED_FRAG_COUNT))
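
/* Example: with one advertising set (BT_CTLR_ADV_SET = 1), one auxiliary set
 * and single-fragment payloads, this evaluates to (1 * 3) + (1 * 1) = 4
 * buffers.
 */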

/* Maximum advertising PDU buffers to allocate, which is the sum of the
 * minimum plus the configured additional count in
 * CONFIG_BT_CTLR_ADV_DATA_BUF_MAX.
 */
#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
/* NOTE: When Periodic Advertising is supported then one chain of PDU buffers
 *       plus the additional CONFIG_BT_CTLR_ADV_DATA_BUF_MAX amount of chain
 *       buffers is allocated.
 *       Set CONFIG_BT_CTLR_ADV_DATA_BUF_MAX to (BT_CTLR_ADV_AUX_SET +
 *       BT_CTLR_ADV_SYNC_SET) if PDU data is updated more frequently compared
 *       to the advertising interval with random delay included.
 */
#define PDU_MEM_COUNT_MAX ((PDU_MEM_COUNT_MIN) + \
			   ((BT_CTLR_ADV_SYNC_SET) * \
			    PAYLOAD_FRAG_COUNT) + \
			   (CONFIG_BT_CTLR_ADV_DATA_BUF_MAX * \
			    PAYLOAD_FRAG_COUNT))
#else /* !CONFIG_BT_CTLR_ADV_PERIODIC */
/* NOTE: When Extended Advertising is supported but not Periodic Advertising
 *       then the additional CONFIG_BT_CTLR_ADV_DATA_BUF_MAX amount of chain
 *       buffers is allocated.
 *       Set CONFIG_BT_CTLR_ADV_DATA_BUF_MAX to BT_CTLR_ADV_AUX_SET if
 *       PDU data is updated more frequently compared to the advertising
 *       interval with random delay included.
 */
#define PDU_MEM_COUNT_MAX (PDU_MEM_COUNT_MIN + \
			   (CONFIG_BT_CTLR_ADV_DATA_BUF_MAX * \
			    PAYLOAD_BASED_FRAG_COUNT))
#endif /* !CONFIG_BT_CTLR_ADV_PERIODIC */
#else /* !CONFIG_BT_CTLR_ADV_EXT */
/* NOTE: When Extended Advertising is not supported then
 *       CONFIG_BT_CTLR_ADV_DATA_BUF_MAX is restricted to 1 in the Kconfig
 *       file.
 */
#define PDU_MEM_COUNT_MAX (PDU_MEM_COUNT_MIN + CONFIG_BT_CTLR_ADV_DATA_BUF_MAX)
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

/* FIFO element count to return the consumed advertising PDUs (AD and Scan
 * Response): 1 each for the primary channel PDUs (AD and Scan Response), plus
 * one each for Extended and Periodic Advertising times the number of chained
 * fragments that would get returned.
 */
#define PDU_MEM_FIFO_COUNT ((BT_CTLR_ADV_SET) + 1 + \
			    ((BT_CTLR_ADV_AUX_SET) * \
			     PAYLOAD_BASED_FRAG_COUNT) + \
			    ((BT_CTLR_ADV_SYNC_SET) * \
			     PAYLOAD_FRAG_COUNT))

#define PDU_POOL_SIZE      (PDU_MEM_SIZE * PDU_MEM_COUNT_MAX)

/* Free AD data PDU buffer pool */
static struct {
	void *free;
	uint8_t pool[PDU_POOL_SIZE];
} mem_pdu;

/* FIFO to return stale AD data PDU buffers from LLL to thread context */
static MFIFO_DEFINE(pdu_free, sizeof(void *), PDU_MEM_FIFO_COUNT);
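
/* NOTE: Stale PDU buffers are enqueued into this FIFO from LLL (ISR) context
 * and sem_pdu_free is given, so that a thread context blocked in
 * lll_adv_pdu_alloc_pdu_adv() can dequeue and reuse them.
 */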

/* Semaphore to wake up a thread waiting for free AD data PDU buffers */
static struct k_sem sem_pdu_free;

#if defined(CONFIG_BT_CTLR_ADV_EXT_PDU_EXTRA_DATA_MEMORY)
#if defined(CONFIG_BT_CTLR_DF_ADV_CTE_TX)
#define EXTRA_DATA_MEM_SIZE MROUND(sizeof(struct lll_df_adv_cfg))
#else
#define EXTRA_DATA_MEM_SIZE 0
#endif /* CONFIG_BT_CTLR_DF_ADV_CTE_TX */

/* TODO: check that the number of fragments is not smaller than the number of
 * CTEs to be transmitted. Note that it would depend on the chain PDU storage.
 *
 * Currently we can send only a single CTE with AUX_SYNC_IND.
 * The count is equal to the allowed adv sync sets * 2 (double buffering).
 */
#define EXTRA_DATA_MEM_COUNT (BT_CTLR_ADV_SYNC_SET * PAYLOAD_FRAG_COUNT + 1)
#define EXTRA_DATA_MEM_FIFO_COUNT (EXTRA_DATA_MEM_COUNT * 2)
#define EXTRA_DATA_POOL_SIZE (EXTRA_DATA_MEM_SIZE * EXTRA_DATA_MEM_COUNT * 2)

/* Free extra data buffer pool */
static struct {
	void *free;
	uint8_t pool[EXTRA_DATA_POOL_SIZE];
} mem_extra_data;

/* FIFO to return stale extra data buffers from LLL to thread context. */
static MFIFO_DEFINE(extra_data_free, sizeof(void *), EXTRA_DATA_MEM_FIFO_COUNT);
static struct k_sem sem_extra_data_free;
#endif /* CONFIG_BT_CTLR_ADV_EXT_PDU_EXTRA_DATA_MEMORY */

int lll_adv_init(void)
{
	int err;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if (BT_CTLR_ADV_AUX_SET > 0)
	err = lll_adv_aux_init();
	if (err) {
		return err;
	}
#endif /* BT_CTLR_ADV_AUX_SET > 0 */
#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
	err = lll_adv_sync_init();
	if (err) {
		return err;
	}
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

int lll_adv_reset(void)
{
	int err;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if (BT_CTLR_ADV_AUX_SET > 0)
	err = lll_adv_aux_reset();
	if (err) {
		return err;
	}
#endif /* BT_CTLR_ADV_AUX_SET > 0 */
#if defined(CONFIG_BT_CTLR_ADV_PERIODIC)
	err = lll_adv_sync_reset();
	if (err) {
		return err;
	}
#endif /* CONFIG_BT_CTLR_ADV_PERIODIC */
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

int lll_adv_data_init(struct lll_adv_pdu *pdu)
{
	struct pdu_adv *p;

	p = mem_acquire(&mem_pdu.free);
	if (!p) {
		return -ENOMEM;
	}

#if defined(CONFIG_BT_CTLR_ADV_PDU_LINK)
	PDU_ADV_NEXT_PTR(p) = NULL;
#endif /* CONFIG_BT_CTLR_ADV_PDU_LINK */

	p->len = 0U;
	pdu->pdu[0] = (void *)p;

	return 0;
}

int lll_adv_data_reset(struct lll_adv_pdu *pdu)
{
	/* NOTE: this function is used on HCI reset to mem-zero the structure
	 *       members that otherwise were zeroed by the architecture
	 *       startup code zeroing the .bss section.
	 *       The pdu[0] element in the array is not initialized as a
	 *       subsequent call to lll_adv_data_init will allocate a PDU
	 *       buffer and assign that.
	 */

	pdu->first = 0U;
	pdu->last = 0U;
	pdu->pdu[1] = NULL;

#if defined(CONFIG_BT_CTLR_ADV_EXT_PDU_EXTRA_DATA_MEMORY)
	/* Both slots are NULL because the extra_data memory is allocated only
	 * on request. Not every advertising PDU includes extra_data.
	 */
	pdu->extra_data[0] = NULL;
	pdu->extra_data[1] = NULL;
#endif /* CONFIG_BT_CTLR_ADV_EXT_PDU_EXTRA_DATA_MEMORY */
	return 0;
}

int lll_adv_data_dequeue(struct lll_adv_pdu *pdu)
{
	uint8_t first;
	void *p;

	first = pdu->first;
	if (first == pdu->last) {
		return -ENOMEM;
	}

	p = pdu->pdu[first];
	pdu->pdu[first] = NULL;
	mem_release(p, &mem_pdu.free);

	first++;
	if (first == DOUBLE_BUFFER_SIZE) {
		first = 0U;
	}
	pdu->first = first;

	return 0;
}

int lll_adv_data_release(struct lll_adv_pdu *pdu)
{
	uint8_t last;
	void *p;

	last = pdu->last;
	p = pdu->pdu[last];
	if (p) {
		pdu->pdu[last] = NULL;
		mem_release(p, &mem_pdu.free);
	}

	last++;
	if (last == DOUBLE_BUFFER_SIZE) {
		last = 0U;
	}
	p = pdu->pdu[last];
	if (p) {
		pdu->pdu[last] = NULL;
		mem_release(p, &mem_pdu.free);
	}

	return 0;
}

struct pdu_adv *lll_adv_pdu_alloc(struct lll_adv_pdu *pdu, uint8_t *idx)
{
	uint8_t first, last;
	void *p;

	/* TODO: Make this unique mechanism to update the last element in the
	 *       double buffer a reusable utility function.
	 */
	first = pdu->first;
	last = pdu->last;
	if (first == last) {
		/* Return the index of the next free PDU in the double buffer */
		last++;
		if (last == DOUBLE_BUFFER_SIZE) {
			last = 0U;
		}
	} else {
		uint8_t first_latest;

		/* LLL has not consumed the first PDU. Revert `last` so that
		 * LLL still consumes the first PDU while the caller of this
		 * function updates/modifies the latest PDU.
		 *
		 * Under race condition:
		 * 1. LLL runs before `pdu->last` is reverted, then `pdu->first`
		 *    has changed, hence restore `pdu->last` and return index of
		 *    next free PDU in the double buffer.
		 * 2. LLL runs after `pdu->last` is reverted, then `pdu->first`
		 *    will not change, return the saved `last` as the index of
		 *    the next free PDU in the double buffer.
		 */
		pdu->last = first;
		cpu_dmb();
		first_latest = pdu->first;
		if (first_latest != first) {
			pdu->last = last;
			last++;
			if (last == DOUBLE_BUFFER_SIZE) {
				last = 0U;
			}
		}
	}

	*idx = last;

	p = (void *)pdu->pdu[last];
	if (p) {
		return p;
	}

	p = lll_adv_pdu_alloc_pdu_adv();

	pdu->pdu[last] = (void *)p;

	return p;
}
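
/* A minimal usage sketch (thread context), assuming the new buffer is
 * committed with lll_adv_pdu_enqueue() from lll_adv.h:
 *
 *     uint8_t idx;
 *     struct pdu_adv *p = lll_adv_pdu_alloc(&lll->adv_data, &idx);
 *
 *     (populate *p)
 *
 *     lll_adv_pdu_enqueue(&lll->adv_data, idx);
 *
 * LLL thereafter consumes the committed buffer through
 * lll_adv_pdu_latest_get().
 */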

struct pdu_adv *lll_adv_pdu_alloc_pdu_adv(void)
{
	struct pdu_adv *p;
	int err;

	p = MFIFO_DEQUEUE_PEEK(pdu_free);
	if (p) {
		k_sem_reset(&sem_pdu_free);

		MFIFO_DEQUEUE(pdu_free);

#if defined(CONFIG_BT_CTLR_ADV_PDU_LINK)
		PDU_ADV_NEXT_PTR(p) = NULL;
#endif
		return p;
	}

	p = mem_acquire(&mem_pdu.free);
	if (p) {
#if defined(CONFIG_BT_CTLR_ADV_PDU_LINK)
		PDU_ADV_NEXT_PTR(p) = NULL;
#endif
		return p;
	}

	err = k_sem_take(&sem_pdu_free, PDU_FREE_TIMEOUT);
	LL_ASSERT(!err);

	k_sem_reset(&sem_pdu_free);

	p = MFIFO_DEQUEUE(pdu_free);
	LL_ASSERT(p);

#if defined(CONFIG_BT_CTLR_ADV_PDU_LINK)
	PDU_ADV_NEXT_PTR(p) = NULL;
#endif
	return p;
}

#if defined(CONFIG_BT_CTLR_ADV_PDU_LINK)
void lll_adv_pdu_linked_release_all(struct pdu_adv *pdu_first)
{
	struct pdu_adv *pdu = pdu_first;

	while (pdu) {
		struct pdu_adv *pdu_next;

		pdu_next = PDU_ADV_NEXT_PTR(pdu);
		PDU_ADV_NEXT_PTR(pdu) = NULL;
		mem_release(pdu, &mem_pdu.free);
		pdu = pdu_next;
	}
}
#endif

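/* Consume side of the double buffer: if ULL committed a new PDU (first !=
 * last), release the previous buffer, or chain of buffers, into the pdu_free
 * FIFO, advance the first index and flag the switch through is_modified.
 */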
struct pdu_adv *lll_adv_pdu_latest_get(struct lll_adv_pdu *pdu,
				       uint8_t *is_modified)
{
	uint8_t first;

	first = pdu->first;
	if (first != pdu->last) {
		uint8_t free_idx;
		uint8_t pdu_idx;
		void *p;

		pdu_idx = first;
		p = pdu->pdu[pdu_idx];

		do {
			void *next;

			/* Store the partial list in the current data index if
			 * there is no free slot in the mfifo. It can be
			 * released on the next switch attempt (on the next
			 * event).
			 */
			if (!MFIFO_ENQUEUE_IDX_GET(pdu_free, &free_idx)) {
				break;
			}

#if defined(CONFIG_BT_CTLR_ADV_PDU_LINK)
			next = lll_adv_pdu_linked_next_get(p);
#else
			next = NULL;
#endif

			MFIFO_BY_IDX_ENQUEUE(pdu_free, free_idx, p);
			pdu_free_sem_give();

			p = next;
		} while (p);

		/* If not all PDUs were released into the mfifo, keep the list
		 * in the current data index, to be released on the next switch
		 * attempt.
		 */
		pdu->pdu[pdu_idx] = p;

		/* Progress to the next data index */
		first += 1U;
		if (first == DOUBLE_BUFFER_SIZE) {
			first = 0U;
		}
		pdu->first = first;
		*is_modified = 1U;
	}

	return (void *)pdu->pdu[first];
}

#if defined(CONFIG_BT_CTLR_ADV_EXT_PDU_EXTRA_DATA_MEMORY)
int lll_adv_and_extra_data_init(struct lll_adv_pdu *pdu)
{
	struct pdu_adv *p;
	void *extra_data;

	p = mem_acquire(&mem_pdu.free);
	if (!p) {
		return -ENOMEM;
	}

#if defined(CONFIG_BT_CTLR_ADV_PDU_LINK)
	PDU_ADV_NEXT_PTR(p) = NULL;
#endif /* CONFIG_BT_CTLR_ADV_PDU_LINK */

	pdu->pdu[0] = (void *)p;

	extra_data = mem_acquire(&mem_extra_data.free);
	if (!extra_data) {
		return -ENOMEM;
	}

	pdu->extra_data[0] = extra_data;

	return 0;
}

int lll_adv_and_extra_data_release(struct lll_adv_pdu *pdu)
{
	uint8_t last;
	void *p;

	last = pdu->last;
	p = pdu->pdu[last];
	pdu->pdu[last] = NULL;
	mem_release(p, &mem_pdu.free);

	adv_extra_data_release(pdu, last);

	last++;
	if (last == DOUBLE_BUFFER_SIZE) {
		last = 0U;
	}
	p = pdu->pdu[last];
	if (p) {
		pdu->pdu[last] = NULL;
		mem_release(p, &mem_pdu.free);
	}

	adv_extra_data_release(pdu, last);

	return 0;
}

struct pdu_adv *lll_adv_pdu_and_extra_data_alloc(struct lll_adv_pdu *pdu,
						 void **extra_data,
						 uint8_t *idx)
{
	struct pdu_adv *p;

	p = lll_adv_pdu_alloc(pdu, idx);

	if (extra_data) {
		*extra_data = adv_extra_data_allocate(pdu, *idx);
	} else {
		if (adv_extra_data_free(pdu, *idx)) {
			/* There is no release of the memory allocated by
			 * lll_adv_pdu_alloc because there is no memory leak.
			 * If the caller can recover from this error and a
			 * subsequent call to this function occurs, no new
			 * memory will be allocated; lll_adv_pdu_alloc will
			 * return the already allocated memory.
			 */
			return NULL;
		}
	}

	return p;
}

struct pdu_adv *lll_adv_pdu_and_extra_data_latest_get(struct lll_adv_pdu *pdu,
						      void **extra_data,
						      uint8_t *is_modified)
{
	uint8_t first;

	first = pdu->first;
	if (first != pdu->last) {
		uint8_t pdu_free_idx;
		uint8_t ed_free_idx;
		void *ed;
		uint8_t pdu_idx;
		void *p;

		pdu_idx = first;
		p = pdu->pdu[pdu_idx];
		ed = pdu->extra_data[pdu_idx];

		do {
			void *next;

			/* Store the partial list in the current data index if
			 * there is no free slot in the mfifo. It can be
			 * released on the next switch attempt (on the next
			 * event).
			 */
			if (!MFIFO_ENQUEUE_IDX_GET(pdu_free, &pdu_free_idx)) {
				pdu->pdu[pdu_idx] = p;
				return NULL;
			}

#if defined(CONFIG_BT_CTLR_ADV_PDU_LINK)
			next = lll_adv_pdu_linked_next_get(p);
#else
			next = NULL;
#endif

			MFIFO_BY_IDX_ENQUEUE(pdu_free, pdu_free_idx, p);
			pdu_free_sem_give();

			p = next;
		} while (p);

		pdu->pdu[pdu_idx] = NULL;

		if (ed && (!MFIFO_ENQUEUE_IDX_GET(extra_data_free,
						  &ed_free_idx))) {
			/* No pdu_free_idx clean up is required, subsequent
			 * calls to MFIFO_ENQUEUE_IDX_GET return the same
			 * index to memory that is in limbo state.
			 */
			return NULL;
		}

		first += 1U;
		if (first == DOUBLE_BUFFER_SIZE) {
			first = 0U;
		}
		pdu->first = first;
		*is_modified = 1U;

		pdu->pdu[pdu_idx] = NULL;

		if (ed) {
			pdu->extra_data[pdu_idx] = NULL;

			MFIFO_BY_IDX_ENQUEUE(extra_data_free, ed_free_idx, ed);
			extra_data_free_sem_give();
		}
	}

	if (extra_data) {
		*extra_data = pdu->extra_data[first];
	}

	return (void *)pdu->pdu[first];
}
#endif /* CONFIG_BT_CTLR_ADV_EXT_PDU_EXTRA_DATA_MEMORY */

void lll_adv_prepare(void *param)
{
	int err;

	err = lll_hfclock_on();
	LL_ASSERT(err >= 0);

	err = lll_prepare(is_abort_cb, abort_cb, prepare_cb, 0, param);
	LL_ASSERT(!err || err == -EINPROGRESS);
}

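/* Accept a SCAN_REQ only when its AdvA matches our advertising address and,
 * when the scan request filter policy is in force, the scanner additionally
 * passed the Filter Accept List check or, with privacy, address resolution.
 */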
bool lll_adv_scan_req_check(struct lll_adv *lll, struct pdu_adv *sr,
			    uint8_t tx_addr, uint8_t *addr,
			    uint8_t devmatch_ok, uint8_t *rl_idx)
{
#if defined(CONFIG_BT_CTLR_PRIVACY)
	return ((((lll->filter_policy & BT_LE_ADV_FP_FILTER_SCAN_REQ) == 0) &&
		 ull_filter_lll_rl_addr_allowed(sr->tx_addr,
						sr->scan_req.scan_addr,
						rl_idx)) ||
		(((lll->filter_policy & BT_LE_ADV_FP_FILTER_SCAN_REQ) != 0) &&
		 (devmatch_ok || ull_filter_lll_irk_in_fal(*rl_idx)))) &&
		isr_rx_sr_adva_check(tx_addr, addr, sr);
#else
	return (((lll->filter_policy & BT_LE_ADV_FP_FILTER_SCAN_REQ) == 0U) ||
		 devmatch_ok) &&
		isr_rx_sr_adva_check(tx_addr, addr, sr);
#endif /* CONFIG_BT_CTLR_PRIVACY */
}

#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
int lll_adv_scan_req_report(struct lll_adv *lll, struct pdu_adv *pdu_adv_rx,
			    uint8_t rl_idx, uint8_t rssi_ready)
{
	struct node_rx_pdu *node_rx;

	node_rx = ull_pdu_rx_alloc_peek(3);
	if (!node_rx) {
		return -ENOBUFS;
	}
	ull_pdu_rx_alloc();

	/* Prepare the report (scan req) */
	node_rx->hdr.type = NODE_RX_TYPE_SCAN_REQ;
	node_rx->hdr.handle = ull_adv_lll_handle_get(lll);

	node_rx->rx_ftr.rssi = (rssi_ready) ? radio_rssi_get() :
					      BT_HCI_LE_RSSI_NOT_AVAILABLE;
#if defined(CONFIG_BT_CTLR_PRIVACY)
	node_rx->rx_ftr.rl_idx = rl_idx;
#endif

	ull_rx_put_sched(node_rx->hdr.link, node_rx);

	return 0;
}
#endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */

bool lll_adv_connect_ind_check(struct lll_adv *lll, struct pdu_adv *ci,
			       uint8_t tx_addr, uint8_t *addr,
			       uint8_t rx_addr, uint8_t *tgt_addr,
			       uint8_t devmatch_ok, uint8_t *rl_idx)
{
	/* LL 4.3.2: filter policy shall be ignored for directed adv */
	if (tgt_addr) {
#if defined(CONFIG_BT_CTLR_PRIVACY)
		return ull_filter_lll_rl_addr_allowed(ci->tx_addr,
						      ci->connect_ind.init_addr,
						      rl_idx) &&
#else
		return (1) &&
#endif
		       isr_rx_ci_adva_check(tx_addr, addr, ci) &&
		       isr_rx_ci_tgta_check(lll, rx_addr, tgt_addr, ci,
					    *rl_idx);
	}

#if defined(CONFIG_BT_CTLR_PRIVACY)
	return ((((lll->filter_policy & BT_LE_ADV_FP_FILTER_CONN_IND) == 0) &&
		 ull_filter_lll_rl_addr_allowed(ci->tx_addr,
						ci->connect_ind.init_addr,
						rl_idx)) ||
		(((lll->filter_policy & BT_LE_ADV_FP_FILTER_CONN_IND) != 0) &&
		 (devmatch_ok || ull_filter_lll_irk_in_fal(*rl_idx)))) &&
	       isr_rx_ci_adva_check(tx_addr, addr, ci);
#else
	return (((lll->filter_policy & BT_LE_ADV_FP_FILTER_CONN_IND) == 0) ||
		(devmatch_ok)) &&
	       isr_rx_ci_adva_check(tx_addr, addr, ci);
#endif /* CONFIG_BT_CTLR_PRIVACY */
}

/* Helper function to initialize the data variables both at power up and on
 * HCI reset.
 */
static int init_reset(void)
{
	/* Initialize the AC PDU pool */
	mem_init(mem_pdu.pool, PDU_MEM_SIZE,
		 (sizeof(mem_pdu.pool) / PDU_MEM_SIZE), &mem_pdu.free);

	/* Initialize the AC PDU free buffer return queue */
	MFIFO_INIT(pdu_free);

#if defined(CONFIG_BT_CTLR_ADV_EXT_PDU_EXTRA_DATA_MEMORY)
	/* Initialize the extra data pool */
	mem_init(mem_extra_data.pool, EXTRA_DATA_MEM_SIZE,
		 (sizeof(mem_extra_data.pool) / EXTRA_DATA_MEM_SIZE), &mem_extra_data.free);

	/* Initialize the extra data free buffer return queue */
	MFIFO_INIT(extra_data_free);

	k_sem_init(&sem_extra_data_free, 0, EXTRA_DATA_MEM_FIFO_COUNT);
#endif /* CONFIG_BT_CTLR_ADV_EXT_PDU_EXTRA_DATA_MEMORY */

	/* Initialize the semaphore used to block waiting for free PDU
	 * buffers
	 */
	k_sem_init(&sem_pdu_free, 0, PDU_MEM_FIFO_COUNT);

	return 0;
}

#if defined(CONFIG_BT_CTLR_ZLI)
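/* NOTE: In Zero Latency IRQ configuration the radio ISR must not call kernel
 * APIs directly, hence giving the semaphore is deferred to ULL_HIGH context
 * through a mayfly.
 */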
static void mfy_pdu_free_sem_give(void *param)
{
	ARG_UNUSED(param);

	k_sem_give(&sem_pdu_free);
}

static void pdu_free_sem_give(void)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, mfy_pdu_free_sem_give};

	/* Ignore mayfly_enqueue failure on repeated enqueue call */
	(void)mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_HIGH, 0,
			     &mfy);
}

#else /* !CONFIG_BT_CTLR_ZLI */
static void pdu_free_sem_give(void)
{
	k_sem_give(&sem_pdu_free);
}
#endif /* !CONFIG_BT_CTLR_ZLI */

#if defined(CONFIG_BT_CTLR_ADV_EXT_PDU_EXTRA_DATA_MEMORY)
static void *adv_extra_data_allocate(struct lll_adv_pdu *pdu, uint8_t last)
{
	void *extra_data;
	int err;

	extra_data = pdu->extra_data[last];
	if (extra_data) {
		return extra_data;
	}

	extra_data = MFIFO_DEQUEUE_PEEK(extra_data_free);
	if (extra_data) {
		err = k_sem_take(&sem_extra_data_free, K_NO_WAIT);
		LL_ASSERT(!err);

		MFIFO_DEQUEUE(extra_data_free);
		pdu->extra_data[last] = extra_data;

		return extra_data;
	}

	extra_data = mem_acquire(&mem_extra_data.free);
	if (extra_data) {
		pdu->extra_data[last] = extra_data;

		return extra_data;
	}

	err = k_sem_take(&sem_extra_data_free, PDU_FREE_TIMEOUT);
	LL_ASSERT(!err);

	extra_data = MFIFO_DEQUEUE(extra_data_free);
	LL_ASSERT(extra_data);

	pdu->extra_data[last] = (void *)extra_data;

	return extra_data;
}

static int adv_extra_data_free(struct lll_adv_pdu *pdu, uint8_t last)
{
	uint8_t ed_free_idx;
	void *ed;

	ed = pdu->extra_data[last];

	if (ed) {
		if (!MFIFO_ENQUEUE_IDX_GET(extra_data_free, &ed_free_idx)) {
			/* TODO: what if the enqueue fails and the assert does
			 * not fire? pdu_free_idx should be released before
			 * return.
			 */
			return -ENOMEM;
		}
		pdu->extra_data[last] = NULL;

		MFIFO_BY_IDX_ENQUEUE(extra_data_free, ed_free_idx, ed);
		extra_data_free_sem_give();
	}

	return 0;
}

static inline void adv_extra_data_release(struct lll_adv_pdu *pdu, int idx)
{
	void *extra_data;

	extra_data = pdu->extra_data[idx];
	if (extra_data) {
		pdu->extra_data[idx] = NULL;
		mem_release(extra_data, &mem_extra_data.free);
	}
}

#if defined(CONFIG_BT_CTLR_ZLI)
static void mfy_extra_data_free_sem_give(void *param)
{
	ARG_UNUSED(param);

	k_sem_give(&sem_extra_data_free);
}

static void extra_data_free_sem_give(void)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL,
				    mfy_extra_data_free_sem_give};
	uint32_t retval;

	retval = mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_HIGH, 0,
				&mfy);
	LL_ASSERT(!retval);
}

#else /* !CONFIG_BT_CTLR_ZLI */
static void extra_data_free_sem_give(void)
{
	k_sem_give(&sem_extra_data_free);
}
#endif /* !CONFIG_BT_CTLR_ZLI */
#endif /* CONFIG_BT_CTLR_ADV_EXT_PDU_EXTRA_DATA_MEMORY */

static int prepare_cb(struct lll_prepare_param *p)
{
	uint32_t ticks_at_event;
	uint32_t ticks_at_start;
	struct pdu_adv *pdu;
	struct ull_hdr *ull;
	struct lll_adv *lll;
	uint32_t remainder;
	uint32_t start_us;
	uint32_t ret;
	uint32_t aa;

	DEBUG_RADIO_START_A(1);

	lll = p->param;

#if defined(CONFIG_BT_PERIPHERAL)
	/* Check if stopped (on connection establishment), or a disable race
	 * between LLL and ULL.
	 * When connectable advertising is disabled in thread context, the
	 * cancelled flag is set, and the initiated flag is checked. Here, we
	 * avoid transmitting a connectable advertising event if the cancelled
	 * flag is set.
	 */
	if (unlikely(lll->conn &&
		(lll->conn->periph.initiated || lll->conn->periph.cancelled))) {
		radio_isr_set(lll_isr_early_abort, lll);
		radio_disable();

		return 0;
	}
#endif /* CONFIG_BT_PERIPHERAL */

	radio_reset();

#if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL)
	radio_tx_power_set(lll->tx_pwr_lvl);
#else
	radio_tx_power_set(RADIO_TXP_DEFAULT);
#endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL */

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	/* TODO: if coded we use S8? */
	radio_phy_set(lll->phy_p, lll->phy_flags);
	radio_pkt_configure(RADIO_PKT_CONF_LENGTH_8BIT, PDU_AC_LEG_PAYLOAD_SIZE_MAX,
			    RADIO_PKT_CONF_PHY(lll->phy_p));
#else /* !CONFIG_BT_CTLR_ADV_EXT */
	radio_phy_set(0, 0);
	radio_pkt_configure(RADIO_PKT_CONF_LENGTH_8BIT, PDU_AC_LEG_PAYLOAD_SIZE_MAX,
			    RADIO_PKT_CONF_PHY(RADIO_PKT_CONF_PHY_LEGACY));
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

	aa = sys_cpu_to_le32(PDU_AC_ACCESS_ADDR);
	radio_aa_set((uint8_t *)&aa);
	radio_crc_configure(PDU_CRC_POLYNOMIAL,
			    PDU_AC_CRC_IV);

	lll->chan_map_curr = lll->chan_map;

	pdu = chan_prepare(lll);

#if defined(CONFIG_BT_HCI_MESH_EXT)
	_radio.mesh_adv_end_us = 0;
#endif /* CONFIG_BT_HCI_MESH_EXT */


#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (ull_filter_lll_rl_enabled()) {
		struct lll_filter *filter =
			ull_filter_lll_get(!!(lll->filter_policy));

		radio_filter_configure(filter->enable_bitmask,
				       filter->addr_type_bitmask,
				       (uint8_t *)filter->bdaddr);
	} else
#endif /* CONFIG_BT_CTLR_PRIVACY */

	if (IS_ENABLED(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST) && lll->filter_policy) {
		/* Setup Radio Filter */
		struct lll_filter *fal = ull_filter_lll_get(true);

		radio_filter_configure(fal->enable_bitmask,
				       fal->addr_type_bitmask,
				       (uint8_t *)fal->bdaddr);
	}

	ticks_at_event = p->ticks_at_expire;
	ull = HDR_LLL2ULL(lll);
	ticks_at_event += lll_event_offset_get(ull);

	ticks_at_start = ticks_at_event;
	ticks_at_start += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

	remainder = p->remainder;
	start_us = radio_tmr_start(1, ticks_at_start, remainder);

	/* capture end of Tx-ed PDU, used to calculate HCTO. */
	radio_tmr_end_capture();

#if defined(HAL_RADIO_GPIO_HAVE_PA_PIN)
	radio_gpio_pa_setup();
	radio_gpio_pa_lna_enable(start_us + radio_tx_ready_delay_get(0, 0) -
				 HAL_RADIO_GPIO_PA_OFFSET);
#else /* !HAL_RADIO_GPIO_HAVE_PA_PIN */
	ARG_UNUSED(start_us);
#endif /* !HAL_RADIO_GPIO_HAVE_PA_PIN */

#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED) && \
	(EVENT_OVERHEAD_PREEMPT_US <= EVENT_OVERHEAD_PREEMPT_MIN_US)
	uint32_t overhead;

	overhead = lll_preempt_calc(ull, (TICKER_ID_ADV_BASE + ull_adv_lll_handle_get(lll)),
				    ticks_at_event);
	/* check if preempt to start has changed */
	if (overhead) {
		LL_ASSERT_OVERHEAD(overhead);

		radio_isr_set(isr_abort, lll);
		radio_disable();

		return -ECANCELED;
	}
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */

#if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
	if (lll->aux) {
		/* fill in the aux ptr in the pdu */
		ull_adv_aux_lll_auxptr_fill(pdu, lll);

		/* NOTE: as the first primary channel PDU does not use the
		 * remainder, the packet timer is started one tick in advance
		 * to start the radio with microsecond precision, hence
		 * compensate for the higher start_us value captured at radio
		 * start of the first primary channel PDU.
		 */
		lll->aux->ticks_pri_pdu_offset += 1U;
	}
#endif

	ret = lll_prepare_done(lll);
	LL_ASSERT(!ret);

	DEBUG_RADIO_START_A(1);

	return 0;
}

#if defined(CONFIG_BT_PERIPHERAL)
static int resume_prepare_cb(struct lll_prepare_param *p)
{
	struct ull_hdr *ull;

	ull = HDR_LLL2ULL(p->param);
	p->ticks_at_expire = ticker_ticks_now_get() - lll_event_offset_get(ull);
	p->remainder = 0;
	p->lazy = 0;

	return prepare_cb(p);
}
#endif /* CONFIG_BT_PERIPHERAL */

static int is_abort_cb(void *next, void *curr, lll_prepare_cb_t *resume_cb)
{
#if defined(CONFIG_BT_PERIPHERAL)
	struct lll_adv *lll = curr;
	struct pdu_adv *pdu;
#endif /* CONFIG_BT_PERIPHERAL */

	/* TODO: prio check */
	if (next != curr) {
		if (0) {
#if defined(CONFIG_BT_PERIPHERAL)
		} else if (lll->is_hdcd) {
			int err;

			/* wrap back after the pre-empter */
			*resume_cb = resume_prepare_cb;

			/* Retain the HF clock */
			err = lll_hfclock_on();
			LL_ASSERT(err >= 0);

			return -EAGAIN;
#endif /* CONFIG_BT_PERIPHERAL */
		} else {
			return -ECANCELED;
		}
	}

#if defined(CONFIG_BT_PERIPHERAL)
	pdu = lll_adv_data_curr_get(lll);
	if (pdu->type == PDU_ADV_TYPE_DIRECT_IND) {
		return 0;
	}
#endif /* CONFIG_BT_PERIPHERAL */

	return -ECANCELED;
}

static void abort_cb(struct lll_prepare_param *prepare_param, void *param)
{
	int err;

	/* NOTE: This is not a prepare being cancelled */
	if (!prepare_param) {
		/* Perform event abort here.
		 * After the event has been cleanly aborted, clean up resources
		 * and dispatch event done.
		 */
		radio_isr_set(isr_abort, param);
		radio_disable();
		return;
	}

	/* NOTE: Else clean the top half preparations of the aborted event
	 * currently in the preparation pipeline.
	 */
	err = lll_hfclock_off();
	LL_ASSERT(err >= 0);

	lll_done(param);
}

static void isr_tx(void *param)
{
	struct node_rx_pdu *node_rx_prof;
	struct node_rx_pdu *node_rx;
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	struct lll_adv *lll = param;
	uint8_t phy_p = lll->phy_p;
	uint8_t phy_flags = lll->phy_flags;
#else
	const uint8_t phy_p = 0U;
	const uint8_t phy_flags = 0U;
#endif
	uint32_t hcto;

	if (IS_ENABLED(CONFIG_BT_CTLR_PROFILE_ISR)) {
		lll_prof_latency_capture();
		node_rx_prof = lll_prof_reserve();
	}

	/* Clear radio tx status and events */
	lll_isr_tx_status_reset();

	/* setup tIFS switching */
	radio_tmr_tifs_set(EVENT_IFS_US);
	radio_switch_complete_and_tx(phy_p, 0, phy_p, phy_flags);

	/* setup Rx buffer */
	node_rx = ull_pdu_rx_alloc_peek(1);
	LL_ASSERT(node_rx);
	radio_pkt_rx_set(node_rx->pdu);

	/* assert if radio packet ptr is not set and radio started rx */
	LL_ASSERT(!radio_is_ready());

	if (IS_ENABLED(CONFIG_BT_CTLR_PROFILE_ISR)) {
		lll_prof_cputime_capture();
	}

	radio_isr_set(isr_rx, param);

#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (ull_filter_lll_rl_enabled()) {
		uint8_t count, *irks = ull_filter_lll_irks_get(&count);

		radio_ar_configure(count, irks, 0);
	}
#endif /* CONFIG_BT_CTLR_PRIVACY */

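	/* Calculate the header complete timeout (HCTO): the latest time,
	 * relative to the end of the transmitted PDU, by which the address of
	 * a response must have been received before the Rx window is closed.
	 */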
	/* +/- 2us active clock jitter, +1 us PPI to timer start compensation */
	hcto = radio_tmr_tifs_base_get() + EVENT_IFS_US +
	       (EVENT_CLOCK_JITTER_US << 1) + RANGE_DELAY_US +
	       HAL_RADIO_TMR_START_DELAY_US;
	hcto += radio_rx_chain_delay_get(phy_p, 0);
	hcto += addr_us_get(phy_p);
	hcto -= radio_tx_chain_delay_get(phy_p, 0);
	radio_tmr_hcto_configure(hcto);

	/* capture end of CONNECT_IND PDU, used for calculating first
	 * peripheral event.
	 */
	radio_tmr_end_capture();

	if (IS_ENABLED(CONFIG_BT_CTLR_SCAN_REQ_RSSI) ||
	    IS_ENABLED(CONFIG_BT_CTLR_CONN_RSSI)) {
		radio_rssi_measure();
	}

#if defined(HAL_RADIO_GPIO_HAVE_LNA_PIN)
	if (IS_ENABLED(CONFIG_BT_CTLR_PROFILE_ISR)) {
		/* PA/LNA enable is overwriting packet end used in ISR
		 * profiling, hence back it up for later use.
		 */
		lll_prof_radio_end_backup();
	}

	radio_gpio_lna_setup();
	radio_gpio_pa_lna_enable(radio_tmr_tifs_base_get() + EVENT_IFS_US - 4 -
				 radio_tx_chain_delay_get(phy_p, 0) -
				 HAL_RADIO_GPIO_LNA_OFFSET);
#endif /* HAL_RADIO_GPIO_HAVE_LNA_PIN */

	if (IS_ENABLED(CONFIG_BT_CTLR_PROFILE_ISR)) {
		/* NOTE: as the scratch packet is used to receive, it is safe
		 * to generate the profile event using rx nodes.
		 */
		lll_prof_reserve_send(node_rx_prof);
	}
}

static void isr_rx(void *param)
{
	uint8_t devmatch_ok;
	uint8_t devmatch_id;
	uint8_t irkmatch_ok;
	uint8_t irkmatch_id;
	uint8_t rssi_ready;
	uint8_t trx_done;
	uint8_t crc_ok;

	if (IS_ENABLED(CONFIG_BT_CTLR_PROFILE_ISR)) {
		lll_prof_latency_capture();
	}

	/* Read radio status and events */
	trx_done = radio_is_done();
	if (trx_done) {
		crc_ok = radio_crc_is_valid();
		devmatch_ok = radio_filter_has_match();
		devmatch_id = radio_filter_match_get();
		if (IS_ENABLED(CONFIG_BT_CTLR_PRIVACY)) {
			irkmatch_ok = radio_ar_has_match();
			irkmatch_id = radio_ar_match_get();
		} else {
			irkmatch_ok = 0U;
			irkmatch_id = FILTER_IDX_NONE;
		}
		rssi_ready = radio_rssi_is_ready();
	} else {
		crc_ok = devmatch_ok = irkmatch_ok = rssi_ready = 0U;
		devmatch_id = irkmatch_id = FILTER_IDX_NONE;
	}

	/* Clear radio status and events */
	lll_isr_status_reset();

	/* No Rx */
	if (!trx_done) {
		goto isr_rx_do_close;
	}

	if (crc_ok) {
		int err;

		err = isr_rx_pdu(param, devmatch_ok, devmatch_id, irkmatch_ok,
				 irkmatch_id, rssi_ready);
		if (!err) {
			if (IS_ENABLED(CONFIG_BT_CTLR_PROFILE_ISR)) {
				lll_prof_send();
			}

			return;
		}
	}

	if (IS_ENABLED(CONFIG_BT_CTLR_PROFILE_ISR)) {
		lll_prof_cputime_capture();
		lll_prof_send();
	}

isr_rx_do_close:
	radio_isr_set(isr_done, param);
	radio_disable();
}

static void isr_done(void *param)
{
	struct lll_adv *lll;

	/* Clear radio status and events */
	lll_isr_status_reset();

#if defined(CONFIG_BT_HCI_MESH_EXT)
	if (_radio.advertiser.is_mesh &&
	    !_radio.mesh_adv_end_us) {
		_radio.mesh_adv_end_us = radio_tmr_end_get();
	}
#endif /* CONFIG_BT_HCI_MESH_EXT */

	lll = param;

#if defined(CONFIG_BT_PERIPHERAL)
	if (!IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) && lll->is_hdcd &&
	    !lll->chan_map_curr) {
		lll->chan_map_curr = lll->chan_map;
	}
#endif /* CONFIG_BT_PERIPHERAL */

	/* NOTE: Do not continue connectable advertising if advertising is
	 *       being disabled, by checking the cancelled flag.
	 */
	if (lll->chan_map_curr &&
#if defined(CONFIG_BT_PERIPHERAL)
	    (!lll->conn || !lll->conn->periph.cancelled) &&
#endif /* CONFIG_BT_PERIPHERAL */
	    1) {
		struct pdu_adv *pdu;
		uint32_t start_us;

		pdu = chan_prepare(lll);

#if defined(HAL_RADIO_GPIO_HAVE_PA_PIN) || defined(CONFIG_BT_CTLR_ADV_EXT)
		start_us = radio_tmr_start_now(1);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		struct lll_adv_aux *lll_aux;

		lll_aux = lll->aux;
		if (lll_aux) {
			(void)ull_adv_aux_lll_offset_fill(pdu,
							  lll_aux->ticks_pri_pdu_offset,
							  lll_aux->us_pri_pdu_offset,
							  start_us);
		}
#else /* !CONFIG_BT_CTLR_ADV_EXT */
		ARG_UNUSED(pdu);
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

#if defined(HAL_RADIO_GPIO_HAVE_PA_PIN)
		radio_gpio_pa_setup();
		radio_gpio_pa_lna_enable(start_us +
					 radio_tx_ready_delay_get(0, 0) -
					 HAL_RADIO_GPIO_PA_OFFSET);
#endif /* HAL_RADIO_GPIO_HAVE_PA_PIN */
#else /* !(HAL_RADIO_GPIO_HAVE_PA_PIN || defined(CONFIG_BT_CTLR_ADV_EXT)) */
		ARG_UNUSED(start_us);

		radio_tx_enable();
#endif /* !(HAL_RADIO_GPIO_HAVE_PA_PIN || defined(CONFIG_BT_CTLR_ADV_EXT)) */

		/* capture end of Tx-ed PDU, used to calculate HCTO. */
		radio_tmr_end_capture();

		return;
	}

	radio_filter_disable();

#if defined(CONFIG_BT_PERIPHERAL)
	if (!lll->is_hdcd)
#endif /* CONFIG_BT_PERIPHERAL */
	{
#if defined(CONFIG_BT_HCI_MESH_EXT)
		if (_radio.advertiser.is_mesh) {
			uint32_t err;

			err = isr_close_adv_mesh();
			if (err) {
				return;
			}
		}
#endif /* CONFIG_BT_HCI_MESH_EXT */
	}

#if defined(CONFIG_BT_CTLR_ADV_INDICATION)
	struct node_rx_pdu *node_rx = ull_pdu_rx_alloc_peek(3);

	if (node_rx) {
		ull_pdu_rx_alloc();

		/* TODO: add other info by defining a payload struct */
		node_rx->hdr.type = NODE_RX_TYPE_ADV_INDICATION;

		ull_rx_put_sched(node_rx->hdr.link, node_rx);
	}
#endif /* CONFIG_BT_CTLR_ADV_INDICATION */

#if defined(CONFIG_BT_CTLR_ADV_EXT) || defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
#if defined(CONFIG_BT_CTLR_ADV_EXT) && !defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
	/* If no auxiliary PDUs scheduled, generate primary radio event done */
	if (!lll->aux)
#endif /* CONFIG_BT_CTLR_ADV_EXT && !CONFIG_BT_CTLR_JIT_SCHEDULING */

	{
		struct event_done_extra *extra;

		extra = ull_done_extra_type_set(EVENT_DONE_EXTRA_TYPE_ADV);
		LL_ASSERT(extra);
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT || CONFIG_BT_CTLR_JIT_SCHEDULING */

	lll_isr_cleanup(param);
}

static void isr_abort(void *param)
{
	/* Clear radio status and events */
	lll_isr_status_reset();

	/* Disable any filter that was setup */
	radio_filter_disable();

	/* Current LLL radio event is done */
	lll_isr_cleanup(param);
}

#if defined(CONFIG_BT_PERIPHERAL)
static void isr_abort_all(void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
	uint32_t ret;

	/* Clear radio status and events */
	lll_isr_status_reset();

	/* Disable any filter that was setup */
	radio_filter_disable();

	/* Current LLL radio event is done */
	lll_isr_cleanup(param);

	/* Abort any LLL prepare/resume enqueued in pipeline */
	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_LLL, 1U, &mfy);
	LL_ASSERT(!ret);
}
#endif /* CONFIG_BT_PERIPHERAL */

static struct pdu_adv *chan_prepare(struct lll_adv *lll)
{
	struct pdu_adv *pdu;
	uint8_t chan;
	uint8_t upd;

	chan = find_lsb_set(lll->chan_map_curr);
	LL_ASSERT(chan);

	lll->chan_map_curr &= (lll->chan_map_curr - 1);

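	/* find_lsb_set() returns a 1-based bit position, hence bit 0 of the
	 * channel map corresponds to primary advertising channel index 37
	 * (36 + 1).
	 */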
	lll_chan_set(36 + chan);

	/* FIXME: get the latest only when primary PDU without Aux PDUs */
	upd = 0U;
	pdu = lll_adv_data_latest_get(lll, &upd);
	LL_ASSERT(pdu);

	radio_pkt_tx_set(pdu);

	if ((pdu->type != PDU_ADV_TYPE_NONCONN_IND) &&
	    (!IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) ||
	     (pdu->type != PDU_ADV_TYPE_EXT_IND))) {
		struct pdu_adv *scan_pdu;

		scan_pdu = lll_adv_scan_rsp_latest_get(lll, &upd);
		LL_ASSERT(scan_pdu);

#if defined(CONFIG_BT_CTLR_PRIVACY)
		if (upd) {
			/* Copy the address from the adv packet we will send
			 * into the scan response.
			 */
			memcpy(&scan_pdu->scan_rsp.addr[0],
			       &pdu->adv_ind.addr[0], BDADDR_SIZE);
		}
#else
		ARG_UNUSED(scan_pdu);
		ARG_UNUSED(upd);
#endif /* !CONFIG_BT_CTLR_PRIVACY */

		radio_isr_set(isr_tx, lll);
		radio_tmr_tifs_set(EVENT_IFS_US);
		radio_switch_complete_and_rx(0);
	} else {
		radio_isr_set(isr_done, lll);
		radio_switch_complete_and_disable();
	}

	return pdu;
}

static inline int isr_rx_pdu(struct lll_adv *lll,
			     uint8_t devmatch_ok, uint8_t devmatch_id,
			     uint8_t irkmatch_ok, uint8_t irkmatch_id,
			     uint8_t rssi_ready)
{
	struct node_rx_pdu *node_rx;
	struct pdu_adv *pdu_adv;
	struct pdu_adv *pdu_rx;
	uint8_t tx_addr;
	uint8_t *addr;
	uint8_t rx_addr;
	uint8_t *tgt_addr;

#if defined(CONFIG_BT_CTLR_PRIVACY)
	/* An IRK match implies address resolution enabled */
	uint8_t rl_idx = irkmatch_ok ? ull_filter_lll_rl_irk_idx(irkmatch_id) :
				       FILTER_IDX_NONE;
#else
	uint8_t rl_idx = FILTER_IDX_NONE;
#endif /* CONFIG_BT_CTLR_PRIVACY */

	node_rx = ull_pdu_rx_alloc_peek(1);
	LL_ASSERT(node_rx);

	pdu_rx = (void *)node_rx->pdu;
	pdu_adv = lll_adv_data_curr_get(lll);

	addr = pdu_adv->adv_ind.addr;
	tx_addr = pdu_adv->tx_addr;

	if (pdu_adv->type == PDU_ADV_TYPE_DIRECT_IND) {
		tgt_addr = pdu_adv->direct_ind.tgt_addr;
	} else {
		tgt_addr = NULL;
	}
	rx_addr = pdu_adv->rx_addr;

	if ((pdu_rx->type == PDU_ADV_TYPE_SCAN_REQ) &&
	    (pdu_rx->len == sizeof(struct pdu_adv_scan_req)) &&
	    (tgt_addr == NULL) &&
	    lll_adv_scan_req_check(lll, pdu_rx, tx_addr, addr, devmatch_ok,
				   &rl_idx)) {
		radio_isr_set(isr_done, lll);
		radio_switch_complete_and_disable();
		radio_pkt_tx_set(lll_adv_scan_rsp_curr_get(lll));

		/* assert if radio packet ptr is not set and radio started tx */
		LL_ASSERT(!radio_is_ready());

		if (IS_ENABLED(CONFIG_BT_CTLR_PROFILE_ISR)) {
			lll_prof_cputime_capture();
		}

#if defined(CONFIG_BT_CTLR_SCAN_REQ_NOTIFY)
		if (!IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) ||
		    lll->scan_req_notify) {
			uint32_t err;

			/* Generate the scan request event */
			err = lll_adv_scan_req_report(lll, pdu_rx, rl_idx,
						      rssi_ready);
			if (err) {
				/* Scan Response will not be transmitted */
				return err;
			}
		}
#endif /* CONFIG_BT_CTLR_SCAN_REQ_NOTIFY */

#if defined(HAL_RADIO_GPIO_HAVE_PA_PIN)
		if (IS_ENABLED(CONFIG_BT_CTLR_PROFILE_ISR)) {
			/* PA/LNA enable is overwriting packet end used in ISR
			 * profiling, hence back it up for later use.
			 */
			lll_prof_radio_end_backup();
		}

		radio_gpio_pa_setup();
		radio_gpio_pa_lna_enable(radio_tmr_tifs_base_get() +
					 EVENT_IFS_US -
					 radio_rx_chain_delay_get(0, 0) -
					 HAL_RADIO_GPIO_PA_OFFSET);
#endif /* HAL_RADIO_GPIO_HAVE_PA_PIN */
		return 0;

#if defined(CONFIG_BT_PERIPHERAL)
	/* NOTE: Do not accept CONNECT_IND if the cancelled flag is set in
	 *       thread context when disabling connectable advertising. This is
	 *       to avoid any race in checking the initiated flag in thread
	 *       mode, which is set here when accepting a connection
	 *       establishment.
	 *
	 *       Under this race, the peer central would see a failure to
	 *       establish the connection as the disconnect reason. This is an
	 *       acceptable outcome that keeps the thread mode implementation
	 *       simple when disabling connectable advertising.
	 */
	} else if ((pdu_rx->type == PDU_ADV_TYPE_CONNECT_IND) &&
		   (pdu_rx->len == sizeof(struct pdu_adv_connect_ind)) &&
		   lll->conn && !lll->conn->periph.cancelled &&
		   lll_adv_connect_ind_check(lll, pdu_rx, tx_addr, addr,
					     rx_addr, tgt_addr,
					     devmatch_ok, &rl_idx)) {
		struct node_rx_ftr *ftr;
		struct node_rx_pdu *rx;

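		/* Peek one additional rx node when Channel Selection
		 * Algorithm #2 is supported; the extra node is consumed below
		 * as ftr->extra.
		 */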
		if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
			rx = ull_pdu_rx_alloc_peek(4);
		} else {
			rx = ull_pdu_rx_alloc_peek(3);
		}

		if (!rx) {
			return -ENOBUFS;
		}

		radio_isr_set(isr_abort_all, lll);
		radio_disable();

		/* assert if radio started tx */
		LL_ASSERT(!radio_is_ready());

		if (IS_ENABLED(CONFIG_BT_CTLR_PROFILE_ISR)) {
			lll_prof_cputime_capture();
		}

#if defined(CONFIG_BT_CTLR_CONN_RSSI)
		if (rssi_ready) {
			lll->conn->rssi_latest = radio_rssi_get();
		}
#endif /* CONFIG_BT_CTLR_CONN_RSSI */

		/* Stop further LLL radio events */
		lll->conn->periph.initiated = 1;

		rx = ull_pdu_rx_alloc();

		rx->hdr.type = NODE_RX_TYPE_CONNECTION;
		rx->hdr.handle = 0xffff;

		ftr = &(rx->rx_ftr);
		ftr->param = lll;
		ftr->ticks_anchor = radio_tmr_start_get();
		ftr->radio_end_us = radio_tmr_end_get() -
				    radio_rx_chain_delay_get(0, 0);

#if defined(CONFIG_BT_CTLR_PRIVACY)
		ftr->rl_idx = irkmatch_ok ? rl_idx : FILTER_IDX_NONE;
#endif /* CONFIG_BT_CTLR_PRIVACY */

		if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
			ftr->extra = ull_pdu_rx_alloc();
		}

		ull_rx_put_sched(rx->hdr.link, rx);

		return 0;
#endif /* CONFIG_BT_PERIPHERAL */
	}

	return -EINVAL;
}

static bool isr_rx_sr_adva_check(uint8_t tx_addr, uint8_t *addr,
				 struct pdu_adv *sr)
{
	return (tx_addr == sr->rx_addr) &&
		!memcmp(addr, sr->scan_req.adv_addr, BDADDR_SIZE);
}

static inline bool isr_rx_ci_tgta_check(struct lll_adv *lll,
					uint8_t rx_addr, uint8_t *tgt_addr,
					struct pdu_adv *ci, uint8_t rl_idx)
{
#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (rl_idx != FILTER_IDX_NONE && lll->rl_idx != FILTER_IDX_NONE) {
		return rl_idx == lll->rl_idx;
	}
#endif /* CONFIG_BT_CTLR_PRIVACY */
	return (rx_addr == ci->tx_addr) &&
	       !memcmp(tgt_addr, ci->connect_ind.init_addr, BDADDR_SIZE);
}

static inline bool isr_rx_ci_adva_check(uint8_t tx_addr, uint8_t *addr,
					struct pdu_adv *ci)
{
	return (tx_addr == ci->rx_addr) &&
		!memcmp(addr, ci->connect_ind.adv_addr, BDADDR_SIZE);
}

#if defined(CONFIG_ZTEST)
uint32_t lll_adv_free_pdu_fifo_count_get(void)
{
	return MFIFO_AVAIL_COUNT_GET(pdu_free);
}

uint32_t lll_adv_pdu_mem_free_count_get(void)
{
	return mem_free_count_get(mem_pdu.free);
}
#endif /* CONFIG_ZTEST */