/*
 * Copyright (c) 2020 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/slist.h>
#include <zephyr/sys/util.h>

#include "util/mem.h"
#include "util/memq.h"
#include "util/mayfly.h"
#include "util/util.h"
#include "util/dbuf.h"

#include "hal/ticker.h"
#include "hal/ccm.h"

#include "ticker/ticker.h"

#include "pdu_df.h"
#include "lll/pdu_vendor.h"
#include "pdu.h"

#include "lll.h"
#include "lll/lll_vendor.h"
#include "lll_scan.h"
#include "lll_scan_aux.h"
#include "lll/lll_df_types.h"
#include "lll_conn.h"
#include "lll_conn_iso.h"
#include "lll_sync.h"
#include "lll_sync_iso.h"
#include "lll/lll_adv_types.h"
#include "lll_adv.h"
#include "lll/lll_adv_pdu.h"

#include "ll_sw/ull_tx_queue.h"

#include "isoal.h"
#include "ull_scan_types.h"
#include "ull_conn_types.h"
#include "ull_iso_types.h"
#include "ull_conn_iso_types.h"
#include "ull_sync_types.h"
#include "ull_adv_types.h"
#include "ull_adv_internal.h"

#include "ull_internal.h"
#include "ull_scan_internal.h"
#include "ull_conn_internal.h"
#include "ull_sync_internal.h"
#include "ull_sync_iso_internal.h"
#include "ull_df_internal.h"

#include <zephyr/bluetooth/hci_types.h>

#include <soc.h>
#include "hal/debug.h"

static int init_reset(void);
static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param);
static void ticker_op_cb(uint32_t status, void *param);
static void flush_safe(void *param);
static void done_disabled_cb(void *param);

#if !defined(CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS)

static inline struct ll_scan_aux_set *aux_acquire(void);
static inline void aux_release(struct ll_scan_aux_set *aux);
static inline uint8_t aux_handle_get(struct ll_scan_aux_set *aux);
static void flush(void *param);
static void aux_sync_incomplete(void *param);

/* Auxiliary context pool used for reception of PDUs at aux offsets, common for
 * both Extended Advertising and Periodic Advertising.
 * Increasing the count allows simultaneous reception of interleaved chain PDUs
 * from multiple advertisers.
 */
static struct ll_scan_aux_set ll_scan_aux_pool[CONFIG_BT_CTLR_SCAN_AUX_SET];
static void *scan_aux_free;

#else /* CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */

static inline struct ll_scan_aux_chain *aux_chain_acquire(void);
static inline void aux_chain_release(struct ll_scan_aux_chain *chain);
struct ll_scan_aux_chain *scan_aux_chain_is_valid_get(struct ll_scan_aux_chain *chain);
struct ll_scan_aux_chain *lll_scan_aux_chain_is_valid_get(struct lll_scan_aux *lll);
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
static void aux_sync_incomplete(struct ll_scan_aux_chain *chain);
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
static void flush(struct ll_scan_aux_chain *chain);
static void chain_start_ticker(struct ll_scan_aux_chain *chain, bool replace);
static bool chain_insert_in_sched_list(struct ll_scan_aux_chain *chain);
static void chain_remove_from_list(struct ll_scan_aux_chain **head,
				   struct ll_scan_aux_chain *chain);
static void chain_append_to_list(struct ll_scan_aux_chain **head, struct ll_scan_aux_chain *chain);
static bool chain_is_in_list(struct ll_scan_aux_chain *head, struct ll_scan_aux_chain *chain);

/* Auxiliary context pool used for reception of PDUs at aux offsets, common for
 * both Extended Advertising and Periodic Advertising.
 * Increasing the count allows simultaneous reception of interleaved chain PDUs
 * from multiple advertisers.
 */
static struct ll_scan_aux_chain ll_scan_aux_pool[CONFIG_BT_CTLR_SCAN_AUX_CHAIN_COUNT];
static struct ll_scan_aux_set scan_aux_set;
static void *scan_aux_free;

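/* Note (assumption, usage outside this excerpt): semaphore used to let the
 * thread context wait for the flush of auxiliary chain contexts performed in
 * the ULL execution context when stopping.
 */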
static K_SEM_DEFINE(sem_scan_aux_stop, 0, 1);

#endif /* CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */

int ull_scan_aux_init(void)
{
	int err;

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

int ull_scan_aux_reset(void)
{
	int err;

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

static void rx_release_put(struct node_rx_pdu *rx)
{
	rx->hdr.type = NODE_RX_TYPE_RELEASE;

	ll_rx_put(rx->hdr.link, rx);
}

static inline struct ll_sync_set *sync_create_get(struct ll_scan_set *scan)
{
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	return (!scan->periodic.cancelled) ? scan->periodic.sync : NULL;
#else /* !CONFIG_BT_CTLR_SYNC_PERIODIC */
	return NULL;
#endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC */
}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
static inline struct ll_sync_iso_set *
	sync_iso_create_get(struct ll_sync_set *sync)
{
#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	return sync->iso.sync_iso;
#else /* !CONFIG_BT_CTLR_SYNC_ISO */
	return NULL;
#endif /* !CONFIG_BT_CTLR_SYNC_ISO */
}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

#if !defined(CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS)
void ull_scan_aux_setup(memq_link_t *link, struct node_rx_pdu *rx)
{
	struct node_rx_pdu *rx_incomplete;
	struct ll_sync_iso_set *sync_iso;
	struct pdu_adv_aux_ptr *aux_ptr;
	struct pdu_adv_com_ext_adv *p;
	uint32_t ticks_slot_overhead;
	struct lll_scan_aux *lll_aux;
	struct ll_scan_aux_set *aux;
	uint8_t ticker_yield_handle;
	uint32_t window_widening_us;
	uint32_t ticks_slot_offset;
	uint32_t ticks_aux_offset;
	struct pdu_adv_ext_hdr *h;
	struct lll_sync *sync_lll;
	struct ll_scan_set *scan;
	struct ll_sync_set *sync;
	struct pdu_adv_adi *adi;
	struct node_rx_ftr *ftr;
	uint32_t ready_delay_us;
	uint16_t window_size_us;
	uint32_t aux_offset_us;
	uint32_t ticker_status;
	struct lll_scan *lll;
	struct pdu_adv *pdu;
	uint8_t hdr_buf_len;
	uint8_t aux_handle;
	bool is_scan_req;
	uint8_t acad_len;
	uint8_t data_len;
	uint8_t hdr_len;
	uint32_t pdu_us;
	uint8_t phy_aux;
	uint8_t *ptr;
	uint8_t phy;

	is_scan_req = false;
	ftr = &rx->rx_ftr;

	switch (rx->hdr.type) {
	case NODE_RX_TYPE_EXT_1M_REPORT:
		lll_aux = NULL;
		aux = NULL;
		sync_lll = NULL;
		sync_iso = NULL;
		rx_incomplete = NULL;

		lll = ftr->param;
		LL_ASSERT(!lll->lll_aux);

		scan = HDR_LLL2ULL(lll);
		sync = sync_create_get(scan);
		phy = BT_HCI_LE_EXT_SCAN_PHY_1M;

		ticker_yield_handle = TICKER_ID_SCAN_BASE +
				      ull_scan_handle_get(scan);
		break;

#if defined(CONFIG_BT_CTLR_PHY_CODED)
	case NODE_RX_TYPE_EXT_CODED_REPORT:
		lll_aux = NULL;
		aux = NULL;
		sync_lll = NULL;
		sync_iso = NULL;
		rx_incomplete = NULL;

		lll = ftr->param;
		LL_ASSERT(!lll->lll_aux);

		scan = HDR_LLL2ULL(lll);
		sync = sync_create_get(scan);
		phy = BT_HCI_LE_EXT_SCAN_PHY_CODED;

		ticker_yield_handle = TICKER_ID_SCAN_BASE +
				      ull_scan_handle_get(scan);
		break;
#endif /* CONFIG_BT_CTLR_PHY_CODED */

	case NODE_RX_TYPE_EXT_AUX_REPORT:
		sync_iso = NULL;
		rx_incomplete = NULL;
		if (ull_scan_aux_is_valid_get(HDR_LLL2ULL(ftr->param))) {
			sync_lll = NULL;

			/* Node has valid aux context so its scan was scheduled
			 * from ULL.
			 */
			lll_aux = ftr->param;
			aux = HDR_LLL2ULL(lll_aux);

			/* aux parent will be NULL for periodic sync */
			lll = aux->parent;
			LL_ASSERT(lll);

			ticker_yield_handle = TICKER_ID_SCAN_AUX_BASE +
					      aux_handle_get(aux);

		} else if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) ||
			   ull_scan_is_valid_get(HDR_LLL2ULL(ftr->param))) {
			sync_lll = NULL;

			/* Node that does not have valid aux context but has
			 * valid scan set was scheduled from LLL.
			 */
			lll = ftr->param;

			/* We cannot retrieve the aux context that was stored
			 * in lll_scan when the superior PDU was handled, as it
			 * may be reset to NULL before this node rx is
			 * processed, i.e. when a new extended advertising
			 * chain is received before we get here.
			 */
			lll_aux = ftr->lll_aux;
			LL_ASSERT(lll_aux);

			aux = HDR_LLL2ULL(lll_aux);
			LL_ASSERT(lll == aux->parent);

			ticker_yield_handle = TICKER_NULL;

		} else {
			lll = NULL;

			/* If none of the above, node is part of sync scanning
			 */
			sync_lll = ftr->param;

			lll_aux = sync_lll->lll_aux;
			LL_ASSERT(lll_aux);

			aux = HDR_LLL2ULL(lll_aux);
			LL_ASSERT(sync_lll == aux->parent);

			ticker_yield_handle = TICKER_NULL;
		}

		if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || lll) {
			scan = HDR_LLL2ULL(lll);
			sync = (void *)scan;
			scan = ull_scan_is_valid_get(scan);
			if (scan) {
				sync = NULL;
			}
		} else {
			scan = NULL;
			sync = HDR_LLL2ULL(sync_lll);
		}

		phy = lll_aux->phy;
		if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || scan) {
			/* Here we are scanner context */
			sync = sync_create_get(scan);

			/* Generate report based on PHY scanned */
			switch (phy) {
			case PHY_1M:
				rx->hdr.type = NODE_RX_TYPE_EXT_1M_REPORT;
				break;
			case PHY_2M:
				rx->hdr.type = NODE_RX_TYPE_EXT_2M_REPORT;
				break;
#if defined(CONFIG_BT_CTLR_PHY_CODED)
			case PHY_CODED:
				rx->hdr.type = NODE_RX_TYPE_EXT_CODED_REPORT;
				break;
#endif /* CONFIG_BT_CTLR_PHY_CODED */
			default:
				LL_ASSERT(0);
				return;
			}

			/* Back up the scan requested flag as it is in a union
			 * with the `extra` struct member, which will be set to
			 * NULL in subsequent code.
			 */
			is_scan_req = !!ftr->scan_req;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		} else {
			/* Here we are periodic sync context */
			rx->hdr.type = NODE_RX_TYPE_SYNC_REPORT;
			rx->hdr.handle = ull_sync_handle_get(sync);

			/* Check if we need to create BIG sync */
			sync_iso = sync_iso_create_get(sync);

			/* lll_aux and aux are the auxiliary channel context;
			 * reuse the existing aux context to scan the chain.
			 * Hence, lll_aux and aux are not released or set to
			 * NULL.
			 */
			sync = NULL;
		}
		break;

	case NODE_RX_TYPE_SYNC_REPORT:
		{
			struct ll_sync_set *ull_sync;

			/* set the sync handle corresponding to the LLL context
			 * passed in the node rx footer field.
			 */
			sync_lll = ftr->param;
			LL_ASSERT(!sync_lll->lll_aux);

			ull_sync = HDR_LLL2ULL(sync_lll);
			rx->hdr.handle = ull_sync_handle_get(ull_sync);

			/* Check if we need to create BIG sync */
			sync_iso = sync_iso_create_get(ull_sync);

			/* FIXME: we will need lll_scan if chain was scheduled
			 *        from LLL; should we store lll_scan_set in
			 *        sync_lll instead?
			 */
			lll = NULL;
			lll_aux = NULL;
			aux = NULL;
			scan = NULL;
			sync = NULL;
			phy = sync_lll->phy;

			/* backup extra node_rx supplied for generating
			 * incomplete report
			 */
			rx_incomplete = ftr->extra;

			ticker_yield_handle = TICKER_ID_SCAN_SYNC_BASE +
					      ull_sync_handle_get(ull_sync);
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

		}
		break;
	default:
		LL_ASSERT(0);
		return;
	}

	rx->hdr.link = link;
	ftr->extra = NULL;

	ftr->aux_sched = 0U;

	pdu = (void *)rx->pdu;
	p = (void *)&pdu->adv_ext_ind;
	if (!pdu->len || !p->ext_hdr_len) {
		if (pdu->len) {
			data_len = pdu->len - PDU_AC_EXT_HEADER_SIZE_MIN;
		} else {
			data_len = 0U;
		}

		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync_lll) {
			struct ll_sync_set *sync_set;

			sync_set = HDR_LLL2ULL(sync_lll);
			ftr->aux_data_len = sync_set->data_len + data_len;
			sync_set->data_len = 0U;
		} else if (aux) {
			aux->data_len += data_len;
			ftr->aux_data_len = aux->data_len;
		} else {
			ftr->aux_data_len = data_len;
		}

		goto ull_scan_aux_rx_flush;
	}

	h = (void *)p->ext_hdr_adv_data;

	/* Note: The extended header contains an RFU flag that could
	 * potentially cause an incorrect calculation of the offset to the ACAD
	 * field if it were used to add a new header field. However, from the
	 * discussion in BT errata ES-8080 it seems clear that the BT SIG is
	 * aware that the RFU bit cannot be used to add a new field, since
	 * existing implementations would not be able to calculate the start of
	 * the ACAD in that case.
	 */

	ptr = h->data;

	if (h->adv_addr) {
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		/* Check if Periodic Advertising Synchronization is to be
		 * created
		 */
		if (sync && (scan->periodic.state != LL_SYNC_STATE_CREATED)) {
			/* Check address and update internal state */
#if defined(CONFIG_BT_CTLR_PRIVACY)
			ull_sync_setup_addr_check(sync, scan, pdu->tx_addr, ptr,
						  ftr->rl_idx);
#else /* !CONFIG_BT_CTLR_PRIVACY */
			ull_sync_setup_addr_check(sync, scan, pdu->tx_addr, ptr, 0U);
#endif /* !CONFIG_BT_CTLR_PRIVACY */

		}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

		ptr += BDADDR_SIZE;
	}

	if (h->tgt_addr) {
		ptr += BDADDR_SIZE;
	}

	if (h->cte_info) {
		ptr += sizeof(struct pdu_cte_info);
	}

	adi = NULL;
	if (h->adi) {
		adi = (void *)ptr;
		ptr += sizeof(*adi);
	}

	aux_ptr = NULL;
	if (h->aux_ptr) {
		aux_ptr = (void *)ptr;
		ptr += sizeof(*aux_ptr);
	}

	if (h->sync_info) {
		struct pdu_adv_sync_info *si;

		si = (void *)ptr;
		ptr += sizeof(*si);

		/* Check if Periodic Advertising Synchronization is to be
		 * created. Set up synchronization if the address and SID match
		 * an entry in the Periodic Advertiser List or the explicitly
		 * supplied address and SID.
		 */
		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && aux && sync && adi &&
		    ull_sync_setup_sid_match(sync, scan, PDU_ADV_ADI_SID_GET(adi))) {
			ull_sync_setup(scan, aux->lll.phy, rx, si);
		}
	}

	if (h->tx_pwr) {
		ptr++;
	}

	/* Calculate ACAD Len */
	hdr_len = ptr - (uint8_t *)p;
	hdr_buf_len = PDU_AC_EXT_HEADER_SIZE_MIN + p->ext_hdr_len;
	if (hdr_len > hdr_buf_len) {
		/* FIXME: Handle invalid header length */
		acad_len = 0U;
	} else {
		acad_len = hdr_buf_len - hdr_len;
		hdr_len += acad_len;
	}

	/* calculate total data length */
	if (hdr_len < pdu->len) {
		data_len = pdu->len - hdr_len;
	} else {
		data_len = 0U;
	}

	/* Periodic Advertising Channel Map Indication and/or Broadcast ISO
	 * synchronization
	 */
	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) &&
	    (rx->hdr.type == NODE_RX_TYPE_SYNC_REPORT) &&
	    acad_len) {
		/* Periodic Advertising Channel Map Indication */
		ull_sync_chm_update(rx->hdr.handle, ptr, acad_len);

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
		struct ll_sync_set *sync_set;
		struct pdu_big_info *bi;
		uint8_t bi_size;

		sync_set = HDR_LLL2ULL(sync_lll);

		/* Provide encryption information for BIG sync creation */
		bi_size = ptr[PDU_ADV_DATA_HEADER_LEN_OFFSET] -
			  PDU_ADV_DATA_HEADER_TYPE_SIZE;
		sync_set->enc = (bi_size == PDU_BIG_INFO_ENCRYPTED_SIZE);
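		/* Note: an encrypted BIGInfo is longer than an unencrypted one
		 * because it additionally carries the GIV and GSKD fields, so
		 * its length alone identifies an encrypted BIG.
		 */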

		/* Store number of BISes in the BIG */
		bi = (void *)&ptr[PDU_ADV_DATA_HEADER_DATA_OFFSET];
		sync_set->num_bis = PDU_BIG_INFO_NUM_BIS_GET(bi);

		/* Broadcast ISO synchronize */
		if (sync_iso) {
			ull_sync_iso_setup(sync_iso, rx, ptr, acad_len);
		}
#endif /* CONFIG_BT_CTLR_SYNC_ISO */
	}

	/* Do not ULL schedule auxiliary PDU reception if there is no aux
	 * pointer, the aux pointer offset is zero, scannable advertising
	 * erroneously has an aux pointer present, or the PHY in the aux
	 * pointer is invalid or unsupported.
	 */
	if (!aux_ptr || !PDU_ADV_AUX_PTR_OFFSET_GET(aux_ptr) || is_scan_req ||
	    (PDU_ADV_AUX_PTR_PHY_GET(aux_ptr) > EXT_ADV_AUX_PHY_LE_CODED) ||
	    (!IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED) &&
	     PDU_ADV_AUX_PTR_PHY_GET(aux_ptr) == EXT_ADV_AUX_PHY_LE_CODED)) {
		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync_lll) {
			struct ll_sync_set *sync_set;

			sync_set = HDR_LLL2ULL(sync_lll);
			ftr->aux_data_len = sync_set->data_len + data_len;
			sync_set->data_len = 0U;
		} else if (aux) {
			aux->data_len += data_len;
			ftr->aux_data_len = aux->data_len;
		} else {
			ftr->aux_data_len = data_len;
		}

		if (is_scan_req) {
			LL_ASSERT(aux && aux->rx_last);

			aux->rx_last->rx_ftr.extra = rx;
			aux->rx_last = rx;

			return;
		}

		goto ull_scan_aux_rx_flush;
	}

	/* Determine the window size */
	if (aux_ptr->offs_units) {
		window_size_us = OFFS_UNIT_300_US;
	} else {
		window_size_us = OFFS_UNIT_30_US;
	}

	/* Calculate the received aux offset needed for ULL to schedule the reception */
	aux_offset_us = (uint32_t)PDU_ADV_AUX_PTR_OFFSET_GET(aux_ptr) * window_size_us;

	/* Skip reception if invalid aux offset */
	pdu_us = PDU_AC_US(pdu->len, phy, ftr->phy_flags);
	if (unlikely(!AUX_OFFSET_IS_VALID(aux_offset_us, window_size_us, pdu_us))) {
		goto ull_scan_aux_rx_flush;
	}

	/* CA field contains the clock accuracy of the advertiser;
	 * 0 - 51 ppm to 500 ppm
	 * 1 - 0 ppm to 50 ppm
	 */
	if (aux_ptr->ca) {
		window_widening_us = SCA_DRIFT_50_PPM_US(aux_offset_us);
	} else {
		window_widening_us = SCA_DRIFT_500_PPM_US(aux_offset_us);
	}
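
	/* Illustration: an aux offset field of 10 in 300 us units gives
	 * aux_offset_us = 10 * 300 = 3000 us; with ca = 0 (up to 500 ppm
	 * drift) the window is widened by about 3000 * 500 / 1000000 us,
	 * i.e. roughly 2 us, subject to rounding in the SCA_DRIFT macro.
	 */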

	phy_aux = BIT(PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
	ready_delay_us = lll_radio_rx_ready_delay_get(phy_aux, PHY_FLAGS_S8);

	/* Calculate the aux offset from start of the scan window */
	aux_offset_us += ftr->radio_end_us;
	aux_offset_us -= pdu_us;
	aux_offset_us -= EVENT_TICKER_RES_MARGIN_US;
	aux_offset_us -= EVENT_JITTER_US;
	aux_offset_us -= ready_delay_us;
	aux_offset_us -= window_widening_us;

	ticks_aux_offset = HAL_TICKER_US_TO_TICKS(aux_offset_us);

	/* Check if too late to ULL schedule an auxiliary PDU reception */
	if (!ftr->aux_lll_sched) {
		uint32_t ticks_at_expire;
		uint32_t overhead_us;
		uint32_t ticks_now;
		uint32_t diff;

		/* CPU execution overhead to setup the radio for reception plus the
		 * minimum prepare tick offset. And allow one additional event in
		 * between as overhead (say, an advertising event in between got closed
		 * when reception for auxiliary PDU is being setup).
		 */
		overhead_us = (EVENT_OVERHEAD_END_US + EVENT_OVERHEAD_START_US +
			       HAL_TICKER_TICKS_TO_US(HAL_TICKER_CNTR_CMP_OFFSET_MIN)) << 1;

		ticks_now = ticker_ticks_now_get();
		ticks_at_expire = ftr->ticks_anchor + ticks_aux_offset -
				  HAL_TICKER_US_TO_TICKS(overhead_us);
		diff = ticker_ticks_diff_get(ticks_now, ticks_at_expire);
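
		/* diff is (ticks_now - ticks_at_expire) modulo the counter
		 * width; its most-significant bit set means ticks_at_expire is
		 * still in the future. If the bit is clear, the expiry is
		 * already due and it is too late to ULL schedule.
		 */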
		if ((diff & BIT(HAL_TICKER_CNTR_MSBIT)) == 0U) {
			goto ull_scan_aux_rx_flush;
		}
	}

	if (!aux) {
		aux = aux_acquire();
		if (!aux) {
			/* As LLL scheduling has been used and will fail due to
			 * failure to allocate an aux context, a sync report
			 * with the aux_failed flag set will be generated. Let
			 * the current sync report be marked as partial, and
			 * let the sync report corresponding to
			 * ull_scan_aux_release carry the incomplete data
			 * status.
			 */
			if (ftr->aux_lll_sched) {
				ftr->aux_sched = 1U;
			}

			if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) &&
			    sync_lll) {
				struct ll_sync_set *sync_set;

				sync_set = HDR_LLL2ULL(sync_lll);
				ftr->aux_data_len = sync_set->data_len + data_len;
				sync_set->data_len = 0U;

			}

			goto ull_scan_aux_rx_flush;
		}

		aux->rx_head = aux->rx_last = NULL;
		aux->data_len = data_len;
		lll_aux = &aux->lll;
		lll_aux->is_chain_sched = 0U;

		ull_hdr_init(&aux->ull);
		lll_hdr_init(lll_aux, aux);

		aux->parent = lll ? (void *)lll : (void *)sync_lll;
#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
		if (lll) {
			lll_aux->hdr.score = lll->scan_aux_score;
		}
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		aux->rx_incomplete = rx_incomplete;
		rx_incomplete = NULL;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

	} else if (!(IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync_lll)) {
		aux->data_len += data_len;

		/* Flush auxiliary PDU receptions and stop any more ULL
		 * scheduling if accumulated data length exceeds configured
		 * maximum supported.
		 */
		if (aux->data_len >= CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX) {
			/* If LLL has already scheduled, then let it proceed.
			 *
			 * TODO: LLL to check accumulated data length and
			 *       stop further reception.
			 *       Currently LLL will schedule as long as there
			 *       are free node rx available.
			 */
			if (!ftr->aux_lll_sched) {
				goto ull_scan_aux_rx_flush;
			}
		}
	}

	/* In sync context we can dispatch rx immediately; in scan context we
	 * enqueue rx in the aux context and flush them after the scan is
	 * complete.
	 */
	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync_lll) {
		struct ll_sync_set *sync_set;

		sync_set = HDR_LLL2ULL(sync_lll);
		sync_set->data_len += data_len;
		ftr->aux_data_len = sync_set->data_len;

		/* Flush auxiliary PDU receptions and stop any more ULL
		 * scheduling if accumulated data length exceeds configured
		 * maximum supported.
		 */
		if (sync_set->data_len >= CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX) {
			/* If LLL has already scheduled, then let it proceed.
			 *
			 * TODO: LLL to check accumulated data length and
			 *       stop further reception.
			 *       Currently LLL will schedule as long as there
			 *       are free node rx available.
			 */
			if (!ftr->aux_lll_sched) {
				sync_set->data_len = 0U;
				goto ull_scan_aux_rx_flush;
			}
		}
	} else {
		if (aux->rx_last) {
			aux->rx_last->rx_ftr.extra = rx;
		} else {
			aux->rx_head = rx;
		}
		aux->rx_last = rx;

		ftr->aux_data_len = aux->data_len;
	}

	/* Initialize the channel index and PHY for the Auxiliary PDU reception.
	 */
	lll_aux->chan = aux_ptr->chan_idx;
	lll_aux->phy = phy_aux;

	/* See if this was already scheduled from LLL. If so, store the aux
	 * context in the global scan struct so we can pick it up when the
	 * scanned node is received with a valid context.
	 */
	if (ftr->aux_lll_sched) {
		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync_lll) {
			/* Associate Sync context with the Aux context so that
			 * it can continue reception in LLL scheduling.
			 */
			sync_lll->lll_aux = lll_aux;

			/* AUX_ADV_IND/AUX_CHAIN_IND PDU reception is being
			 * setup
			 */
			ftr->aux_sched = 1U;

			/* In sync context, dispatch immediately */
			ll_rx_put_sched(link, rx);
		} else {
			/* Check that the scan context is not already using LLL
			 * scheduling, or that, when receiving a chain, it
			 * reuses the same aux context.
			 */
			LL_ASSERT(!lll->lll_aux || (lll->lll_aux == lll_aux));

			/* Associate Scan context with the Aux context so that
			 * it can continue reception in LLL scheduling.
			 */
			lll->lll_aux = lll_aux;

			/* AUX_ADV_IND/AUX_CHAIN_IND PDU reception is being
			 * setup
			 */
			ftr->aux_sched = 1U;
		}

		/* Reset auxiliary channel PDU scan state which otherwise is
		 * done in the prepare_cb when ULL scheduling is used.
		 */
		lll_aux->state = 0U;

		return;
	}

	/* Switching to ULL scheduling to receive auxiliary PDUs */
	if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || lll) {
		LL_ASSERT(scan);

		/* Do not ULL schedule if scan disable requested */
		if (unlikely(scan->is_stop)) {
			goto ull_scan_aux_rx_flush;
		}
	} else {
		struct ll_sync_set *sync_set;

		LL_ASSERT(sync_lll &&
			  (!sync_lll->lll_aux || sync_lll->lll_aux == lll_aux));

		/* Do not ULL schedule if sync terminate requested */
		sync_set = HDR_LLL2ULL(sync_lll);
		if (unlikely(sync_set->is_stop)) {
			goto ull_scan_aux_rx_flush;
		}

		/* Associate the auxiliary context with the sync context. We do
		 * this for ULL scheduling too, in contrast to extended
		 * advertising, which only associates the contexts when LLL
		 * scheduling is used. Each Periodic Advertising chain is
		 * received by a unique sync context; hence LLL and ULL
		 * scheduling are always associated with the same unique sync
		 * context.
		 */
		sync_lll->lll_aux = lll_aux;

		/* Back up the node rx to be dispatched on successful ULL
		 * scheduling setup.
		 */
		aux->rx_head = rx;
	}

	/* TODO: active_to_start feature port */
	aux->ull.ticks_active_to_start = 0;
	aux->ull.ticks_prepare_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	aux->ull.ticks_preempt_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
	aux->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(
		EVENT_OVERHEAD_START_US + ready_delay_us +
		PDU_AC_MAX_US(PDU_AC_EXT_PAYLOAD_RX_SIZE, lll_aux->phy) +
		EVENT_OVERHEAD_END_US);

	ticks_slot_offset = MAX(aux->ull.ticks_active_to_start,
				aux->ull.ticks_prepare_to_start);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}
	ticks_slot_offset += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

	/* Initialize the window size for the Auxiliary PDU reception. */
	lll_aux->window_size_us = window_size_us;
	lll_aux->window_size_us += ((EVENT_TICKER_RES_MARGIN_US + EVENT_JITTER_US +
				     window_widening_us) << 1);
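
	/* The receive window is opened early by (margin + jitter + widening)
	 * and kept open equally long past the nominal offset, hence the sum is
	 * doubled above.
	 */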

#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	/* Disable the ticker job so that the yield and start operations are
	 * chained, reducing CPU use from successive calls to ticker_job().
	 */
	mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 0);
#endif

	/* Yield the primary scan window, auxiliary event, or periodic sync
	 * event in the ticker.
	 */
	if (ticker_yield_handle != TICKER_NULL) {
		ticker_status = ticker_yield_abs(TICKER_INSTANCE_ID_CTLR,
						 TICKER_USER_ID_ULL_HIGH,
						 ticker_yield_handle,
						 (ftr->ticks_anchor +
						  ticks_aux_offset -
						  ticks_slot_offset),
						 NULL, NULL);
		LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
			  (ticker_status == TICKER_STATUS_BUSY));
	}

	aux_handle = aux_handle_get(aux);
	ticker_status = ticker_start(TICKER_INSTANCE_ID_CTLR,
				     TICKER_USER_ID_ULL_HIGH,
				     TICKER_ID_SCAN_AUX_BASE + aux_handle,
				     ftr->ticks_anchor - ticks_slot_offset,
				     ticks_aux_offset,
				     TICKER_NULL_PERIOD,
				     TICKER_NULL_REMAINDER,
				     TICKER_NULL_LAZY,
				     (aux->ull.ticks_slot +
				      ticks_slot_overhead),
				     ticker_cb, aux, ticker_op_cb, aux);
	LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
		  (ticker_status == TICKER_STATUS_BUSY) ||
		  ((ticker_status == TICKER_STATUS_FAILURE) &&
		   IS_ENABLED(CONFIG_BT_TICKER_LOW_LAT)));

#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	/* enable ticker job, queued ticker operation will be handled
	 * thereafter.
	 */
	mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1);
#endif

	return;

ull_scan_aux_rx_flush:
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	if (sync && (scan->periodic.state != LL_SYNC_STATE_CREATED)) {
		scan->periodic.state = LL_SYNC_STATE_IDLE;
	}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

	if (aux) {
		/* Enqueue last rx in aux context if possible, otherwise send
		 * immediately since we are in sync context.
		 */
		if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || aux->rx_last) {
			LL_ASSERT(scan);

			/* If the scan is being disabled, rx could already have
			 * been enqueued before reaching
			 * ull_scan_aux_rx_flush. If rx is not the last in the
			 * list of received PDUs, add it; otherwise do not add
			 * it, to avoid duplicate report generation, double
			 * release, and a probable infinite loop while
			 * processing the list.
			 */
			if (unlikely(scan->is_stop)) {
				/* Add the node rx to the aux context list of
				 * node rx if not already added when reaching
				 * ull_scan_aux_rx_flush. This handles a race
				 * condition wherein the last PDU in the chain
				 * is received at the same time as the scan is
				 * being disabled.
				 */
				if (aux->rx_last != rx) {
					aux->rx_last->rx_ftr.extra = rx;
					aux->rx_last = rx;
				}

				return;
			}

			aux->rx_last->rx_ftr.extra = rx;
			aux->rx_last = rx;
		} else {
			const struct ll_sync_set *sync_set;

			LL_ASSERT(sync_lll);

			ll_rx_put_sched(link, rx);

			sync_set = HDR_LLL2ULL(sync_lll);
			if (unlikely(sync_set->is_stop && sync_lll->lll_aux)) {
				return;
			}
		}

		LL_ASSERT(aux->parent);

		flush_safe(aux);

		return;
	}

	ll_rx_put(link, rx);

	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && rx_incomplete) {
		rx_release_put(rx_incomplete);
	}

	ll_rx_sched();
}

void ull_scan_aux_done(struct node_rx_event_done *done)
{
	struct ll_scan_aux_set *aux;

	/* Get reference to ULL context */
	aux = CONTAINER_OF(done->param, struct ll_scan_aux_set, ull);

	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) &&
	    !ull_scan_aux_is_valid_get(aux)) {
		struct ll_sync_set *sync;

		sync = CONTAINER_OF(done->param, struct ll_sync_set, ull);
		LL_ASSERT(ull_sync_is_valid_get(sync));

		/* Auxiliary context will be flushed by ull_scan_aux_stop() */
		if (unlikely(sync->is_stop) || !sync->lll.lll_aux) {
			return;
		}

		aux = HDR_LLL2ULL(sync->lll.lll_aux);
		LL_ASSERT(aux->parent);
	} else {
		struct ll_scan_set *scan;
		struct lll_scan *lll;

		lll = aux->parent;
		LL_ASSERT(lll);

		scan = HDR_LLL2ULL(lll);
		LL_ASSERT(ull_scan_is_valid_get(scan));

		/* Auxiliary context will be flushed by ull_scan_aux_stop() */
		if (unlikely(scan->is_stop)) {
			return;
		}
	}

	flush(aux);
}

struct ll_scan_aux_set *ull_scan_aux_set_get(uint8_t handle)
{
	if (handle >= CONFIG_BT_CTLR_SCAN_AUX_SET) {
		return NULL;
	}

	return &ll_scan_aux_pool[handle];
}

uint8_t ull_scan_aux_lll_handle_get(struct lll_scan_aux *lll)
{
	struct ll_scan_aux_set *aux;

	aux = HDR_LLL2ULL(lll);

	return aux_handle_get(aux);
}

void *ull_scan_aux_lll_parent_get(struct lll_scan_aux *lll,
				  uint8_t *is_lll_scan)
{
	struct ll_scan_aux_set *aux;

	aux = HDR_LLL2ULL(lll);

	if (is_lll_scan) {
		struct ll_scan_set *scan;
		struct lll_scan *lllscan;

		lllscan = aux->parent;
		LL_ASSERT(lllscan);

		scan = HDR_LLL2ULL(lllscan);
		*is_lll_scan = !!ull_scan_is_valid_get(scan);
	}

	return aux->parent;
}

struct ll_scan_aux_set *ull_scan_aux_is_valid_get(struct ll_scan_aux_set *aux)
{
	if (((uint8_t *)aux < (uint8_t *)ll_scan_aux_pool) ||
	    ((uint8_t *)aux > ((uint8_t *)ll_scan_aux_pool +
			       (sizeof(struct ll_scan_aux_set) *
				(CONFIG_BT_CTLR_SCAN_AUX_SET - 1))))) {
		return NULL;
	}

	return aux;
}

struct lll_scan_aux *ull_scan_aux_lll_is_valid_get(struct lll_scan_aux *lll)
{
	struct ll_scan_aux_set *aux;

	aux = HDR_LLL2ULL(lll);
	aux = ull_scan_aux_is_valid_get(aux);
	if (aux) {
		return &aux->lll;
	}

	return NULL;
}

void ull_scan_aux_release(memq_link_t *link, struct node_rx_pdu *rx)
{
	struct lll_scan_aux *lll_aux;
	void *param_ull;

	param_ull = HDR_LLL2ULL(rx->rx_ftr.param);

	if (ull_scan_is_valid_get(param_ull)) {
		struct lll_scan *lll;

		/* Mark the buffer for release */
		rx->hdr.type = NODE_RX_TYPE_RELEASE;

		lll = rx->rx_ftr.param;
		lll_aux = lll->lll_aux;

	} else if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) ||
		   ull_scan_aux_is_valid_get(param_ull)) {
		/* Mark the buffer for release */
		rx->hdr.type = NODE_RX_TYPE_RELEASE;

		lll_aux = rx->rx_ftr.param;

	} else if (ull_sync_is_valid_get(param_ull)) {
		struct ll_sync_set *sync;
		struct lll_sync *lll;

		sync = param_ull;

		/* reset data len total */
		sync->data_len = 0U;

		lll = rx->rx_ftr.param;
		lll_aux = lll->lll_aux;

		/* Change node type so HCI can dispatch report for truncated
		 * data properly.
		 */
		rx->hdr.type = NODE_RX_TYPE_SYNC_REPORT;
		rx->hdr.handle = ull_sync_handle_get(sync);

		/* Dequeue will try to release the list of node rx; set the
		 * extra pointer to NULL.
		 */
		rx->rx_ftr.extra = NULL;

	} else {
		LL_ASSERT(0);
		lll_aux = NULL;
	}

	if (lll_aux) {
		struct ll_scan_aux_set *aux;
		struct ll_scan_set *scan;
		struct lll_scan *lll;
		uint8_t is_stop;

		aux = HDR_LLL2ULL(lll_aux);
		lll = aux->parent;
		LL_ASSERT(lll);

		scan = HDR_LLL2ULL(lll);
		scan = ull_scan_is_valid_get(scan);
		if (scan) {
			is_stop = scan->is_stop;
		} else {
			struct lll_sync *sync_lll;
			struct ll_sync_set *sync;

			sync_lll = (void *)lll;
			sync = HDR_LLL2ULL(sync_lll);
			is_stop = sync->is_stop;
		}

		if (!is_stop) {
			LL_ASSERT(aux->parent);

			flush_safe(aux);

		} else if (!scan) {
			/* Sync terminate requested; enqueue the node rx so
			 * that it will be flushed by ull_scan_aux_stop().
			 */
			rx->hdr.link = link;
			if (aux->rx_last) {
				aux->rx_last->rx_ftr.extra = rx;
			} else {
				aux->rx_head = rx;
			}
			aux->rx_last = rx;

			return;
		}
	}

	ll_rx_put(link, rx);
	ll_rx_sched();
}

int ull_scan_aux_stop(struct ll_scan_aux_set *aux)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, NULL};
	uint8_t aux_handle;
	uint32_t ret;
	int err;

	/* Stop any ULL scheduling of auxiliary PDU scan */
	aux_handle = aux_handle_get(aux);
	err = ull_ticker_stop_with_mark(TICKER_ID_SCAN_AUX_BASE + aux_handle,
					aux, &aux->lll);
	if (err && (err != -EALREADY)) {
		return err;
	}

	/* Abort LLL event if ULL scheduling not used or already in prepare */
	if (err == -EALREADY) {
		err = ull_disable(&aux->lll);
		if (err && (err != -EALREADY)) {
			return err;
		}

		mfy.fp = flush;

	} else if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC)) {
		/* ULL scan auxiliary PDU reception scheduling stopped
		 * before prepare.
		 */
		mfy.fp = flush;

	} else {
		struct ll_scan_set *scan;
		struct lll_scan *lll;

		lll = aux->parent;
		LL_ASSERT(lll);

		scan = HDR_LLL2ULL(lll);
		scan = ull_scan_is_valid_get(scan);
		if (scan) {
			/* ULL scan auxiliary PDU reception scheduling stopped
			 * before prepare.
			 */
			mfy.fp = flush;
		} else {
			/* ULL sync chain reception scheduling stopped before
			 * prepare.
			 */
			mfy.fp = aux_sync_incomplete;
		}
	}

	/* Release auxiliary context in ULL execution context */
	mfy.param = aux;
	ret = mayfly_enqueue(TICKER_USER_ID_THREAD, TICKER_USER_ID_ULL_HIGH,
			     0, &mfy);
	LL_ASSERT(!ret);

	return 0;
}

static int init_reset(void)
{
	/* Initialize the scan aux pool. */
	mem_init(ll_scan_aux_pool, sizeof(struct ll_scan_aux_set),
		 sizeof(ll_scan_aux_pool) / sizeof(struct ll_scan_aux_set),
		 &scan_aux_free);

	return 0;
}

static inline struct ll_scan_aux_set *aux_acquire(void)
{
	return mem_acquire(&scan_aux_free);
}

static inline void aux_release(struct ll_scan_aux_set *aux)
{
	/* Clear the parent so that, when the scan is being disabled, this
	 * auxiliary context is no longer associated with it and does not get
	 * disabled again.
	 */
	LL_ASSERT(aux->parent);
	aux->parent = NULL;

	mem_release(aux, &scan_aux_free);
}

static inline uint8_t aux_handle_get(struct ll_scan_aux_set *aux)
{
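	/* The handle is the zero-based index of the context within
	 * ll_scan_aux_pool; it also selects the context's ticker id,
	 * TICKER_ID_SCAN_AUX_BASE + handle.
	 */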
	return mem_index_get(aux, ll_scan_aux_pool,
			     sizeof(struct ll_scan_aux_set));
}

static void done_disabled_cb(void *param)
{
	struct ll_scan_aux_set *aux;

	aux = param;
	LL_ASSERT(aux->parent);

	flush(aux);
}

static void flush_safe(void *param)
{
	struct ll_scan_aux_set *aux;
	struct ull_hdr *hdr;
	uint8_t ref;

	aux = param;
	LL_ASSERT(aux->parent);

	/* ref == 0
	 * All PDUs were scheduled from LLL and there is no pending done
	 * event, we can flush here.
	 *
	 * ref == 1
	 * There is pending done event so we need to flush from disabled
	 * callback. Flushing here would release aux context and thus
	 * ull_hdr before done event was processed.
	 */
	hdr = &aux->ull;
	ref = ull_ref_get(hdr);
	if (ref == 0U) {
		flush(aux);
	} else {
		/* A specific single shot scheduled aux context
		 * cannot overlap, i.e. ULL reference count
		 * shall be less than 2.
		 */
		LL_ASSERT(ref < 2U);

		LL_ASSERT(!hdr->disabled_cb);
		hdr->disabled_param = aux;
		hdr->disabled_cb = done_disabled_cb;
	}
}

static void flush(void *param)
{
	struct ll_scan_aux_set *aux;
	struct ll_scan_set *scan;
	struct node_rx_pdu *rx;
	struct lll_scan *lll;
	bool sched = false;

	/* Debug check that parent was assigned when allocated for reception of
	 * auxiliary channel PDUs.
	 */
	aux = param;
	LL_ASSERT(aux->parent);

	rx = aux->rx_head;
	if (rx) {
		aux->rx_head = NULL;

		ll_rx_put(rx->hdr.link, rx);
		sched = true;
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	rx = aux->rx_incomplete;
	if (rx) {
		aux->rx_incomplete = NULL;

		rx_release_put(rx);
		sched = true;
	}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

	if (sched) {
		ll_rx_sched();
	}

	lll = aux->parent;
	scan = HDR_LLL2ULL(lll);
	scan = ull_scan_is_valid_get(scan);
	if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || scan) {
#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
		lll->scan_aux_score = aux->lll.hdr.score;
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
	}

	aux_release(aux);
}

static void aux_sync_partial(void *param)
{
	struct ll_scan_aux_set *aux;
	struct node_rx_pdu *rx;

	aux = param;
	rx = aux->rx_head;
	aux->rx_head = NULL;

	LL_ASSERT(rx);
	rx->rx_ftr.aux_sched = 1U;

	ll_rx_put_sched(rx->hdr.link, rx);
}

static void aux_sync_incomplete(void *param)
{
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	struct ll_scan_aux_set *aux;

	aux = param;
	LL_ASSERT(aux->parent);

	/* ULL scheduling succeeded, hence no backup node rx is present; use
	 * the extra node rx reserved for incomplete data status generation.
	 */
	if (!aux->rx_head) {
		struct ll_sync_set *sync;
		struct node_rx_pdu *rx;
		struct lll_sync *lll;

		/* get reference to sync context */
		lll = aux->parent;
		LL_ASSERT(lll);
		sync = HDR_LLL2ULL(lll);

		/* reset data len total */
		sync->data_len = 0U;

		/* pick extra node rx stored in aux context */
		rx = aux->rx_incomplete;
		LL_ASSERT(rx);
		aux->rx_incomplete = NULL;

		/* prepare sync report with failure */
		rx->hdr.type = NODE_RX_TYPE_SYNC_REPORT;
		rx->hdr.handle = ull_sync_handle_get(sync);
		rx->rx_ftr.param = lll;

		/* flag chain reception failure */
		rx->rx_ftr.aux_failed = 1U;

		/* Dequeue will try to release the list of node rx;
		 * set the extra pointer to NULL.
		 */
		rx->rx_ftr.extra = NULL;

		/* add to rx list, will be flushed */
		aux->rx_head = rx;
	}

	LL_ASSERT(!ull_ref_get(&aux->ull));

	flush(aux);
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
}

static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_scan_aux_prepare};
	struct ll_scan_aux_set *aux = param;
	static struct lll_prepare_param p;
	uint32_t ret;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_O(1);

	/* Increment prepare reference count */
	ref = ull_ref_inc(&aux->ull);
	LL_ASSERT(ref);

	/* Append timing parameters */
	p.ticks_at_expire = ticks_at_expire;
	p.remainder = 0; /* FIXME: remainder; */
	p.lazy = lazy;
	p.force = force;
	p.param = &aux->lll;
	mfy.param = &p;

	/* Kick LLL prepare */
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
			     0, &mfy);
	LL_ASSERT(!ret);

	DEBUG_RADIO_PREPARE_O(1);
}

static void ticker_op_cb(uint32_t status, void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, NULL};
	struct ll_sync_set *sync;
	uint32_t ret;

	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC)) {
		struct ll_scan_aux_set *aux;
		struct lll_sync *sync_lll;

		aux = param;
		sync_lll = aux->parent;
		LL_ASSERT(sync_lll);

		sync = HDR_LLL2ULL(sync_lll);
		sync = ull_sync_is_valid_get(sync);
	} else {
		sync = NULL;
	}

	if (status == TICKER_STATUS_SUCCESS) {
		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync) {
			mfy.fp = aux_sync_partial;
		} else {
			return;
		}
	} else {
		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync) {
			mfy.fp = aux_sync_incomplete;
		} else {
			struct ll_scan_aux_set *aux;

			aux = param;
			LL_ASSERT(aux->parent);

			mfy.fp = flush_safe;
		}
	}

	mfy.param = param;

	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW, TICKER_USER_ID_ULL_HIGH,
			     0, &mfy);
	LL_ASSERT(!ret);
}

#else /* CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */

void ull_scan_aux_setup(memq_link_t *link, struct node_rx_pdu *rx)
{
	struct node_rx_pdu *rx_incomplete;
	struct ll_sync_iso_set *sync_iso;
	struct ll_scan_aux_chain *chain;
	struct pdu_adv_aux_ptr *aux_ptr;
	struct pdu_adv_com_ext_adv *p;
	struct lll_scan_aux *lll_aux;
	uint32_t window_widening_us;
	uint32_t ticks_aux_offset;
	struct pdu_adv_ext_hdr *h;
	struct lll_sync *sync_lll;
	struct ll_scan_set *scan;
	struct ll_sync_set *sync;
	struct pdu_adv_adi *adi;
	struct node_rx_ftr *ftr;
	uint32_t ready_delay_us;
	uint16_t window_size_us;
	uint32_t aux_offset_us;
	struct lll_scan *lll;
	struct pdu_adv *pdu;
	uint8_t hdr_buf_len;
	bool is_scan_req;
	uint8_t acad_len;
	uint8_t data_len;
	uint8_t hdr_len;
	uint32_t pdu_us;
	uint8_t phy_aux;
	uint8_t *ptr;
	uint8_t phy;

	is_scan_req = false;
	ftr = &rx->rx_ftr;

	switch (rx->hdr.type) {
	case NODE_RX_TYPE_EXT_1M_REPORT:
		lll_aux = NULL;
		chain = NULL;
		sync_lll = NULL;
		sync_iso = NULL;
		rx_incomplete = NULL;

		lll = ftr->param;
		LL_ASSERT(!lll->lll_aux);

		scan = HDR_LLL2ULL(lll);
		sync = sync_create_get(scan);
		phy = BT_HCI_LE_EXT_SCAN_PHY_1M;
		break;

#if defined(CONFIG_BT_CTLR_PHY_CODED)
	case NODE_RX_TYPE_EXT_CODED_REPORT:
		lll_aux = NULL;
		chain = NULL;
		sync_lll = NULL;
		sync_iso = NULL;
		rx_incomplete = NULL;

		lll = ftr->param;
		LL_ASSERT(!lll->lll_aux);

		scan = HDR_LLL2ULL(lll);
		sync = sync_create_get(scan);
		phy = BT_HCI_LE_EXT_SCAN_PHY_CODED;
		break;
#endif /* CONFIG_BT_CTLR_PHY_CODED */

	case NODE_RX_TYPE_EXT_AUX_REPORT:
		sync_iso = NULL;
		rx_incomplete = NULL;
		if (lll_scan_aux_chain_is_valid_get(ftr->param)) {
			sync_lll = NULL;

			/* Node has valid chain context so its scan was
			 * scheduled from ULL.
			 */
			lll_aux = ftr->param;
			chain = CONTAINER_OF(lll_aux, struct ll_scan_aux_chain, lll);

			/* chain parent will be NULL for periodic sync */
			lll = chain->parent;
			LL_ASSERT(lll);

		} else if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) ||
			   ull_scan_is_valid_get(HDR_LLL2ULL(ftr->param))) {
			sync_lll = NULL;

			/* Node that does not have a valid chain context but
			 * has a valid scan set was scheduled from LLL. We can
			 * retrieve the chain context from lll_scan as it was
			 * stored there when the superior PDU was handled.
			 */
			lll = ftr->param;

			lll_aux = lll->lll_aux;
			LL_ASSERT(lll_aux);

			chain = CONTAINER_OF(lll_aux, struct ll_scan_aux_chain, lll);
			LL_ASSERT(lll == chain->parent);

		} else {
			lll = NULL;

			/* If none of the above, node is part of sync scanning
			 */
			sync_lll = ftr->param;

			lll_aux = sync_lll->lll_aux;
			LL_ASSERT(lll_aux);

			chain = CONTAINER_OF(lll_aux, struct ll_scan_aux_chain, lll);
			LL_ASSERT(sync_lll == chain->parent);
		}

		if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || lll) {
			scan = HDR_LLL2ULL(lll);
			sync = (void *)scan;
			scan = ull_scan_is_valid_get(scan);
			if (scan) {
				sync = NULL;
			}
		} else {
			scan = NULL;
			sync = HDR_LLL2ULL(sync_lll);
		}

		phy = lll_aux->phy;
		if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || scan) {
			/* Here we are scanner context */
			sync = sync_create_get(scan);

			/* Generate report based on PHY scanned */
			switch (phy) {
			case PHY_1M:
				rx->hdr.type = NODE_RX_TYPE_EXT_1M_REPORT;
				break;
			case PHY_2M:
				rx->hdr.type = NODE_RX_TYPE_EXT_2M_REPORT;
				break;
#if defined(CONFIG_BT_CTLR_PHY_CODED)
			case PHY_CODED:
				rx->hdr.type = NODE_RX_TYPE_EXT_CODED_REPORT;
				break;
#endif /* CONFIG_BT_CTLR_PHY_CODED */
			default:
				LL_ASSERT(0);
				return;
			}

			/* Back up the scan requested flag as it is in a union
			 * with the `extra` struct member, which will be set to
			 * NULL in subsequent code.
			 */
			is_scan_req = !!ftr->scan_req;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		} else {
			/* Here we are periodic sync context */
			rx->hdr.type = NODE_RX_TYPE_SYNC_REPORT;
			rx->hdr.handle = ull_sync_handle_get(sync);

			/* Check if we need to create BIG sync */
			sync_iso = sync_iso_create_get(sync);

			/* lll_aux and chain are the auxiliary channel context;
			 * reuse the existing chain context to scan the chain.
			 * Hence, lll_aux and chain are not released or set to
			 * NULL.
			 */
			sync = NULL;
		}
		break;

	case NODE_RX_TYPE_SYNC_REPORT:
		{
			struct ll_sync_set *ull_sync;

			/* set the sync handle corresponding to the LLL context
			 * passed in the node rx footer field.
			 */
			sync_lll = ftr->param;
			LL_ASSERT(!sync_lll->lll_aux);

			ull_sync = HDR_LLL2ULL(sync_lll);
			rx->hdr.handle = ull_sync_handle_get(ull_sync);

			/* Check if we need to create BIG sync */
			sync_iso = sync_iso_create_get(ull_sync);

			/* FIXME: we will need lll_scan if chain was scheduled
			 *        from LLL; should we store lll_scan_set in
			 *        sync_lll instead?
			 */
			lll = NULL;
			lll_aux = NULL;
			chain = NULL;
			scan = NULL;
			sync = NULL;
			phy = sync_lll->phy;

			/* backup extra node_rx supplied for generating
			 * incomplete report
			 */
			rx_incomplete = ftr->extra;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

		}
		break;
	default:
		LL_ASSERT(0);
		return;
	}

	rx->hdr.link = link;
	ftr->extra = NULL;

	ftr->aux_sched = 0U;

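	/* A scan request/response exchange keeps the chain on the active list
	 * with its LLL scheduled flag intact, so that the response PDU is
	 * received in the same chain context; any other PDU ends the
	 * LLL-scheduled episode here.
	 */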
	if (chain) {
		chain->aux_sched = 0U;

		if (!is_scan_req) {
			/* Remove chain from active list */
			chain_remove_from_list(&scan_aux_set.active_chains, chain);

			/* Reset LLL scheduled flag */
			chain->is_lll_sched = 0U;
		}
	}

	pdu = (void *)rx->pdu;
	p = (void *)&pdu->adv_ext_ind;
	if (!pdu->len || !p->ext_hdr_len) {
		if (pdu->len) {
			data_len = pdu->len - PDU_AC_EXT_HEADER_SIZE_MIN;
		} else {
			data_len = 0U;
		}

		if (chain) {
			chain->data_len += data_len;
			ftr->aux_data_len = chain->data_len;
		} else {
			ftr->aux_data_len = data_len;
		}

		goto ull_scan_aux_rx_flush;
	}

	h = (void *)p->ext_hdr_adv_data;

	/* Note: The extended header contains an RFU flag that could
	 * potentially cause an incorrect calculation of the offset to the ACAD
	 * field if it were used to add a new header field. However, from the
	 * discussion in BT errata ES-8080 it seems clear that the BT SIG is
	 * aware that the RFU bit cannot be used to add a new field, since
	 * existing implementations would not be able to calculate the start of
	 * the ACAD in that case.
	 */

	ptr = h->data;

	if (h->adv_addr) {
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		/* Check if Periodic Advertising Synchronization is to be
		 * created
		 */
		if (sync && (scan->periodic.state != LL_SYNC_STATE_CREATED)) {
			/* Check address and update internal state */
#if defined(CONFIG_BT_CTLR_PRIVACY)
			ull_sync_setup_addr_check(sync, scan, pdu->tx_addr, ptr,
						  ftr->rl_idx);
#else /* !CONFIG_BT_CTLR_PRIVACY */
			ull_sync_setup_addr_check(sync, scan, pdu->tx_addr, ptr, 0U);
#endif /* !CONFIG_BT_CTLR_PRIVACY */

		}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

		ptr += BDADDR_SIZE;
	}

	if (h->tgt_addr) {
		ptr += BDADDR_SIZE;
	}

	if (h->cte_info) {
		ptr += sizeof(struct pdu_cte_info);
	}

	adi = NULL;
	if (h->adi) {
		adi = (void *)ptr;
		ptr += sizeof(*adi);
	}

	aux_ptr = NULL;
	if (h->aux_ptr) {
		aux_ptr = (void *)ptr;
		ptr += sizeof(*aux_ptr);
	}

	if (h->sync_info) {
		struct pdu_adv_sync_info *si;

		si = (void *)ptr;
		ptr += sizeof(*si);

		/* Check if Periodic Advertising Synchronization is to be
		 * created. Set up synchronization if the address and SID match
		 * an entry in the Periodic Advertiser List or the explicitly
		 * supplied address and SID.
		 */
		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && chain && sync && adi &&
		    ull_sync_setup_sid_match(sync, scan, PDU_ADV_ADI_SID_GET(adi))) {
			ull_sync_setup(scan, chain->lll.phy, rx, si);
		}
	}

	if (h->tx_pwr) {
		ptr++;
	}

	/* Calculate ACAD Len */
	hdr_len = ptr - (uint8_t *)p;
	hdr_buf_len = PDU_AC_EXT_HEADER_SIZE_MIN + p->ext_hdr_len;
	if (hdr_len > hdr_buf_len) {
		/* FIXME: Handle invalid header length */
		acad_len = 0U;
	} else {
		acad_len = hdr_buf_len - hdr_len;
		hdr_len += acad_len;
	}

	/* calculate and set total data length */
	if (hdr_len < pdu->len) {
		data_len = pdu->len - hdr_len;
	} else {
		data_len = 0U;
	}

	if (chain) {
		chain->data_len += data_len;
		ftr->aux_data_len = chain->data_len;
	} else {
		ftr->aux_data_len = data_len;
	}

	/* Periodic Advertising Channel Map Indication and/or Broadcast ISO
	 * synchronization
	 */
	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) &&
	    (rx->hdr.type == NODE_RX_TYPE_SYNC_REPORT) &&
	    acad_len) {
		/* Periodic Advertising Channel Map Indication */
		ull_sync_chm_update(rx->hdr.handle, ptr, acad_len);

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
		struct ll_sync_set *sync_set;
		struct pdu_big_info *bi;
		uint8_t bi_size;

		sync_set = HDR_LLL2ULL(sync_lll);

		/* Provide encryption information for BIG sync creation */
		bi_size = ptr[PDU_ADV_DATA_HEADER_LEN_OFFSET] -
			  PDU_ADV_DATA_HEADER_TYPE_SIZE;
		sync_set->enc = (bi_size == PDU_BIG_INFO_ENCRYPTED_SIZE);

		/* Store number of BISes in the BIG */
		bi = (void *)&ptr[PDU_ADV_DATA_HEADER_DATA_OFFSET];
		sync_set->num_bis = PDU_BIG_INFO_NUM_BIS_GET(bi);

		/* Broadcast ISO synchronize */
		if (sync_iso) {
			ull_sync_iso_setup(sync_iso, rx, ptr, acad_len);
		}
#endif /* CONFIG_BT_CTLR_SYNC_ISO */
	}

1897 	/* Do not ULL schedule auxiliary PDU reception if no aux pointer
1898 	 * or aux pointer is zero or scannable advertising has erroneous aux
1899 	 * pointer being present or PHY in the aux pointer is invalid or unsupported
1900 	 * or if scanning and scan has been stopped
1901 	 */
1902 	if (!aux_ptr || !PDU_ADV_AUX_PTR_OFFSET_GET(aux_ptr) || is_scan_req ||
1903 	    (PDU_ADV_AUX_PTR_PHY_GET(aux_ptr) > EXT_ADV_AUX_PHY_LE_CODED) ||
1904 		(!IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED) &&
1905 		  PDU_ADV_AUX_PTR_PHY_GET(aux_ptr) == EXT_ADV_AUX_PHY_LE_CODED)) {
1906 
1907 		if (is_scan_req) {
1908 			LL_ASSERT(chain && chain->rx_last);
1909 
1910 			chain->rx_last->rx_ftr.extra = rx;
1911 			chain->rx_last = rx;
1912 
1913 			return;
1914 		}
1915 
1916 		goto ull_scan_aux_rx_flush;
1917 	}
1918 
1919 	/* Determine the window size */
1920 	if (aux_ptr->offs_units) {
1921 		window_size_us = OFFS_UNIT_300_US;
1922 	} else {
1923 		window_size_us = OFFS_UNIT_30_US;
1924 	}
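
	/* Illustrative example (hypothetical values): an aux pointer offset
	 * field of 245 with offs_units set selects 300 us units, placing the
	 * auxiliary PDU 245 * 300 us = 73.5 ms from the start of this PDU.
	 */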

	/* Calculate the received aux offset for which ULL is to schedule
	 * reception
	 */
	aux_offset_us = (uint32_t)PDU_ADV_AUX_PTR_OFFSET_GET(aux_ptr) * window_size_us;

	/* Skip reception if the aux offset is invalid */
	pdu_us = PDU_AC_US(pdu->len, phy, ftr->phy_flags);
	if (unlikely(!AUX_OFFSET_IS_VALID(aux_offset_us, window_size_us, pdu_us))) {
		goto ull_scan_aux_rx_flush;
	}

	/* The CA field contains the clock accuracy of the advertiser:
	 * 0 - 51 ppm to 500 ppm
	 * 1 - 0 ppm to 50 ppm
	 */
	if (aux_ptr->ca) {
		window_widening_us = SCA_DRIFT_50_PPM_US(aux_offset_us);
	} else {
		window_widening_us = SCA_DRIFT_500_PPM_US(aux_offset_us);
	}
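
	/* Illustrative example (hypothetical values): assuming the SCA_DRIFT
	 * macros scale linearly with ppm, a 73.5 ms aux offset at 500 ppm
	 * worst-case clock drift widens the window by about
	 * 73500 * 500 / 1000000, i.e. roughly 37 us.
	 */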

	phy_aux = BIT(PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
	ready_delay_us = lll_radio_rx_ready_delay_get(phy_aux, PHY_FLAGS_S8);

	/* Calculate the aux offset from the start of the scan window */
	aux_offset_us += ftr->radio_end_us;
	aux_offset_us -= pdu_us;
	aux_offset_us -= EVENT_TICKER_RES_MARGIN_US;
	aux_offset_us -= EVENT_JITTER_US;
	aux_offset_us -= ready_delay_us;
	aux_offset_us -= window_widening_us;
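
	/* The aux pointer offset is referenced to the start of the PDU that
	 * carried it (radio_end_us minus the PDU air time); the ticker margin,
	 * jitter, radio ready delay and window widening are then subtracted so
	 * that the receive window opens early enough to catch the PDU.
	 */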

	ticks_aux_offset = HAL_TICKER_US_TO_TICKS(aux_offset_us);

	/* Check if it is too late to ULL schedule the auxiliary PDU reception
	 */
	if (!ftr->aux_lll_sched) {
		uint32_t ticks_at_expire;
		uint32_t overhead_us;
		uint32_t ticks_now;
		uint32_t diff;

		/* CPU execution overhead to set up the radio for reception,
		 * plus the minimum prepare tick offset. Allow one additional
		 * event in between as overhead (say, an advertising event in
		 * between got closed while reception of the auxiliary PDU is
		 * being set up).
		 */
		overhead_us = (EVENT_OVERHEAD_END_US + EVENT_OVERHEAD_START_US +
			       HAL_TICKER_TICKS_TO_US(HAL_TICKER_CNTR_CMP_OFFSET_MIN)) << 1;

		ticks_now = ticker_ticks_now_get();
		ticks_at_expire = ftr->ticks_anchor + ticks_aux_offset -
				  HAL_TICKER_US_TO_TICKS(overhead_us);
		diff = ticker_ticks_diff_get(ticks_now, ticks_at_expire);
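
		/* diff is ticks_now minus ticks_at_expire modulo the counter
		 * width; a cleared MSB means ticks_now has already reached or
		 * passed the expiry point, i.e. it is too late to schedule.
		 */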
		if ((diff & BIT(HAL_TICKER_CNTR_MSBIT)) == 0U) {
			goto ull_scan_aux_rx_flush;
		}
	}

	if (!chain) {
		chain = aux_chain_acquire();
		if (!chain) {
			/* As LLL scheduling has been used and will fail due to
			 * non-allocation of a new chain context, a sync report
			 * with the aux_failed flag set will be generated. Let
			 * the current sync report be set as partial, and let
			 * the sync report corresponding to
			 * ull_scan_aux_release have the incomplete data
			 * status.
			 */
			if (ftr->aux_lll_sched) {
				ftr->aux_sched = 1U;
			}

			goto ull_scan_aux_rx_flush;
		}

		chain->rx_head = chain->rx_last = NULL;
		chain->data_len = data_len;
		chain->is_lll_sched = ftr->aux_lll_sched;
		lll_aux = &chain->lll;
		lll_aux->is_chain_sched = 0U;

		lll_hdr_init(lll_aux, &scan_aux_set);

		chain->parent = lll ? (void *)lll : (void *)sync_lll;
#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
		if (lll) {
			lll_aux->hdr.score = lll->scan_aux_score;
		}
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		if (sync_lll) {
			struct ll_sync_set *sync_set = HDR_LLL2ULL(sync_lll);

			sync_set->rx_incomplete = rx_incomplete;
			rx_incomplete = NULL;
		}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

		/* Check if this was already scheduled from LLL. If so, store
		 * the aux context in the global scan/sync struct so that we
		 * can pick it up when the scanned node is received with a
		 * valid context.
		 */
		if (ftr->aux_lll_sched) {
			if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync_lll) {
				sync_lll->lll_aux = lll_aux;
			} else {
				lll->lll_aux = lll_aux;
			}

			/* Reset the auxiliary channel PDU scan state, which
			 * otherwise is done in the prepare_cb when ULL
			 * scheduling is used.
			 */
			lll_aux->state = 0U;
		}
	} else if (chain->data_len >= CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX) {
		/* Flush auxiliary PDU receptions and stop any more ULL
		 * scheduling if the accumulated data length exceeds the
		 * configured maximum supported.
		 */

		/* If LLL has already scheduled, then let it proceed.
		 *
		 * TODO: LLL to check the accumulated data length and
		 *       stop further reception.
		 *       Currently LLL will schedule as long as there
		 *       are free node rx available.
		 */
		if (!ftr->aux_lll_sched) {
			goto ull_scan_aux_rx_flush;
		}
	}

	/* In sync context we can dispatch rx immediately; in scan context we
	 * enqueue rx in the aux context and flush it after the scan is
	 * complete.
	 */
	if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || scan) {
		if (chain->rx_last) {
			chain->rx_last->rx_ftr.extra = rx;
		} else {
			chain->rx_head = rx;
		}
		chain->rx_last = rx;
	}

	/* Initialize the channel index and PHY for the auxiliary PDU
	 * reception.
	 */
	lll_aux->chan = aux_ptr->chan_idx;
	lll_aux->phy = phy_aux;

	if (ftr->aux_lll_sched) {
		/* AUX_ADV_IND/AUX_CHAIN_IND PDU reception is being set up */
		ftr->aux_sched = 1U;
		chain->aux_sched = 1U;

		chain->next = scan_aux_set.active_chains;
		scan_aux_set.active_chains = chain;

		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync_lll) {
			/* In sync context, dispatch immediately */
			ll_rx_put_sched(link, rx);
		}

		return;
	}

	/* Switching to ULL scheduling to receive auxiliary PDUs */
	if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || lll) {
		LL_ASSERT(scan);

		/* Do not ULL schedule if scan disable is requested */
		if (unlikely(scan->is_stop)) {
			goto ull_scan_aux_rx_flush;
		}

		/* Remove the auxiliary context association with the scan
		 * context so that LLL can recognize it as ULL scheduling.
		 */
		if (lll->lll_aux == &chain->lll) {
			lll->lll_aux = NULL;
		}
	} else {
		struct ll_sync_set *sync_set;

		LL_ASSERT(sync_lll &&
			  (!sync_lll->lll_aux || sync_lll->lll_aux == lll_aux));

		/* Do not ULL schedule if sync terminate is requested */
		sync_set = HDR_LLL2ULL(sync_lll);
		if (unlikely(sync_set->is_stop)) {
			goto ull_scan_aux_rx_flush;
		}

		/* Associate the auxiliary context with the sync context. We do
		 * this for ULL scheduling also, in contrast to how extended
		 * advertising only associates when LLL scheduling is used.
		 * Each Periodic Advertising chain is received by a unique sync
		 * context, hence LLL and ULL scheduling are always associated
		 * with the same unique sync context.
		 */
		sync_lll->lll_aux = lll_aux;
	}

	lll_aux->window_size_us = window_size_us;
	lll_aux->window_size_us += ((EVENT_TICKER_RES_MARGIN_US + EVENT_JITTER_US +
				     window_widening_us) << 1);
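
	/* The receive window opens (margin + jitter + widening) early and
	 * closes the same amount late, hence the shift by one that doubles the
	 * sum added to the base window size.
	 */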

	chain->ticker_ticks = (ftr->ticks_anchor + ticks_aux_offset) & HAL_TICKER_CNTR_MASK;

	if (!chain_insert_in_sched_list(chain)) {
		/* Failed to add the chain - flush */
		goto ull_scan_aux_rx_flush;
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	if (sync_lll) {
		/* In sync context, dispatch immediately */
		rx->rx_ftr.aux_sched = 1U;
		chain->aux_sched = 1U;
		ll_rx_put_sched(link, rx);
	}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

	return;

ull_scan_aux_rx_flush:
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	if (sync && (scan->periodic.state != LL_SYNC_STATE_CREATED)) {
		scan->periodic.state = LL_SYNC_STATE_IDLE;
	}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

	if (chain) {
		/* Enqueue the last rx in the chain context if possible,
		 * otherwise send it immediately since we are in sync context.
		 */
		if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || chain->rx_last) {
			LL_ASSERT(scan);

			/* rx could already have been enqueued before coming
			 * here; if rx is not the last in the list of received
			 * PDUs then add it, else do not add it, to avoid
			 * duplicate report generation, release, and a probable
			 * infinite loop processing the list.
			 */
			if (chain->rx_last != rx) {
				chain->rx_last->rx_ftr.extra = rx;
				chain->rx_last = rx;
			}
		} else {
			LL_ASSERT(sync_lll);

			ll_rx_put_sched(link, rx);
		}

		LL_ASSERT(chain->parent);

		flush_safe(chain);

		return;
	}

	ll_rx_put(link, rx);

	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && rx_incomplete) {
		rx_release_put(rx_incomplete);
	}

	ll_rx_sched();
}

void ull_scan_aux_done(struct node_rx_event_done *done)
{
	struct ll_scan_aux_chain *chain;

	/* Get the reference to the chain */
	chain = CONTAINER_OF(done->extra.lll, struct ll_scan_aux_chain, lll);
	LL_ASSERT(scan_aux_chain_is_valid_get(chain));

	/* Remove the chain from the active list */
	chain_remove_from_list(&scan_aux_set.active_chains, chain);

	flush(chain);
}

struct ll_scan_aux_chain *lll_scan_aux_chain_is_valid_get(struct lll_scan_aux *lll)
{
	return scan_aux_chain_is_valid_get(CONTAINER_OF(lll, struct ll_scan_aux_chain, lll));
}

void *ull_scan_aux_lll_parent_get(struct lll_scan_aux *lll,
				  uint8_t *is_lll_scan)
{
	struct ll_scan_aux_chain *chain;

	chain = CONTAINER_OF(lll, struct ll_scan_aux_chain, lll);

	if (is_lll_scan) {
		struct ll_scan_set *scan;
		struct lll_scan *lllscan;

		lllscan = chain->parent;
		LL_ASSERT(lllscan);

		scan = HDR_LLL2ULL(lllscan);
		*is_lll_scan = !!ull_scan_is_valid_get(scan);
	}

	return chain->parent;
}

struct ll_scan_aux_chain *scan_aux_chain_is_valid_get(struct ll_scan_aux_chain *chain)
{
	if (((uint8_t *)chain < (uint8_t *)ll_scan_aux_pool) ||
	    ((uint8_t *)chain > ((uint8_t *)ll_scan_aux_pool +
				 (sizeof(struct ll_scan_aux_chain) *
				  (CONFIG_BT_CTLR_SCAN_AUX_CHAIN_COUNT - 1))))) {
		return NULL;
	}

	return chain;
}

struct lll_scan_aux *ull_scan_aux_lll_is_valid_get(struct lll_scan_aux *lll)
{
	struct ll_scan_aux_chain *chain;

	chain = CONTAINER_OF(lll, struct ll_scan_aux_chain, lll);
	chain = scan_aux_chain_is_valid_get(chain);
	if (chain) {
		return &chain->lll;
	}

	return NULL;
}

void ull_scan_aux_release(memq_link_t *link, struct node_rx_pdu *rx)
{
	struct lll_scan_aux *lll_aux;
	void *param_ull;

	param_ull = HDR_LLL2ULL(rx->rx_ftr.param);

	/* Mark the buffer for release */
	rx->hdr.type = NODE_RX_TYPE_RELEASE;

	if (ull_scan_is_valid_get(param_ull)) {
		struct lll_scan *lll;

		lll = rx->rx_ftr.param;
		lll_aux = lll->lll_aux;

	} else if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) ||
		   param_ull == &scan_aux_set) {

		lll_aux = rx->rx_ftr.param;

	} else if (ull_sync_is_valid_get(param_ull)) {
		struct lll_sync *lll;

		lll = rx->rx_ftr.param;
		lll_aux = lll->lll_aux;

		if (!lll_aux) {
			struct ll_sync_set *sync = param_ull;

			/* Change the node type so that HCI can dispatch the
			 * report for the truncated data properly.
			 */
			rx->hdr.type = NODE_RX_TYPE_SYNC_REPORT;
			rx->hdr.handle = ull_sync_handle_get(sync);

			/* Dequeue will try releasing the list of node rx; set
			 * the extra pointer to NULL.
			 */
			rx->rx_ftr.extra = NULL;
		}
	} else {
		LL_ASSERT(0);
		lll_aux = NULL;
	}

	if (lll_aux) {
		struct ll_scan_aux_chain *chain;
		struct ll_scan_set *scan;
		struct lll_scan *lll;
		uint8_t is_stop;

		chain = CONTAINER_OF(lll_aux, struct ll_scan_aux_chain, lll);
		lll = chain->parent;
		LL_ASSERT(lll);

		scan = HDR_LLL2ULL(lll);
		scan = ull_scan_is_valid_get(scan);
		if (scan) {
			is_stop = scan->is_stop;
		} else {
			struct lll_sync *sync_lll;
			struct ll_sync_set *sync;

			sync_lll = (void *)lll;
			sync = HDR_LLL2ULL(sync_lll);
			is_stop = sync->is_stop;
		}

		if (!is_stop) {
			LL_ASSERT(chain->parent);

			/* Remove the chain from the active list and flush */
			chain_remove_from_list(&scan_aux_set.active_chains, chain);
			flush(chain);
		}
	}

	ll_rx_put(link, rx);
	ll_rx_sched();
}

static void scan_aux_stop_all_chains_for_parent(void *parent)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
	struct ll_scan_aux_chain *curr = scan_aux_set.sched_chains;
	struct ll_scan_aux_chain *prev = NULL;
	bool ticker_stopped = false;
	bool disabling = false;

	if (curr && curr->parent == parent) {
		uint8_t ticker_status;

		/* The scheduled head is about to be removed - stop the running
		 * ticker
		 */
		ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
					    TICKER_ID_SCAN_AUX, NULL, NULL);
		LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
			  (ticker_status == TICKER_STATUS_BUSY));
		ticker_stopped = true;
	}

	while (curr) {
		if (curr->parent == parent) {
			if (curr == scan_aux_set.sched_chains) {
				scan_aux_set.sched_chains = curr->next;
				flush(curr);
				curr = scan_aux_set.sched_chains;
			} else {
				prev->next = curr->next;
				flush(curr);
				curr = prev->next;
			}
		} else {
			prev = curr;
			curr = curr->next;
		}
	}

	if (ticker_stopped && scan_aux_set.sched_chains) {
		/* Start the ticker using the new head */
		chain_start_ticker(scan_aux_set.sched_chains, false);
	}

	/* Check the active chains */
	prev = NULL;
	curr = scan_aux_set.active_chains;
	while (curr) {
		if (curr->parent == parent) {
			struct ll_scan_aux_chain *chain = curr;
			uint32_t ret;

			if (curr == scan_aux_set.active_chains) {
				scan_aux_set.active_chains = curr->next;
				curr = scan_aux_set.active_chains;
			} else {
				prev->next = curr->next;
				curr = prev->next;
			}

			if (chain->is_lll_sched || ull_ref_get(&scan_aux_set.ull) == 0) {
				/* Disable called by parent disable or a race
				 * with scan stop
				 */
				flush(chain);
			} else {
				/* Flush in the disabled callback */
				chain->next = scan_aux_set.flushing_chains;
				scan_aux_set.flushing_chains = chain;
				scan_aux_set.ull.disabled_cb = done_disabled_cb;

				/* Call lll_disable on the chain being removed;
				 * curr has already advanced to the next
				 * element at this point.
				 */
				disabling = true;
				mfy.param = &chain->lll;
				ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, 0,
						     &mfy);
				LL_ASSERT(!ret);
			}
		} else {
			prev = curr;
			curr = curr->next;
		}
	}

	if (!disabling) {
		/* Signal completion */
		k_sem_give(&sem_scan_aux_stop);
	}
}

/* Stops all chains with the given parent */
int ull_scan_aux_stop(void *parent)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, scan_aux_stop_all_chains_for_parent};
	uint32_t ret;

	/* Stop the chains in the ULL execution context */
	mfy.param = parent;
	ret = mayfly_enqueue(TICKER_USER_ID_THREAD, TICKER_USER_ID_ULL_HIGH, 0, &mfy);
	LL_ASSERT(!ret);

	/* Wait for the chains to be stopped before returning */
	(void)k_sem_take(&sem_scan_aux_stop, K_FOREVER);

	return 0;
}

static int init_reset(void)
{
	ull_hdr_init(&scan_aux_set.ull);
	scan_aux_set.sched_chains = NULL;
	scan_aux_set.active_chains = NULL;

	/* Initialize the scan aux chains pool */
	mem_init(ll_scan_aux_pool, sizeof(struct ll_scan_aux_chain),
		 sizeof(ll_scan_aux_pool) / sizeof(struct ll_scan_aux_chain),
		 &scan_aux_free);

	return 0;
}

static inline struct ll_scan_aux_chain *aux_chain_acquire(void)
{
	return mem_acquire(&scan_aux_free);
}

static inline void aux_chain_release(struct ll_scan_aux_chain *chain)
{
	/* Clear the parent so that when the scan is being disabled this
	 * auxiliary context does not associate itself with the disable
	 * procedure.
	 */
	LL_ASSERT(chain->parent);
	chain->parent = NULL;

	mem_release(chain, &scan_aux_free);
}

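/* Note: flush() removes the chain it is handed from
 * scan_aux_set.flushing_chains before releasing it, so the drain loop below,
 * which repeatedly flushes the list head, is guaranteed to terminate.
 */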
static void done_disabled_cb(void *param)
{
	ARG_UNUSED(param);

	while (scan_aux_set.flushing_chains) {
		flush(scan_aux_set.flushing_chains);
	}

	scan_aux_set.ull.disabled_cb = NULL;

	/* Release the semaphore if it is taken */
	if (k_sem_count_get(&sem_scan_aux_stop) == 0) {
		k_sem_give(&sem_scan_aux_stop);
	}
}

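/* Flushing a chain that is still active must wait until LLL has released it;
 * flush_safe() therefore defers such chains to the flushing_chains list, which
 * done_disabled_cb() drains once the event's disabled callback runs.
 */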
static void flush_safe(void *param)
{
	struct ll_scan_aux_chain *chain;

	chain = param;
	LL_ASSERT(chain->parent);

	if (chain_is_in_list(scan_aux_set.flushing_chains, chain)) {
		/* Chain already marked for flushing */
		return;
	}

	/* If the chain is active we need to flush from the disabled callback */
	if (chain_is_in_list(scan_aux_set.active_chains, chain) &&
	    ull_ref_get(&scan_aux_set.ull)) {
		chain->next = scan_aux_set.flushing_chains;
		scan_aux_set.flushing_chains = chain;
		scan_aux_set.ull.disabled_cb = done_disabled_cb;
	} else {
		flush(chain);
	}
}

static void flush(struct ll_scan_aux_chain *chain)
{
	struct ll_scan_set *scan;
	struct node_rx_pdu *rx;
	struct lll_scan *lll;
	bool sched = false;

	/* Debug check that the parent was assigned when allocated for
	 * reception of auxiliary channel PDUs.
	 */
	LL_ASSERT(chain->parent);

	/* The chain is being flushed now - remove it from flushing_chains if
	 * present
	 */
	chain_remove_from_list(&scan_aux_set.flushing_chains, chain);

	lll = chain->parent;
	scan = HDR_LLL2ULL(lll);
	scan = ull_scan_is_valid_get(scan);

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	if (!scan && chain->aux_sched) {
		/* Send the incomplete sync message */
		aux_sync_incomplete(chain);
	}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

	rx = chain->rx_head;
	if (rx) {
		chain->rx_head = NULL;

		ll_rx_put(rx->hdr.link, rx);
		sched = true;
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	if (!scan) {
		struct ll_sync_set *sync = HDR_LLL2ULL(lll);

		rx = sync->rx_incomplete;
		if (rx) {
			sync->rx_incomplete = NULL;

			rx_release_put(rx);
			sched = true;
		}
	}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

	if (sched) {
		ll_rx_sched();
	}

	if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || scan) {
		if (lll->lll_aux == &chain->lll) {
			lll->lll_aux = NULL;
		}
#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
		lll->scan_aux_score = chain->lll.hdr.score;
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
	} else {
		struct lll_sync *sync_lll;
		struct ll_sync_set *sync;

		sync_lll = chain->parent;
		sync = HDR_LLL2ULL(sync_lll);

		LL_ASSERT(sync->is_stop || sync_lll->lll_aux);
		sync_lll->lll_aux = NULL;
	}

	aux_chain_release(chain);
}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
static void aux_sync_incomplete(struct ll_scan_aux_chain *chain)
{
	struct ll_sync_set *sync;
	struct node_rx_pdu *rx;
	struct lll_sync *lll;

	LL_ASSERT(chain->parent);

	/* Get the reference to the sync context */
	lll = chain->parent;
	LL_ASSERT(lll);
	sync = HDR_LLL2ULL(lll);

	/* Pick the extra node rx stored in the sync context */
	rx = sync->rx_incomplete;
	LL_ASSERT(rx);
	sync->rx_incomplete = NULL;

	/* Prepare a sync report with failure */
	rx->hdr.type = NODE_RX_TYPE_SYNC_REPORT;
	rx->hdr.handle = ull_sync_handle_get(sync);
	rx->rx_ftr.param = lll;

	/* Flag the chain reception failure */
	rx->rx_ftr.aux_failed = 1U;

	/* Dequeue will try releasing the list of node rx; set the extra
	 * pointer to NULL.
	 */
	rx->rx_ftr.extra = NULL;

	/* Add to the rx list; it will be flushed */
	chain->rx_head = rx;
}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

static void chain_start_ticker(struct ll_scan_aux_chain *chain, bool replace)
{
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	uint8_t ticker_yield_handle = TICKER_NULL;
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
	uint32_t ticks_slot_overhead;
	uint32_t ticks_slot_offset;
	uint32_t ready_delay_us;
	uint8_t ticker_status;

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	if (ull_scan_is_valid_get(HDR_LLL2ULL(chain->parent))) {
		if (chain->rx_head == chain->rx_last) {
			struct ll_scan_set *scan = HDR_LLL2ULL(chain->parent);

			ticker_yield_handle = TICKER_ID_SCAN_BASE +
					      ull_scan_handle_get(scan);
		} else {
			ticker_yield_handle = TICKER_ID_SCAN_AUX;
		}
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	} else {
		/* Periodic sync context */
		struct ll_sync_set *ull_sync = HDR_LLL2ULL(chain->parent);

		ticker_yield_handle = TICKER_ID_SCAN_SYNC_BASE +
				      ull_sync_handle_get(ull_sync);
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
	}
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */

	ready_delay_us = lll_radio_rx_ready_delay_get(chain->lll.phy, PHY_FLAGS_S8);

	/* TODO: active_to_start feature port */
	scan_aux_set.ull.ticks_active_to_start = 0;
	scan_aux_set.ull.ticks_prepare_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	scan_aux_set.ull.ticks_preempt_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
	scan_aux_set.ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(
		EVENT_OVERHEAD_START_US + ready_delay_us +
		PDU_AC_MAX_US(PDU_AC_EXT_PAYLOAD_RX_SIZE, chain->lll.phy) +
		EVENT_OVERHEAD_END_US);

	ticks_slot_offset = MAX(scan_aux_set.ull.ticks_active_to_start,
				scan_aux_set.ull.ticks_prepare_to_start);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}
	ticks_slot_offset += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	/* Disable the ticker job so that the yield and start operations are
	 * chained, reducing CPU use from successive calls to ticker_job().
	 */
	mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 0);
#endif

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	/* Yield the primary scan window, the auxiliary event or the periodic
	 * sync event in the ticker.
	 */
	if (ticker_yield_handle != TICKER_NULL) {
		ticker_status = ticker_yield_abs(TICKER_INSTANCE_ID_CTLR,
						 TICKER_USER_ID_ULL_HIGH,
						 ticker_yield_handle,
						 (chain->ticker_ticks -
						  ticks_slot_offset),
						 NULL, NULL);
		LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
			  (ticker_status == TICKER_STATUS_BUSY));
	}
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */

	if (replace) {
		ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
					    TICKER_ID_SCAN_AUX, NULL, NULL);
		LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
			  (ticker_status == TICKER_STATUS_BUSY));
	}

	ticker_status = ticker_start(TICKER_INSTANCE_ID_CTLR,
				     TICKER_USER_ID_ULL_HIGH,
				     TICKER_ID_SCAN_AUX,
				     chain->ticker_ticks - ticks_slot_offset,
				     0,
				     TICKER_NULL_PERIOD,
				     TICKER_NULL_REMAINDER,
				     TICKER_NULL_LAZY,
				     (scan_aux_set.ull.ticks_slot +
				      ticks_slot_overhead),
				     ticker_cb, chain, ticker_op_cb, chain);
#if defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
		  (ticker_status == TICKER_STATUS_BUSY));
#else
	LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
		  (ticker_status == TICKER_STATUS_BUSY) ||
		  ((ticker_status == TICKER_STATUS_FAILURE) &&
		   IS_ENABLED(CONFIG_BT_TICKER_LOW_LAT)));
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */

#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	/* Enable the ticker job; queued ticker operations will be handled
	 * thereafter.
	 */
	mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1);
#endif
}

static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_scan_aux_prepare};
	struct ll_scan_aux_chain *chain = param;
	static struct lll_prepare_param p;
	uint32_t ret;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_O(1);

	/* Increment the prepare reference count */
	ref = ull_ref_inc(&scan_aux_set.ull);
	LL_ASSERT(ref);

	/* The chain should always be the first in the sched_chains list */
	LL_ASSERT(scan_aux_set.sched_chains == chain);

	/* Move the chain to the active list */
	chain_remove_from_list(&scan_aux_set.sched_chains, chain);
	chain_append_to_list(&scan_aux_set.active_chains, chain);

	/* Append the timing parameters */
	p.ticks_at_expire = ticks_at_expire;
	p.remainder = 0; /* FIXME: remainder; */
	p.lazy = lazy;
	p.force = force;
	p.param = &chain->lll;
	mfy.param = &p;

	/* Kick the LLL prepare */
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
			     0, &mfy);
	LL_ASSERT(!ret);

	if (scan_aux_set.sched_chains) {
		/* Start the ticker for the next chain */
		chain_start_ticker(scan_aux_set.sched_chains, false);
	}

	DEBUG_RADIO_PREPARE_O(1);
}

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
static void ticker_start_failed(void *param)
{
	struct ll_scan_aux_chain *chain;

	/* The ticker start failed, so remove this chain from the scheduled
	 * chains
	 */
	chain = param;
	chain_remove_from_list(&scan_aux_set.sched_chains, chain);

	flush(chain);

	if (scan_aux_set.sched_chains) {
		chain_start_ticker(scan_aux_set.sched_chains, false);
	}
}
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */

static void ticker_op_cb(uint32_t status, void *param)
{
#if defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
#else /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, ticker_start_failed};
	uint32_t ret;

	if (status == TICKER_STATUS_SUCCESS) {
		return;
	}

	mfy.param = param;

	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW, TICKER_USER_ID_ULL_HIGH,
			     1, &mfy);
	LL_ASSERT(!ret);
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
}

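/* Signed, wraparound-safe difference between two ticker tick values: the MSB
 * of the unsigned difference acts as the sign bit. Illustrative example,
 * assuming a 24-bit counter: ticks_a = 10 and ticks_b = 0xFFFFFA yield +16,
 * not -16777194, since the counter wrapped between b and a.
 */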
static int32_t chain_ticker_ticks_diff(uint32_t ticks_a, uint32_t ticks_b)
{
	if ((ticks_a - ticks_b) & BIT(HAL_TICKER_CNTR_MSBIT)) {
		return -ticker_ticks_diff_get(ticks_b, ticks_a);
	} else {
		return ticker_ticks_diff_get(ticks_a, ticks_b);
	}
}

/* Sorted insertion into the sched list, starting/replacing the ticker when
 * needed.
 * Returns:
 *  - false for no insertion (conflict with an existing entry)
 *  - true for inserted
 */
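/* Illustrative example (hypothetical tick values): with sched_chains holding
 * entries at ticks 1000 and 2000, a chain at ticks 1500 is inserted between
 * them; a chain at ticks 1002 is rejected when the minimum delta, the tick
 * equivalent of EVENT_OVERHEAD_END_US + EVENT_OVERHEAD_START_US, exceeds 2.
 */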
static bool chain_insert_in_sched_list(struct ll_scan_aux_chain *chain)
{
	struct ll_scan_aux_chain *curr = scan_aux_set.sched_chains;
	struct ll_scan_aux_chain *prev = NULL;
	uint32_t ticks_min_delta;

	if (!scan_aux_set.sched_chains) {
		chain->next = NULL;
		scan_aux_set.sched_chains = chain;
		chain_start_ticker(chain, false);
		return true;
	}

	/* Find the insertion point */
	while (curr && chain_ticker_ticks_diff(chain->ticker_ticks, curr->ticker_ticks) > 0) {
		prev = curr;
		curr = curr->next;
	}

	/* Check for a conflict with an existing entry */
	ticks_min_delta = HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_END_US + EVENT_OVERHEAD_START_US);
	if ((prev &&
	     ticker_ticks_diff_get(chain->ticker_ticks, prev->ticker_ticks) < ticks_min_delta) ||
	    (curr &&
	     ticker_ticks_diff_get(curr->ticker_ticks, chain->ticker_ticks) < ticks_min_delta)) {
		return false;
	}

	if (prev) {
		chain->next = prev->next;
		prev->next = chain;
	} else {
		chain->next = scan_aux_set.sched_chains;
		scan_aux_set.sched_chains = chain;
		chain_start_ticker(chain, true);
	}

	return true;
}

static void chain_remove_from_list(struct ll_scan_aux_chain **head, struct ll_scan_aux_chain *chain)
{
	struct ll_scan_aux_chain *curr = *head;
	struct ll_scan_aux_chain *prev = NULL;

	while (curr && curr != chain) {
		prev = curr;
		curr = curr->next;
	}

	if (curr) {
		if (prev) {
			prev->next = curr->next;
		} else {
			*head = curr->next;
		}
	}

	chain->next = NULL;
}

static void chain_append_to_list(struct ll_scan_aux_chain **head, struct ll_scan_aux_chain *chain)
{
	struct ll_scan_aux_chain *prev = *head;

	/* Defensively clear the link so a stale next pointer cannot corrupt
	 * the list.
	 */
	chain->next = NULL;

	if (!*head) {
		*head = chain;
		return;
	}

	while (prev->next) {
		prev = prev->next;
	}

	prev->next = chain;
}

static bool chain_is_in_list(struct ll_scan_aux_chain *head, struct ll_scan_aux_chain *chain)
{
	while (head) {
		if (head == chain) {
			return true;
		}
		head = head->next;
	}

	return false;
}
#endif /* CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */