/*
 * Copyright (c) 2020 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/util.h>

#include "util/mem.h"
#include "util/memq.h"
#include "util/mayfly.h"
#include "util/util.h"
#include "util/dbuf.h"

#include "hal/ticker.h"
#include "hal/ccm.h"

#include "ticker/ticker.h"

#include "pdu_df.h"
#include "lll/pdu_vendor.h"
#include "pdu.h"

#include "lll.h"
#include "lll/lll_vendor.h"
#include "lll_scan.h"
#include "lll_scan_aux.h"
#include "lll/lll_df_types.h"
#include "lll_conn.h"
#include "lll_sync.h"
#include "lll_sync_iso.h"

#include "ull_scan_types.h"
#include "ull_sync_types.h"

#include "ull_internal.h"
#include "ull_scan_internal.h"
#include "ull_sync_internal.h"
#include "ull_sync_iso_internal.h"
#include "ull_df_internal.h"

#include <zephyr/bluetooth/hci_types.h>

#include <soc.h>
#include "hal/debug.h"

static int init_reset(void);
static inline struct ll_scan_aux_set *aux_acquire(void);
static inline void aux_release(struct ll_scan_aux_set *aux);
static inline uint8_t aux_handle_get(struct ll_scan_aux_set *aux);
static inline struct ll_sync_set *sync_create_get(struct ll_scan_set *scan);
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
static inline struct ll_sync_iso_set *
	sync_iso_create_get(struct ll_sync_set *sync);
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
static void done_disabled_cb(void *param);
static void flush_safe(void *param);
static void flush(void *param);
static void rx_release_put(struct node_rx_pdu *rx);
static void aux_sync_incomplete(void *param);
static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param);
static void ticker_op_cb(uint32_t status, void *param);

/* Auxiliary context pool used for reception of PDUs at aux offsets, common for
 * both Extended Advertising and Periodic Advertising.
 * Increasing the count allows simultaneous reception of interleaved chain PDUs
 * from multiple advertisers.
 */
static struct ll_scan_aux_set ll_scan_aux_pool[CONFIG_BT_CTLR_SCAN_AUX_SET];
static void *scan_aux_free;
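
/* The pool size is fixed at build time by the CONFIG_BT_CTLR_SCAN_AUX_SET
 * Kconfig option, e.g. in an application's prj.conf (illustrative value):
 *
 *   CONFIG_BT_CTLR_SCAN_AUX_SET=2
 *
 * init_reset() below carves this array into the scan_aux_free list using
 * mem_init().
 */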

int ull_scan_aux_init(void)
{
	int err;

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

int ull_scan_aux_reset(void)
{
	int err;

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

void ull_scan_aux_setup(memq_link_t *link, struct node_rx_pdu *rx)
{
	struct node_rx_pdu *rx_incomplete;
	struct ll_sync_iso_set *sync_iso;
	struct pdu_adv_aux_ptr *aux_ptr;
	struct pdu_adv_com_ext_adv *p;
	uint32_t ticks_slot_overhead;
	struct lll_scan_aux *lll_aux;
	struct ll_scan_aux_set *aux;
	uint8_t ticker_yield_handle;
	uint32_t window_widening_us;
	uint32_t ticks_slot_offset;
	uint32_t ticks_aux_offset;
	struct pdu_adv_ext_hdr *h;
	struct lll_sync *sync_lll;
	struct ll_scan_set *scan;
	struct ll_sync_set *sync;
	struct pdu_adv_adi *adi;
	struct node_rx_ftr *ftr;
	uint32_t ready_delay_us;
	uint32_t aux_offset_us;
	uint32_t ticker_status;
	struct lll_scan *lll;
	struct pdu_adv *pdu;
	uint8_t hdr_buf_len;
	uint8_t aux_handle;
	bool is_scan_req;
	uint8_t acad_len;
	uint8_t data_len;
	uint8_t hdr_len;
	uint8_t *ptr;
	uint8_t phy;

	is_scan_req = false;
	ftr = &rx->rx_ftr;

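	/* Dispatch on the node rx type: primary channel extended advertising
	 * reports (1M and, if supported, Coded PHY), auxiliary channel
	 * reports, and periodic advertising sync reports each recover their
	 * originating LLL context from the node rx footer below.
	 */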
	switch (rx->hdr.type) {
	case NODE_RX_TYPE_EXT_1M_REPORT:
		lll_aux = NULL;
		aux = NULL;
		sync_lll = NULL;
		sync_iso = NULL;
		rx_incomplete = NULL;

		lll = ftr->param;
		LL_ASSERT(!lll->lll_aux);

		scan = HDR_LLL2ULL(lll);
		sync = sync_create_get(scan);
		phy = BT_HCI_LE_EXT_SCAN_PHY_1M;

		ticker_yield_handle = TICKER_ID_SCAN_BASE +
				      ull_scan_handle_get(scan);
		break;

#if defined(CONFIG_BT_CTLR_PHY_CODED)
	case NODE_RX_TYPE_EXT_CODED_REPORT:
		lll_aux = NULL;
		aux = NULL;
		sync_lll = NULL;
		sync_iso = NULL;
		rx_incomplete = NULL;

		lll = ftr->param;
		LL_ASSERT(!lll->lll_aux);

		scan = HDR_LLL2ULL(lll);
		sync = sync_create_get(scan);
		phy = BT_HCI_LE_EXT_SCAN_PHY_CODED;

		ticker_yield_handle = TICKER_ID_SCAN_BASE +
				      ull_scan_handle_get(scan);
		break;
#endif /* CONFIG_BT_CTLR_PHY_CODED */

	case NODE_RX_TYPE_EXT_AUX_REPORT:
		sync_iso = NULL;
		rx_incomplete = NULL;
		if (ull_scan_aux_is_valid_get(HDR_LLL2ULL(ftr->param))) {
			sync_lll = NULL;

			/* Node has valid aux context so its scan was scheduled
			 * from ULL.
			 */
			lll_aux = ftr->param;
			aux = HDR_LLL2ULL(lll_aux);

			/* aux parent is the scanner's or the periodic sync's
			 * LLL context; disambiguated further below.
			 */
			lll = aux->parent;
			LL_ASSERT(lll);

			ticker_yield_handle = TICKER_ID_SCAN_AUX_BASE +
					      aux_handle_get(aux);

		} else if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) ||
			   ull_scan_is_valid_get(HDR_LLL2ULL(ftr->param))) {
			sync_lll = NULL;

			/* Node that does not have valid aux context but has
			 * valid scan set was scheduled from LLL. We can
			 * retrieve aux context from lll_scan as it was stored
			 * there when superior PDU was handled.
			 */
			lll = ftr->param;

			lll_aux = lll->lll_aux;
			LL_ASSERT(lll_aux);

			aux = HDR_LLL2ULL(lll_aux);
			LL_ASSERT(lll == aux->parent);

			ticker_yield_handle = TICKER_NULL;

		} else {
			lll = NULL;

			/* If none of the above, node is part of sync scanning
			 */
			sync_lll = ftr->param;

			lll_aux = sync_lll->lll_aux;
			LL_ASSERT(lll_aux);

			aux = HDR_LLL2ULL(lll_aux);
			LL_ASSERT(sync_lll == aux->parent);

			ticker_yield_handle = TICKER_NULL;
		}

		if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || lll) {
			scan = HDR_LLL2ULL(lll);
			sync = (void *)scan;
			scan = ull_scan_is_valid_get(scan);
			if (scan) {
				sync = NULL;
			}
		} else {
			scan = NULL;
			sync = HDR_LLL2ULL(sync_lll);
		}
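
		/* The aux parent is a union of scanner and periodic sync LLL
		 * contexts; ull_scan_is_valid_get() doubles as the type
		 * discriminator here since scan and sync ULL contexts live in
		 * separate static pools (inferred from the pool bounds checks,
		 * not a documented invariant).
		 */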

		phy = lll_aux->phy;
		if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || scan) {
			/* Here we are scanner context */
			sync = sync_create_get(scan);

			/* Generate report based on PHY scanned */
			switch (phy) {
			case PHY_1M:
				rx->hdr.type = NODE_RX_TYPE_EXT_1M_REPORT;
				break;
			case PHY_2M:
				rx->hdr.type = NODE_RX_TYPE_EXT_2M_REPORT;
				break;
#if defined(CONFIG_BT_CTLR_PHY_CODED)
			case PHY_CODED:
				rx->hdr.type = NODE_RX_TYPE_EXT_CODED_REPORT;
				break;
#endif /* CONFIG_BT_CTLR_PHY_CODED */
			default:
				LL_ASSERT(0);
				return;
			}

			/* Backup scan requested flag as it is in union with
			 * `extra` struct member which will be set to NULL
			 * in subsequent code.
			 */
			is_scan_req = !!ftr->scan_req;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		} else {
			/* Here we are periodic sync context */
			rx->hdr.type = NODE_RX_TYPE_SYNC_REPORT;
			rx->hdr.handle = ull_sync_handle_get(sync);

			/* Check if we need to create BIG sync */
			sync_iso = sync_iso_create_get(sync);

			/* lll_aux and aux are the auxiliary channel context;
			 * reuse the existing aux context to scan the chain,
			 * hence lll_aux and aux are not released or set to
			 * NULL.
			 */
			sync = NULL;
		}
		break;

	case NODE_RX_TYPE_SYNC_REPORT:
		{
			struct ll_sync_set *ull_sync;

			/* set the sync handle corresponding to the LLL context
			 * passed in the node rx footer field.
			 */
			sync_lll = ftr->param;
			LL_ASSERT(!sync_lll->lll_aux);

			ull_sync = HDR_LLL2ULL(sync_lll);
			rx->hdr.handle = ull_sync_handle_get(ull_sync);

			/* Check if we need to create BIG sync */
			sync_iso = sync_iso_create_get(ull_sync);

			/* FIXME: we will need lll_scan if chain was scheduled
			 *        from LLL; should we store lll_scan_set in
			 *        sync_lll instead?
			 */
			lll = NULL;
			lll_aux = NULL;
			aux = NULL;
			scan = NULL;
			sync = NULL;
			phy = sync_lll->phy;

			/* backup extra node_rx supplied for generating
			 * incomplete report
			 */
			rx_incomplete = ftr->extra;

			ticker_yield_handle = TICKER_ID_SCAN_SYNC_BASE +
					      ull_sync_handle_get(ull_sync);
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

		}
		break;
	default:
		LL_ASSERT(0);
		return;
	}

	rx->hdr.link = link;
	ftr->extra = NULL;

	ftr->aux_sched = 0U;

	pdu = (void *)rx->pdu;
	p = (void *)&pdu->adv_ext_ind;
	if (!pdu->len || !p->ext_hdr_len) {
		if (pdu->len) {
			data_len = pdu->len - PDU_AC_EXT_HEADER_SIZE_MIN;
		} else {
			data_len = 0U;
		}

		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync_lll) {
			struct ll_sync_set *sync_set;

			sync_set = HDR_LLL2ULL(sync_lll);
			ftr->aux_data_len = sync_set->data_len + data_len;
			sync_set->data_len = 0U;
		} else if (aux) {
			aux->data_len += data_len;
			ftr->aux_data_len = aux->data_len;
		} else {
			ftr->aux_data_len = data_len;
		}

		goto ull_scan_aux_rx_flush;
	}

	h = (void *)p->ext_hdr_adv_data;

	/* Regard the PDU as invalid if an RFU field is set; we do not know
	 * the size of this future field, hence it would cause an incorrect
	 * calculation of the offset to the ACAD field.
	 */
	if (h->rfu) {
		goto ull_scan_aux_rx_flush;
	}

	ptr = h->data;
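
	/* Walk the extended header fields in the order mandated by the
	 * Common Extended Advertising Payload format (Bluetooth Core Spec,
	 * Vol 6, Part B): AdvA, TargetA, CTEInfo, ADI, AuxPtr, SyncInfo,
	 * TxPower, then ACAD.
	 */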

	if (h->adv_addr) {
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		/* Check if Periodic Advertising Synchronization is to be
		 * created
		 */
		if (sync && (scan->periodic.state != LL_SYNC_STATE_CREATED)) {
			/* Check address and update internal state */
#if defined(CONFIG_BT_CTLR_PRIVACY)
			ull_sync_setup_addr_check(scan, pdu->tx_addr, ptr,
						  ftr->rl_idx);
#else /* !CONFIG_BT_CTLR_PRIVACY */
			ull_sync_setup_addr_check(scan, pdu->tx_addr, ptr, 0U);
#endif /* !CONFIG_BT_CTLR_PRIVACY */

		}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

		ptr += BDADDR_SIZE;
	}

	if (h->tgt_addr) {
		ptr += BDADDR_SIZE;
	}

	if (h->cte_info) {
		ptr += sizeof(struct pdu_cte_info);
	}

	adi = NULL;
	if (h->adi) {
		adi = (void *)ptr;
		ptr += sizeof(*adi);
	}

	aux_ptr = NULL;
	if (h->aux_ptr) {
		aux_ptr = (void *)ptr;
		ptr += sizeof(*aux_ptr);
	}

	if (h->sync_info) {
		struct pdu_adv_sync_info *si;

		si = (void *)ptr;
		ptr += sizeof(*si);

		/* Check if Periodic Advertising Synchronization is to be
		 * created. Set up synchronization if the address and SID
		 * match an entry in the Periodic Advertiser List or the
		 * explicitly supplied address and SID.
		 */
		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && aux && sync && adi &&
		    ull_sync_setup_sid_match(scan, PDU_ADV_ADI_SID_GET(adi))) {
			ull_sync_setup(scan, aux, rx, si);
		}
	}

	if (h->tx_pwr) {
		ptr++;
	}

	/* Calculate ACAD Len */
	hdr_len = ptr - (uint8_t *)p;
	hdr_buf_len = PDU_AC_EXT_HEADER_SIZE_MIN + p->ext_hdr_len;
	if (hdr_len > hdr_buf_len) {
		/* FIXME: Handle invalid header length */
		acad_len = 0U;
	} else {
		acad_len = hdr_buf_len - hdr_len;
		hdr_len += acad_len;
	}
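
	/* Worked example (a sketch, assuming PDU_AC_EXT_HEADER_SIZE_MIN
	 * covers just the initial length/mode octet): for a PDU carrying
	 * Flags, AdvA (6), ADI (2) and AuxPtr (3) with ext_hdr_len = 14,
	 * the parse above leaves hdr_len = 1 + 1 + 6 + 2 + 3 = 13, and
	 * hdr_buf_len = 1 + 14 = 15, giving acad_len = 15 - 13 = 2 octets
	 * of ACAD.
	 */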

	/* calculate total data length */
	if (hdr_len < pdu->len) {
		data_len = pdu->len - hdr_len;
	} else {
		data_len = 0U;
	}

	/* Periodic Advertising Channel Map Indication and/or Broadcast ISO
	 * synchronization
	 */
	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) &&
	    (rx->hdr.type == NODE_RX_TYPE_SYNC_REPORT) &&
	    acad_len) {
		/* Periodic Advertising Channel Map Indication */
		ull_sync_chm_update(rx->hdr.handle, ptr, acad_len);

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
		struct ll_sync_set *sync_set;
		struct pdu_big_info *bi;
		uint8_t bi_size;

		sync_set = HDR_LLL2ULL(sync_lll);

		/* Provide encryption information for BIG sync creation */
		bi_size = ptr[PDU_ADV_DATA_HEADER_LEN_OFFSET] -
			  PDU_ADV_DATA_HEADER_TYPE_SIZE;
		sync_set->enc = (bi_size == PDU_BIG_INFO_ENCRYPTED_SIZE);
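
		/* The BIGInfo rides in the ACAD as an AD structure
		 * ([len][type][data]); its data part is larger when the BIG
		 * is encrypted, since it then also carries the GIV and GSKD
		 * fields (per the Core Spec, 33 octets unencrypted versus 57
		 * encrypted; stated here for orientation, the code relies
		 * only on PDU_BIG_INFO_ENCRYPTED_SIZE).
		 */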

		/* Store number of BISes in the BIG */
		bi = (void *)&ptr[PDU_ADV_DATA_HEADER_DATA_OFFSET];
		sync_set->num_bis = PDU_BIG_INFO_NUM_BIS_GET(bi);

		/* Broadcast ISO synchronize */
		if (sync_iso) {
			ull_sync_iso_setup(sync_iso, rx, ptr, acad_len);
		}
#endif /* CONFIG_BT_CTLR_SYNC_ISO */
	}

	/* Do not ULL schedule auxiliary PDU reception if there is no aux
	 * pointer, the aux pointer offset is zero, scannable advertising has
	 * an erroneous aux pointer present, or the PHY in the aux pointer is
	 * invalid or unsupported.
	 */
	if (!aux_ptr || !PDU_ADV_AUX_PTR_OFFSET_GET(aux_ptr) || is_scan_req ||
	    (PDU_ADV_AUX_PTR_PHY_GET(aux_ptr) > EXT_ADV_AUX_PHY_LE_CODED) ||
	    (!IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED) &&
	     PDU_ADV_AUX_PTR_PHY_GET(aux_ptr) == EXT_ADV_AUX_PHY_LE_CODED)) {
		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync_lll) {
			struct ll_sync_set *sync_set;

			sync_set = HDR_LLL2ULL(sync_lll);
			ftr->aux_data_len = sync_set->data_len + data_len;
			sync_set->data_len = 0U;
		} else if (aux) {
			aux->data_len += data_len;
			ftr->aux_data_len = aux->data_len;
		} else {
			ftr->aux_data_len = data_len;
		}

		if (is_scan_req) {
			LL_ASSERT(aux && aux->rx_last);

			aux->rx_last->rx_ftr.extra = rx;
			aux->rx_last = rx;

			return;
		}

		goto ull_scan_aux_rx_flush;
	}

	if (!aux) {
		aux = aux_acquire();
		if (!aux) {
			/* As LLL scheduling has been used and will fail due to
			 * non-allocation of aux context, a sync report with
			 * aux_failed flag set will be generated. Let the
			 * current sync report be set as partial, and the
			 * sync report corresponding to ull_scan_aux_release
			 * have the incomplete data status.
			 */
			if (ftr->aux_lll_sched) {
				ftr->aux_sched = 1U;
			}

			if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) &&
			    sync_lll) {
				struct ll_sync_set *sync_set;

				sync_set = HDR_LLL2ULL(sync_lll);
				ftr->aux_data_len = sync_set->data_len + data_len;
				sync_set->data_len = 0U;

			}

			goto ull_scan_aux_rx_flush;
		}

		aux->rx_head = aux->rx_last = NULL;
		aux->data_len = data_len;
		lll_aux = &aux->lll;
		lll_aux->is_chain_sched = 0U;

		ull_hdr_init(&aux->ull);
		lll_hdr_init(lll_aux, aux);

		aux->parent = lll ? (void *)lll : (void *)sync_lll;
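
		/* The parent pointer deliberately holds either a scanner or
		 * a periodic sync LLL context; downstream code (flush(),
		 * ull_scan_aux_lll_parent_get()) recovers the actual type by
		 * probing the ULL pools with ull_scan_is_valid_get().
		 */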

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		aux->rx_incomplete = rx_incomplete;
		rx_incomplete = NULL;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

	} else if (!(IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync_lll)) {
		aux->data_len += data_len;

		/* Flush auxiliary PDU receptions and stop any more ULL
		 * scheduling if accumulated data length exceeds configured
		 * maximum supported.
		 */
		if (aux->data_len >= CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX) {
			/* If LLL has already scheduled, then let it proceed.
			 *
			 * TODO: LLL to check accumulated data length and
			 *       stop further reception.
			 *       Currently LLL will schedule as long as there
			 *       are free node rx available.
			 */
			if (!ftr->aux_lll_sched) {
				goto ull_scan_aux_rx_flush;
			}
		}
	}

	/* In sync context we can dispatch rx immediately, in scan context we
	 * enqueue rx in aux context and will flush them after scan is complete.
	 */
	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync_lll) {
		struct ll_sync_set *sync_set;

		sync_set = HDR_LLL2ULL(sync_lll);
		sync_set->data_len += data_len;
		ftr->aux_data_len = sync_set->data_len;

		/* Flush auxiliary PDU receptions and stop any more ULL
		 * scheduling if accumulated data length exceeds configured
		 * maximum supported.
		 */
		if (sync_set->data_len >= CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX) {
			/* If LLL has already scheduled, then let it proceed.
			 *
			 * TODO: LLL to check accumulated data length and
			 *       stop further reception.
			 *       Currently LLL will schedule as long as there
			 *       are free node rx available.
			 */
			if (!ftr->aux_lll_sched) {
				sync_set->data_len = 0U;
				goto ull_scan_aux_rx_flush;
			}
		}
	} else {
		if (aux->rx_last) {
			aux->rx_last->rx_ftr.extra = rx;
		} else {
			aux->rx_head = rx;
		}
		aux->rx_last = rx;

		ftr->aux_data_len = aux->data_len;
	}

	/* Initialize the channel index and PHY for the Auxiliary PDU reception.
	 */
	lll_aux->chan = aux_ptr->chan_idx;
	lll_aux->phy = BIT(PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
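
	/* The AuxPtr PHY field encodes 0 (1M), 1 (2M) or 2 (Coded); BIT()
	 * maps it onto the controller's PHY_1M/PHY_2M/PHY_CODED bit masks,
	 * e.g. a field value of 1 becomes BIT(1) == PHY_2M.
	 */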

	/* See if this was already scheduled from LLL. If so, store aux context
	 * in global scan struct so we can pick it when scanned node is received
	 * with a valid context.
	 */
	if (ftr->aux_lll_sched) {
		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync_lll) {
			sync_lll->lll_aux = lll_aux;

			/* AUX_ADV_IND/AUX_CHAIN_IND PDU reception is being
			 * setup
			 */
			ftr->aux_sched = 1U;

			/* In sync context, dispatch immediately */
			ll_rx_put_sched(link, rx);
		} else {
			/* Check that the scan context is not already using
			 * LLL scheduling, or that it is receiving a chain and
			 * will reuse the aux context.
			 */
			LL_ASSERT(!lll->lll_aux || (lll->lll_aux == lll_aux));

			/* Scan context takes the aux context so that it can
			 * continue reception in LLL scheduling.
			 */
			lll->lll_aux = lll_aux;

			/* AUX_ADV_IND/AUX_CHAIN_IND PDU reception is being
			 * setup
			 */
			ftr->aux_sched = 1U;
		}

		/* Reset auxiliary channel PDU scan state which otherwise is
		 * done in the prepare_cb when ULL scheduling is used.
		 */
		lll_aux->state = 0U;

		return;
	}

	/* Switching to ULL scheduling to receive auxiliary PDUs */
	if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || lll) {
		LL_ASSERT(scan);

		/* Do not ULL schedule if scan disable requested */
		if (unlikely(scan->is_stop)) {
			goto ull_scan_aux_rx_flush;
		}

		/* Remove auxiliary context association with scan context so
		 * that LLL can differentiate it as being ULL scheduled.
		 */
		lll->lll_aux = NULL;
	} else {
		struct ll_sync_set *sync_set;

		LL_ASSERT(sync_lll &&
			  (!sync_lll->lll_aux || sync_lll->lll_aux == lll_aux));

		/* Do not ULL schedule if sync terminate requested */
		sync_set = HDR_LLL2ULL(sync_lll);
		if (unlikely(sync_set->is_stop)) {
			goto ull_scan_aux_rx_flush;
		}

		/* Associate the auxiliary context with sync context */
		sync_lll->lll_aux = lll_aux;

		/* Backup the node rx to be dispatched on successful ULL
		 * scheduling setup.
		 */
		aux->rx_head = rx;
	}

	/* Determine the window size */
	if (aux_ptr->offs_units) {
		lll_aux->window_size_us = OFFS_UNIT_300_US;
	} else {
		lll_aux->window_size_us = OFFS_UNIT_30_US;
	}

	aux_offset_us = (uint32_t)PDU_ADV_AUX_PTR_OFFSET_GET(aux_ptr) * lll_aux->window_size_us;
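
	/* The AuxPtr offset is a 13-bit count of 30 us or 300 us units; e.g.
	 * an offset field of 800 with offs_units set gives
	 * 800 * 300 us = 240 ms to the start of the auxiliary PDU (a worked
	 * example for orientation).
	 */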

	/* CA field contains the clock accuracy of the advertiser;
	 * 0 - 51 ppm to 500 ppm
	 * 1 - 0 ppm to 50 ppm
	 */
	if (aux_ptr->ca) {
		window_widening_us = SCA_DRIFT_50_PPM_US(aux_offset_us);
	} else {
		window_widening_us = SCA_DRIFT_500_PPM_US(aux_offset_us);
	}
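
	/* Window widening compensates the advertiser's worst-case clock
	 * drift over the aux offset, i.e. roughly offset * ppm / 1e6: for a
	 * 240 ms offset at 500 ppm that is about 120 us (illustrative
	 * arithmetic; the SCA_DRIFT_*_US() macros perform the exact integer
	 * calculation).
	 */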

	lll_aux->window_size_us += ((EVENT_TICKER_RES_MARGIN_US + EVENT_JITTER_US +
				     window_widening_us) << 1);

	ready_delay_us = lll_radio_rx_ready_delay_get(lll_aux->phy,
						      PHY_FLAGS_S8);

	/* Calculate the aux offset from start of the scan window */
	aux_offset_us += ftr->radio_end_us;
	aux_offset_us -= PDU_AC_US(pdu->len, phy, ftr->phy_flags);
	aux_offset_us -= EVENT_TICKER_RES_MARGIN_US;
	aux_offset_us -= EVENT_JITTER_US;
	aux_offset_us -= ready_delay_us;
	aux_offset_us -= window_widening_us;
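
	/* The offset is first rebased from the end of the received PDU
	 * (radio_end_us) back to its start by subtracting the PDU airtime,
	 * since the AuxPtr offset is referenced from the start of the
	 * containing PDU. The listen window start is then pulled earlier by
	 * jitter, ticker resolution margin, radio ready delay and window
	 * widening, matching the doubled growth of window_size_us above so
	 * equal slack remains on both sides of the nominal aux start time.
	 */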

	/* TODO: active_to_start feature port */
	aux->ull.ticks_active_to_start = 0;
	aux->ull.ticks_prepare_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	aux->ull.ticks_preempt_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
	aux->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(
		EVENT_OVERHEAD_START_US + ready_delay_us +
		PDU_AC_MAX_US(PDU_AC_EXT_PAYLOAD_RX_SIZE, lll_aux->phy) +
		EVENT_OVERHEAD_END_US);

	ticks_slot_offset = MAX(aux->ull.ticks_active_to_start,
				aux->ull.ticks_prepare_to_start);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}
	ticks_slot_offset += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

	ticks_aux_offset = HAL_TICKER_US_TO_TICKS(aux_offset_us);

#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	/* Disable ticker job, in order to chain the yield and start
	 * operations together and reduce CPU use by avoiding successive
	 * calls to ticker_job().
	 */
	mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 0);
#endif

	/* Yield the primary scan window or auxiliary or periodic sync event
	 * in ticker.
	 */
	if (ticker_yield_handle != TICKER_NULL) {
		ticker_status = ticker_yield_abs(TICKER_INSTANCE_ID_CTLR,
						 TICKER_USER_ID_ULL_HIGH,
						 ticker_yield_handle,
						 (ftr->ticks_anchor +
						  ticks_aux_offset -
						  ticks_slot_offset),
						 NULL, NULL);
		LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
			  (ticker_status == TICKER_STATUS_BUSY));
	}

	aux_handle = aux_handle_get(aux);
	ticker_status = ticker_start(TICKER_INSTANCE_ID_CTLR,
				     TICKER_USER_ID_ULL_HIGH,
				     TICKER_ID_SCAN_AUX_BASE + aux_handle,
				     ftr->ticks_anchor - ticks_slot_offset,
				     ticks_aux_offset,
				     TICKER_NULL_PERIOD,
				     TICKER_NULL_REMAINDER,
				     TICKER_NULL_LAZY,
				     (aux->ull.ticks_slot +
				      ticks_slot_overhead),
				     ticker_cb, aux, ticker_op_cb, aux);
	LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
		  (ticker_status == TICKER_STATUS_BUSY) ||
		  ((ticker_status == TICKER_STATUS_FAILURE) &&
		   IS_ENABLED(CONFIG_BT_TICKER_LOW_LAT)));

#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	/* enable ticker job, queued ticker operation will be handled
	 * thereafter.
	 */
	mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1);
#endif

	return;

ull_scan_aux_rx_flush:
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	if (sync && (scan->periodic.state != LL_SYNC_STATE_CREATED)) {
		scan->periodic.state = LL_SYNC_STATE_IDLE;
	}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

	if (aux) {
		/* Enqueue last rx in aux context if possible, otherwise send
		 * immediately since we are in sync context.
		 */
		if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || aux->rx_last) {
			LL_ASSERT(scan);

			/* If the scan is being disabled, rx could already be
			 * enqueued before coming here to
			 * ull_scan_aux_rx_flush. If rx is not the last in the
			 * list of received PDUs then add it, else do not add
			 * it, to avoid duplicate report generation, release
			 * and a probable infinite loop processing the list.
			 */
			if (unlikely(scan->is_stop)) {
				/* Add the node rx to the aux context list of
				 * node rx if not already added when coming
				 * here to ull_scan_aux_rx_flush. This handles
				 * a race condition wherein the last PDU in
				 * chain is received and at the same time scan
				 * is being disabled.
				 */
				if (aux->rx_last != rx) {
					aux->rx_last->rx_ftr.extra = rx;
					aux->rx_last = rx;
				}

				return;
			}

			aux->rx_last->rx_ftr.extra = rx;
			aux->rx_last = rx;
		} else {
			const struct ll_sync_set *sync_set;

			LL_ASSERT(sync_lll);

			ll_rx_put_sched(link, rx);

			sync_set = HDR_LLL2ULL(sync_lll);
			if (unlikely(sync_set->is_stop && sync_lll->lll_aux)) {
				return;
			}
		}

		LL_ASSERT(aux->parent);

		flush_safe(aux);

		return;
	}

	ll_rx_put(link, rx);

	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && rx_incomplete) {
		rx_release_put(rx_incomplete);
	}

	ll_rx_sched();
}

void ull_scan_aux_done(struct node_rx_event_done *done)
{
	struct ll_scan_aux_set *aux;

	/* Get reference to ULL context */
	aux = CONTAINER_OF(done->param, struct ll_scan_aux_set, ull);

	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) &&
	    !ull_scan_aux_is_valid_get(aux)) {
		struct ll_sync_set *sync;

		sync = CONTAINER_OF(done->param, struct ll_sync_set, ull);
		LL_ASSERT(ull_sync_is_valid_get(sync));

		/* Auxiliary context will be flushed by ull_scan_aux_stop() */
		if (unlikely(sync->is_stop) || !sync->lll.lll_aux) {
			return;
		}

		aux = HDR_LLL2ULL(sync->lll.lll_aux);
		LL_ASSERT(aux->parent);
	} else {
		struct ll_scan_set *scan;
		struct lll_scan *lll;

		lll = aux->parent;
		LL_ASSERT(lll);

		scan = HDR_LLL2ULL(lll);
		LL_ASSERT(ull_scan_is_valid_get(scan));

		/* Auxiliary context will be flushed by ull_scan_aux_stop() */
		if (unlikely(scan->is_stop)) {
			return;
		}
	}

	flush(aux);
}

struct ll_scan_aux_set *ull_scan_aux_set_get(uint8_t handle)
{
	if (handle >= CONFIG_BT_CTLR_SCAN_AUX_SET) {
		return NULL;
	}

	return &ll_scan_aux_pool[handle];
}

uint8_t ull_scan_aux_lll_handle_get(struct lll_scan_aux *lll)
{
	struct ll_scan_aux_set *aux;

	aux = HDR_LLL2ULL(lll);

	return aux_handle_get(aux);
}

void *ull_scan_aux_lll_parent_get(struct lll_scan_aux *lll,
				  uint8_t *is_lll_scan)
{
	struct ll_scan_aux_set *aux;

	aux = HDR_LLL2ULL(lll);

	if (is_lll_scan) {
		struct ll_scan_set *scan;
		struct lll_scan *lllscan;

		lllscan = aux->parent;
		LL_ASSERT(lllscan);

		scan = HDR_LLL2ULL(lllscan);
		*is_lll_scan = !!ull_scan_is_valid_get(scan);
	}

	return aux->parent;
}

struct ll_scan_aux_set *ull_scan_aux_is_valid_get(struct ll_scan_aux_set *aux)
{
	if (((uint8_t *)aux < (uint8_t *)ll_scan_aux_pool) ||
	    ((uint8_t *)aux > ((uint8_t *)ll_scan_aux_pool +
			       (sizeof(struct ll_scan_aux_set) *
				(CONFIG_BT_CTLR_SCAN_AUX_SET - 1))))) {
		return NULL;
	}

	return aux;
}

struct lll_scan_aux *ull_scan_aux_lll_is_valid_get(struct lll_scan_aux *lll)
{
	struct ll_scan_aux_set *aux;

	aux = HDR_LLL2ULL(lll);
	aux = ull_scan_aux_is_valid_get(aux);
	if (aux) {
		return &aux->lll;
	}

	return NULL;
}

void ull_scan_aux_release(memq_link_t *link, struct node_rx_pdu *rx)
{
	struct lll_scan_aux *lll_aux;
	void *param_ull;

	param_ull = HDR_LLL2ULL(rx->rx_ftr.param);

	if (ull_scan_is_valid_get(param_ull)) {
		struct lll_scan *lll;

		/* Mark buffer for release */
		rx->hdr.type = NODE_RX_TYPE_RELEASE;

		lll = rx->rx_ftr.param;
		lll_aux = lll->lll_aux;

	} else if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) ||
		   ull_scan_aux_is_valid_get(param_ull)) {
		/* Mark buffer for release */
		rx->hdr.type = NODE_RX_TYPE_RELEASE;

		lll_aux = rx->rx_ftr.param;

	} else if (ull_sync_is_valid_get(param_ull)) {
		struct ll_sync_set *sync;
		struct lll_sync *lll;

		sync = param_ull;

		/* reset data len total */
		sync->data_len = 0U;

		lll = rx->rx_ftr.param;
		lll_aux = lll->lll_aux;

		/* Change node type so HCI can dispatch report for truncated
		 * data properly.
		 */
		rx->hdr.type = NODE_RX_TYPE_SYNC_REPORT;
		rx->hdr.handle = ull_sync_handle_get(sync);

		/* Dequeue will try releasing list of node rx, set the extra
		 * pointer to NULL.
		 */
		rx->rx_ftr.extra = NULL;

	} else {
		LL_ASSERT(0);
		lll_aux = NULL;
	}

	if (lll_aux) {
		struct ll_scan_aux_set *aux;
		struct ll_scan_set *scan;
		struct lll_scan *lll;
		uint8_t is_stop;

		aux = HDR_LLL2ULL(lll_aux);
		lll = aux->parent;
		LL_ASSERT(lll);

		scan = HDR_LLL2ULL(lll);
		scan = ull_scan_is_valid_get(scan);
		if (scan) {
			is_stop = scan->is_stop;
		} else {
			struct lll_sync *sync_lll;
			struct ll_sync_set *sync;

			sync_lll = (void *)lll;
			sync = HDR_LLL2ULL(sync_lll);
			is_stop = sync->is_stop;
		}

		if (!is_stop) {
			LL_ASSERT(aux->parent);

			flush_safe(aux);

		} else if (!scan) {
			/* Sync terminate requested; enqueue node rx so that
			 * it will be flushed by ull_scan_aux_stop().
			 */
			rx->hdr.link = link;
			if (aux->rx_last) {
				aux->rx_last->rx_ftr.extra = rx;
			} else {
				aux->rx_head = rx;
			}
			aux->rx_last = rx;

			return;
		}
	}

	ll_rx_put(link, rx);
	ll_rx_sched();
}

int ull_scan_aux_stop(struct ll_scan_aux_set *aux)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, NULL};
	uint8_t aux_handle;
	uint32_t ret;
	int err;

	/* Stop any ULL scheduling of auxiliary PDU scan */
	aux_handle = aux_handle_get(aux);
	err = ull_ticker_stop_with_mark(TICKER_ID_SCAN_AUX_BASE + aux_handle,
					aux, &aux->lll);
	if (err && (err != -EALREADY)) {
		return err;
	}

	/* Abort LLL event if ULL scheduling not used or already in prepare */
	if (err == -EALREADY) {
		err = ull_disable(&aux->lll);
		if (err && (err != -EALREADY)) {
			return err;
		}

		mfy.fp = flush;

	} else if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC)) {
		/* ULL scan auxiliary PDU reception scheduling stopped
		 * before prepare.
		 */
		mfy.fp = flush;

	} else {
		struct ll_scan_set *scan;
		struct lll_scan *lll;

		lll = aux->parent;
		LL_ASSERT(lll);

		scan = HDR_LLL2ULL(lll);
		scan = ull_scan_is_valid_get(scan);
		if (scan) {
			/* ULL scan auxiliary PDU reception scheduling stopped
			 * before prepare.
			 */
			mfy.fp = flush;
		} else {
			/* ULL sync chain reception scheduling stopped before
			 * prepare.
			 */
			mfy.fp = aux_sync_incomplete;
		}
	}

	/* Release auxiliary context in ULL execution context */
	mfy.param = aux;
	ret = mayfly_enqueue(TICKER_USER_ID_THREAD, TICKER_USER_ID_ULL_HIGH,
			     0, &mfy);
	LL_ASSERT(!ret);

	return 0;
}

static int init_reset(void)
{
	/* Initialize scan aux pool. */
	mem_init(ll_scan_aux_pool, sizeof(struct ll_scan_aux_set),
		 sizeof(ll_scan_aux_pool) / sizeof(struct ll_scan_aux_set),
		 &scan_aux_free);

	return 0;
}

static inline struct ll_scan_aux_set *aux_acquire(void)
{
	return mem_acquire(&scan_aux_free);
}

static inline void aux_release(struct ll_scan_aux_set *aux)
{
	/* Clear the parent so that when the scan is being disabled this
	 * auxiliary context does not get associated with the disable
	 * procedure.
	 */
	LL_ASSERT(aux->parent);
	aux->parent = NULL;

	mem_release(aux, &scan_aux_free);
}

static inline uint8_t aux_handle_get(struct ll_scan_aux_set *aux)
{
	return mem_index_get(aux, ll_scan_aux_pool,
			     sizeof(struct ll_scan_aux_set));
}

static inline struct ll_sync_set *sync_create_get(struct ll_scan_set *scan)
{
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	return (!scan->periodic.cancelled) ? scan->periodic.sync : NULL;
#else /* !CONFIG_BT_CTLR_SYNC_PERIODIC */
	return NULL;
#endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC */
}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
static inline struct ll_sync_iso_set *
	sync_iso_create_get(struct ll_sync_set *sync)
{
#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	return sync->iso.sync_iso;
#else /* !CONFIG_BT_CTLR_SYNC_ISO */
	return NULL;
#endif /* !CONFIG_BT_CTLR_SYNC_ISO */
}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

static void done_disabled_cb(void *param)
{
	struct ll_scan_aux_set *aux;

	aux = param;
	LL_ASSERT(aux->parent);

	flush(aux);
}

static void flush_safe(void *param)
{
	struct ll_scan_aux_set *aux;
	struct ull_hdr *hdr;
	uint8_t ref;

	aux = param;
	LL_ASSERT(aux->parent);

	/* ref == 0
	 * All PDUs were scheduled from LLL and there is no pending done
	 * event, we can flush here.
	 *
	 * ref == 1
	 * There is pending done event so we need to flush from disabled
	 * callback. Flushing here would release aux context and thus
	 * ull_hdr before done event was processed.
	 */
	hdr = &aux->ull;
	ref = ull_ref_get(hdr);
	if (ref == 0U) {
		flush(aux);
	} else {
		/* A specific single shot scheduled aux context
		 * cannot overlap, i.e. ULL reference count
		 * shall be less than 2.
		 */
		LL_ASSERT(ref < 2U);

		LL_ASSERT(!hdr->disabled_cb);
		hdr->disabled_param = aux;
		hdr->disabled_cb = done_disabled_cb;
	}
}

static void flush(void *param)
{
	struct ll_scan_aux_set *aux;
	struct ll_scan_set *scan;
	struct node_rx_pdu *rx;
	struct lll_scan *lll;
	bool sched = false;

	/* Debug check that parent was assigned when allocated for reception of
	 * auxiliary channel PDUs.
	 */
	aux = param;
	LL_ASSERT(aux->parent);

	rx = aux->rx_head;
	if (rx) {
		aux->rx_head = NULL;

		ll_rx_put(rx->hdr.link, rx);
		sched = true;
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	rx = aux->rx_incomplete;
	if (rx) {
		aux->rx_incomplete = NULL;

		rx_release_put(rx);
		sched = true;
	}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

	if (sched) {
		ll_rx_sched();
	}

	lll = aux->parent;
	scan = HDR_LLL2ULL(lll);
	scan = ull_scan_is_valid_get(scan);
	if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || scan) {
		lll->lll_aux = NULL;
	} else {
		struct lll_sync *sync_lll;
		struct ll_sync_set *sync;

		sync_lll = aux->parent;
		sync = HDR_LLL2ULL(sync_lll);

		LL_ASSERT(sync->is_stop || sync_lll->lll_aux);
		sync_lll->lll_aux = NULL;
	}

	aux_release(aux);
}

static void rx_release_put(struct node_rx_pdu *rx)
{
	rx->hdr.type = NODE_RX_TYPE_RELEASE;

	ll_rx_put(rx->hdr.link, rx);
}

static void aux_sync_partial(void *param)
{
	struct ll_scan_aux_set *aux;
	struct node_rx_pdu *rx;

	aux = param;
	rx = aux->rx_head;
	aux->rx_head = NULL;

	LL_ASSERT(rx);
	rx->rx_ftr.aux_sched = 1U;

	ll_rx_put_sched(rx->hdr.link, rx);
}

static void aux_sync_incomplete(void *param)
{
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	struct ll_scan_aux_set *aux;

	aux = param;
	LL_ASSERT(aux->parent);

	/* ULL scheduling succeeded hence no backup node rx present, use the
	 * extra node rx reserved for incomplete data status generation.
	 */
	if (!aux->rx_head) {
		struct ll_sync_set *sync;
		struct node_rx_pdu *rx;
		struct lll_sync *lll;

		/* get reference to sync context */
		lll = aux->parent;
		LL_ASSERT(lll);
		sync = HDR_LLL2ULL(lll);

		/* reset data len total */
		sync->data_len = 0U;

		/* pick extra node rx stored in aux context */
		rx = aux->rx_incomplete;
		LL_ASSERT(rx);
		aux->rx_incomplete = NULL;

		/* prepare sync report with failure */
		rx->hdr.type = NODE_RX_TYPE_SYNC_REPORT;
		rx->hdr.handle = ull_sync_handle_get(sync);
		rx->rx_ftr.param = lll;

		/* flag chain reception failure */
		rx->rx_ftr.aux_failed = 1U;

		/* Dequeue will try releasing list of node rx,
		 * set the extra pointer to NULL.
		 */
		rx->rx_ftr.extra = NULL;

		/* add to rx list, will be flushed */
		aux->rx_head = rx;
	}

	LL_ASSERT(!ull_ref_get(&aux->ull));

	flush(aux);
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
}

static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_scan_aux_prepare};
	struct ll_scan_aux_set *aux = param;
	static struct lll_prepare_param p;
	uint32_t ret;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_O(1);

	/* Increment prepare reference count */
	ref = ull_ref_inc(&aux->ull);
	LL_ASSERT(ref);

	/* Append timing parameters */
	p.ticks_at_expire = ticks_at_expire;
	p.remainder = 0; /* FIXME: remainder; */
	p.lazy = lazy;
	p.force = force;
	p.param = &aux->lll;
	mfy.param = &p;

	/* Kick LLL prepare */
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
			     0, &mfy);
	LL_ASSERT(!ret);

	DEBUG_RADIO_PREPARE_O(1);
}

static void ticker_op_cb(uint32_t status, void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, NULL};
	struct ll_sync_set *sync;
	uint32_t ret;

	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC)) {
		struct ll_scan_aux_set *aux;
		struct lll_sync *sync_lll;

		aux = param;
		sync_lll = aux->parent;
		LL_ASSERT(sync_lll);

		sync = HDR_LLL2ULL(sync_lll);
		sync = ull_sync_is_valid_get(sync);
	} else {
		sync = NULL;
	}
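
	/* Dispatch based on context and ticker result: for a periodic sync
	 * owned aux context, a successful ticker start dispatches the backed
	 * up node rx as a partial report (aux_sync_partial), while failure
	 * generates an incomplete report and flushes (aux_sync_incomplete);
	 * for a scanner owned aux context, only failure needs handling, by
	 * flushing via flush_safe().
	 */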

	if (status == TICKER_STATUS_SUCCESS) {
		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync) {
			mfy.fp = aux_sync_partial;
		} else {
			return;
		}
	} else {
		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync) {
			mfy.fp = aux_sync_incomplete;
		} else {
			struct ll_scan_aux_set *aux;

			aux = param;
			LL_ASSERT(aux->parent);

			mfy.fp = flush_safe;
		}
	}

	mfy.param = param;

	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW, TICKER_USER_ID_ULL_HIGH,
			     0, &mfy);
	LL_ASSERT(!ret);
}