/*
 * Copyright (c) 2020 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/util.h>

#include "util/mem.h"
#include "util/memq.h"
#include "util/mayfly.h"
#include "util/util.h"
#include "util/dbuf.h"

#include "hal/ticker.h"
#include "hal/ccm.h"

#include "ticker/ticker.h"

#include "pdu_df.h"
#include "lll/pdu_vendor.h"
#include "pdu.h"

#include "lll.h"
#include "lll/lll_vendor.h"
#include "lll_scan.h"
#include "lll_scan_aux.h"
#include "lll/lll_df_types.h"
#include "lll_conn.h"
#include "lll_sync.h"
#include "lll_sync_iso.h"

#include "ull_scan_types.h"
#include "ull_sync_types.h"

#include "ull_internal.h"
#include "ull_scan_internal.h"
#include "ull_sync_internal.h"
#include "ull_sync_iso_internal.h"
#include "ull_df_internal.h"

#include <zephyr/bluetooth/hci_types.h>

#include <soc.h>
#include "hal/debug.h"

static int init_reset(void);
static inline struct ll_scan_aux_set *aux_acquire(void);
static inline void aux_release(struct ll_scan_aux_set *aux);
static inline uint8_t aux_handle_get(struct ll_scan_aux_set *aux);
static inline struct ll_sync_set *sync_create_get(struct ll_scan_set *scan);
static inline struct ll_sync_iso_set *
	sync_iso_create_get(struct ll_sync_set *sync);
static void done_disabled_cb(void *param);
static void flush_safe(void *param);
static void flush(void *param);
static void rx_release_put(struct node_rx_hdr *rx);
static void aux_sync_incomplete(void *param);
static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param);
static void ticker_op_cb(uint32_t status, void *param);

/* Auxiliary context pool used for reception of PDUs at aux offsets, common for
 * both Extended Advertising and Periodic Advertising.
 * Increasing the count allows simultaneous reception of interleaved chain PDUs
 * from multiple advertisers.
 */
static struct ll_scan_aux_set ll_scan_aux_pool[CONFIG_BT_CTLR_SCAN_AUX_SET];
static void *scan_aux_free;

int ull_scan_aux_init(void)
{
	int err;

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

int ull_scan_aux_reset(void)
{
	int err;

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

void ull_scan_aux_setup(memq_link_t *link, struct node_rx_hdr *rx)
{
	struct node_rx_hdr *rx_incomplete;
	struct ll_sync_iso_set *sync_iso;
	struct pdu_adv_aux_ptr *aux_ptr;
	struct pdu_adv_com_ext_adv *p;
	uint32_t ticks_slot_overhead;
	struct lll_scan_aux *lll_aux;
	struct ll_scan_aux_set *aux;
	uint8_t ticker_yield_handle;
	uint32_t window_widening_us;
	uint32_t ticks_slot_offset;
	uint32_t ticks_aux_offset;
	struct pdu_adv_ext_hdr *h;
	struct lll_sync *sync_lll;
	struct ll_scan_set *scan;
	struct ll_sync_set *sync;
	struct pdu_adv_adi *adi;
	struct node_rx_ftr *ftr;
	uint32_t ready_delay_us;
	uint32_t aux_offset_us;
	uint32_t ticker_status;
	struct lll_scan *lll;
	struct pdu_adv *pdu;
	uint8_t hdr_buf_len;
	uint8_t aux_handle;
	bool is_scan_req;
	uint8_t acad_len;
	uint8_t data_len;
	uint8_t hdr_len;
	uint8_t *ptr;
	uint8_t phy;

	is_scan_req = false;
	ftr = &rx->rx_ftr;

	switch (rx->type) {
	case NODE_RX_TYPE_EXT_1M_REPORT:
		lll_aux = NULL;
		aux = NULL;
		sync_lll = NULL;
		sync_iso = NULL;
		rx_incomplete = NULL;

		lll = ftr->param;
		LL_ASSERT(!lll->lll_aux);

		scan = HDR_LLL2ULL(lll);
		sync = sync_create_get(scan);
		phy = BT_HCI_LE_EXT_SCAN_PHY_1M;

		ticker_yield_handle = TICKER_ID_SCAN_BASE +
				      ull_scan_handle_get(scan);
		break;

	case NODE_RX_TYPE_EXT_CODED_REPORT:
		lll_aux = NULL;
		aux = NULL;
		sync_lll = NULL;
		sync_iso = NULL;
		rx_incomplete = NULL;

		lll = ftr->param;
		LL_ASSERT(!lll->lll_aux);

		scan = HDR_LLL2ULL(lll);
		sync = sync_create_get(scan);
		phy = BT_HCI_LE_EXT_SCAN_PHY_CODED;

		ticker_yield_handle = TICKER_ID_SCAN_BASE +
				      ull_scan_handle_get(scan);
		break;

	case NODE_RX_TYPE_EXT_AUX_REPORT:
		sync_iso = NULL;
		rx_incomplete = NULL;
		if (ull_scan_aux_is_valid_get(HDR_LLL2ULL(ftr->param))) {
			sync_lll = NULL;

			/* Node has valid aux context so its scan was scheduled
			 * from ULL.
			 */
			lll_aux = ftr->param;
			aux = HDR_LLL2ULL(lll_aux);

			/* aux parent will be NULL for periodic sync */
			lll = aux->parent;
			LL_ASSERT(lll);

			ticker_yield_handle = TICKER_ID_SCAN_AUX_BASE +
					      aux_handle_get(aux);

		} else if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) ||
			   ull_scan_is_valid_get(HDR_LLL2ULL(ftr->param))) {
			sync_lll = NULL;

			/* Node that does not have valid aux context but has
			 * valid scan set was scheduled from LLL. We can
			 * retrieve aux context from lll_scan as it was stored
			 * there when superior PDU was handled.
			 */
			lll = ftr->param;

			lll_aux = lll->lll_aux;
			LL_ASSERT(lll_aux);

			aux = HDR_LLL2ULL(lll_aux);
			LL_ASSERT(lll == aux->parent);

			ticker_yield_handle = TICKER_NULL;

		} else {
			lll = NULL;

			/* If none of the above, node is part of sync scanning
			 */
			sync_lll = ftr->param;

			lll_aux = sync_lll->lll_aux;
			LL_ASSERT(lll_aux);

			aux = HDR_LLL2ULL(lll_aux);
			LL_ASSERT(sync_lll == aux->parent);

			ticker_yield_handle = TICKER_NULL;
		}

		if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || lll) {
			scan = HDR_LLL2ULL(lll);
			sync = (void *)scan;
			scan = ull_scan_is_valid_get(scan);
			if (scan) {
				sync = NULL;
			}
		} else {
			scan = NULL;
			sync = HDR_LLL2ULL(sync_lll);
		}

		phy = lll_aux->phy;
		if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || scan) {
			/* Here we are in scanner context */
			sync = sync_create_get(scan);

			/* Generate report based on PHY scanned */
			switch (phy) {
			case PHY_1M:
				rx->type = NODE_RX_TYPE_EXT_1M_REPORT;
				break;
			case PHY_2M:
				rx->type = NODE_RX_TYPE_EXT_2M_REPORT;
				break;
			case PHY_CODED:
				rx->type = NODE_RX_TYPE_EXT_CODED_REPORT;
				break;
			default:
				LL_ASSERT(0);
				return;
			}

			/* Back up the scan-requested flag as it is in a union
			 * with the `extra` struct member, which will be set to
			 * NULL in subsequent code.
			 */
			is_scan_req = !!ftr->scan_req;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		} else {
			/* Here we are in periodic sync context */
			rx->type = NODE_RX_TYPE_SYNC_REPORT;
			rx->handle = ull_sync_handle_get(sync);

			/* Check if we need to create BIG sync */
			sync_iso = sync_iso_create_get(sync);

			/* lll_aux and aux hold the auxiliary channel context;
			 * reuse the existing aux context to scan the chain,
			 * hence lll_aux and aux are not released or set to
			 * NULL.
			 */
			sync = NULL;
		}
		break;

	case NODE_RX_TYPE_SYNC_REPORT:
		{
			struct ll_sync_set *ull_sync;

			/* Set the sync handle corresponding to the LLL context
			 * passed in the node rx footer field.
			 */
			sync_lll = ftr->param;
			LL_ASSERT(!sync_lll->lll_aux);

			ull_sync = HDR_LLL2ULL(sync_lll);
			rx->handle = ull_sync_handle_get(ull_sync);

			/* Check if we need to create BIG sync */
			sync_iso = sync_iso_create_get(ull_sync);

			/* FIXME: we will need lll_scan if chain was scheduled
			 *        from LLL; should we store lll_scan_set in
			 *        sync_lll instead?
			 */
			lll = NULL;
			lll_aux = NULL;
			aux = NULL;
			scan = NULL;
			sync = NULL;
			phy = sync_lll->phy;

			/* Back up the extra node_rx supplied for generating an
			 * incomplete report.
			 */
			rx_incomplete = ftr->extra;

			ticker_yield_handle = TICKER_ID_SCAN_SYNC_BASE +
					      ull_sync_handle_get(ull_sync);
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

		}
		break;
	default:
		LL_ASSERT(0);
		return;
	}
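
	/* At this point the switch above has resolved which contexts are in
	 * play for the received node: lll/scan for scanner originated PDUs,
	 * sync_lll for periodic sync originated PDUs, and lll_aux/aux when an
	 * auxiliary context already exists for a chain in progress. The
	 * remainder of this function parses the extended header and, when an
	 * aux pointer is present, schedules reception of the next PDU in the
	 * chain.
	 */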

	rx->link = link;
	ftr->extra = NULL;

	ftr->aux_sched = 0U;

	pdu = (void *)((struct node_rx_pdu *)rx)->pdu;
	p = (void *)&pdu->adv_ext_ind;
	if (!pdu->len || !p->ext_hdr_len) {
		if (pdu->len) {
			data_len = pdu->len - PDU_AC_EXT_HEADER_SIZE_MIN;
		} else {
			data_len = 0U;
		}

		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync_lll) {
			struct ll_sync_set *sync;

			sync = HDR_LLL2ULL(sync_lll);
			ftr->aux_data_len = sync->data_len + data_len;
			sync->data_len = 0U;
		} else if (aux) {
			aux->data_len += data_len;
			ftr->aux_data_len = aux->data_len;
		} else {
			ftr->aux_data_len = data_len;
		}

		goto ull_scan_aux_rx_flush;
	}

	h = (void *)p->ext_hdr_adv_data;

	/* Regard the PDU as invalid if an RFU field is set; we do not know the
	 * size of this future field, so it would cause an incorrect
	 * calculation of the offset to the ACAD field.
	 */
	if (h->rfu) {
		goto ull_scan_aux_rx_flush;
	}

	ptr = h->data;

	if (h->adv_addr) {
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		/* Check if Periodic Advertising Synchronization is to be
		 * created.
		 */
		if (sync && (scan->periodic.state != LL_SYNC_STATE_CREATED)) {
			/* Check address and update internal state */
#if defined(CONFIG_BT_CTLR_PRIVACY)
			ull_sync_setup_addr_check(scan, pdu->tx_addr, ptr,
						  ftr->rl_idx);
#else /* !CONFIG_BT_CTLR_PRIVACY */
			ull_sync_setup_addr_check(scan, pdu->tx_addr, ptr, 0U);
#endif /* !CONFIG_BT_CTLR_PRIVACY */

		}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

		ptr += BDADDR_SIZE;
	}

	if (h->tgt_addr) {
		ptr += BDADDR_SIZE;
	}

	if (h->cte_info) {
		ptr += sizeof(struct pdu_cte_info);
	}

	adi = NULL;
	if (h->adi) {
		adi = (void *)ptr;
		ptr += sizeof(*adi);
	}

	aux_ptr = NULL;
	if (h->aux_ptr) {
		aux_ptr = (void *)ptr;
		ptr += sizeof(*aux_ptr);
	}

	if (h->sync_info) {
		struct pdu_adv_sync_info *si;

		si = (void *)ptr;
		ptr += sizeof(*si);

		/* Check if Periodic Advertising Synchronization is to be
		 * created. Set up synchronization if the address and SID match
		 * an entry in the Periodic Advertiser List or the explicitly
		 * supplied address and SID.
		 */
		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync && adi &&
		    ull_sync_setup_sid_match(scan, PDU_ADV_ADI_SID_GET(adi))) {
			ull_sync_setup(scan, aux, rx, si);
		}
	}

	if (h->tx_pwr) {
		ptr++;
	}

	/* Calculate ACAD Len */
	hdr_len = ptr - (uint8_t *)p;
	hdr_buf_len = PDU_AC_EXT_HEADER_SIZE_MIN + p->ext_hdr_len;
	if (hdr_len > hdr_buf_len) {
		/* FIXME: Handle invalid header length */
		acad_len = 0U;
	} else {
		acad_len = hdr_buf_len - hdr_len;
		hdr_len += acad_len;
	}

	/* Calculate total data length */
	if (hdr_len < pdu->len) {
		data_len = pdu->len - hdr_len;
	} else {
		data_len = 0U;
	}
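
	/* Worked example with hypothetical values: for a 40 byte PDU with an
	 * ext_hdr_len of 10, hdr_buf_len is 1 + 10 = 11 bytes. If the flagged
	 * fields parsed above span 9 bytes, then acad_len is 11 - 9 = 2,
	 * hdr_len grows to 11, and data_len = 40 - 11 = 29 bytes of
	 * advertising data in this PDU.
	 */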

	/* Periodic Advertising Channel Map Indication and/or Broadcast ISO
	 * synchronization
	 */
	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) &&
	    (rx->type == NODE_RX_TYPE_SYNC_REPORT) &&
	    acad_len) {
		/* Periodic Advertising Channel Map Indication */
		ull_sync_chm_update(rx->handle, ptr, acad_len);

		/* Broadcast ISO synchronize */
		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_ISO) && sync_iso) {
			ull_sync_iso_setup(sync_iso, rx, ptr, acad_len);
		}
	}

	/* Do not ULL schedule auxiliary PDU reception if there is no aux
	 * pointer, the aux pointer offset is zero, scannable advertising
	 * erroneously has an aux pointer present, or the PHY in the aux
	 * pointer is invalid.
	 */
	if (!aux_ptr || !PDU_ADV_AUX_PTR_OFFSET_GET(aux_ptr) || is_scan_req ||
	    (PDU_ADV_AUX_PTR_PHY_GET(aux_ptr) > EXT_ADV_AUX_PHY_LE_CODED)) {
		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync_lll) {
			struct ll_sync_set *sync;

			sync = HDR_LLL2ULL(sync_lll);
			ftr->aux_data_len = sync->data_len + data_len;
			sync->data_len = 0U;
		} else if (aux) {
			aux->data_len += data_len;
			ftr->aux_data_len = aux->data_len;
		} else {
			ftr->aux_data_len = data_len;
		}

		if (is_scan_req) {
			LL_ASSERT(aux && aux->rx_last);

			aux->rx_last->rx_ftr.extra = rx;
			aux->rx_last = rx;

			return;
		}

		goto ull_scan_aux_rx_flush;
	}

	if (!aux) {
		aux = aux_acquire();
		if (!aux) {
			/* As LLL scheduling has been used and will fail due to
			 * non-allocation of aux context, a sync report with
			 * aux_failed flag set will be generated. Let the
			 * current sync report be set as partial, and the
			 * sync report corresponding to ull_scan_aux_release
			 * have the incomplete data status.
			 */
			if (ftr->aux_lll_sched) {
				ftr->aux_sched = 1U;
			}

			if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) &&
			    sync_lll) {
				struct ll_sync_set *sync;

				sync = HDR_LLL2ULL(sync_lll);
				ftr->aux_data_len = sync->data_len + data_len;
				sync->data_len = 0U;

			}

			goto ull_scan_aux_rx_flush;
		}

		aux->rx_head = aux->rx_last = NULL;
		aux->data_len = data_len;
		lll_aux = &aux->lll;
		lll_aux->is_chain_sched = 0U;

		ull_hdr_init(&aux->ull);
		lll_hdr_init(lll_aux, aux);

		aux->parent = lll ? (void *)lll : (void *)sync_lll;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		aux->rx_incomplete = rx_incomplete;
		rx_incomplete = NULL;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

	} else {
		aux->data_len += data_len;
	}

	/* In sync context we can dispatch rx immediately; in scan context we
	 * enqueue rx in the aux context and will flush it after the scan is
	 * complete.
	 */
	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync_lll) {
		struct ll_sync_set *sync;

		sync = HDR_LLL2ULL(sync_lll);
		sync->data_len += data_len;
		ftr->aux_data_len = sync->data_len;
	} else {
		if (aux->rx_last) {
			aux->rx_last->rx_ftr.extra = rx;
		} else {
			aux->rx_head = rx;
		}
		aux->rx_last = rx;

		ftr->aux_data_len = aux->data_len;
	}

	/* Initialize the channel index and PHY for the Auxiliary PDU reception.
	 */
	lll_aux->chan = aux_ptr->chan_idx;
	lll_aux->phy = BIT(PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
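
	/* Note: the aux pointer PHY field is an index (0: 1M, 1: 2M,
	 * 2: Coded), while the lll PHY fields are one-bit-per-PHY masks;
	 * e.g. an aux PHY value of 2 becomes BIT(2), i.e. PHY_CODED.
	 */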

	/* See if this was already scheduled from LLL. If so, store the aux
	 * context in the global scan struct so we can pick it up when the
	 * scanned node is received with a valid context.
	 */
	if (ftr->aux_lll_sched) {
		/* AUX_ADV_IND/AUX_CHAIN_IND PDU reception is being setup */
		ftr->aux_sched = 1U;

		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync_lll) {
			sync_lll->lll_aux = lll_aux;

			/* In sync context, dispatch immediately */
			ll_rx_put_sched(link, rx);
		} else {
			lll->lll_aux = lll_aux;
		}

		/* Reset the auxiliary channel PDU scan state, which otherwise
		 * is done in the prepare_cb when ULL scheduling is used.
		 */
		lll_aux->state = 0U;

		return;
	}

	/* Switching to ULL scheduling to receive auxiliary PDUs */
	if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || lll) {
		/* Do not ULL schedule if scan disable requested */
		if (unlikely(scan->is_stop)) {
			goto ull_scan_aux_rx_flush;
		}

		/* Remove the auxiliary context association with the scan
		 * context so that LLL can differentiate it as ULL scheduling.
		 */
		lll->lll_aux = NULL;
	} else {
		struct ll_sync_set *sync;

		LL_ASSERT(sync_lll &&
			  (!sync_lll->lll_aux || sync_lll->lll_aux == lll_aux));

		/* Do not ULL schedule if sync terminate requested */
		sync = HDR_LLL2ULL(sync_lll);
		if (unlikely(sync->is_stop)) {
			goto ull_scan_aux_rx_flush;
		}

		/* Associate the auxiliary context with sync context */
		sync_lll->lll_aux = lll_aux;

		/* Back up the node rx to be dispatched on successful ULL
		 * scheduling setup.
		 */
		aux->rx_head = rx;
	}

	/* Determine the window size */
	if (aux_ptr->offs_units) {
		lll_aux->window_size_us = OFFS_UNIT_300_US;
	} else {
		lll_aux->window_size_us = OFFS_UNIT_30_US;
	}

	aux_offset_us = (uint32_t)PDU_ADV_AUX_PTR_OFFSET_GET(aux_ptr) *
			lll_aux->window_size_us;

	/* CA field contains the clock accuracy of the advertiser;
	 * 0 - 51 ppm to 500 ppm
	 * 1 - 0 ppm to 50 ppm
	 */
	if (aux_ptr->ca) {
		window_widening_us = SCA_DRIFT_50_PPM_US(aux_offset_us);
	} else {
		window_widening_us = SCA_DRIFT_500_PPM_US(aux_offset_us);
	}

	lll_aux->window_size_us += (EVENT_TICKER_RES_MARGIN_US +
				    ((EVENT_JITTER_US + window_widening_us) << 1));
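
	/* For example, an aux offset of 24000 us with CA = 0 (up to 500 ppm)
	 * gives a worst-case drift of 24000 * 500 / 1000000 = 12 us; the
	 * receive window is widened by twice this amount (plus jitter) to
	 * cover drift in either direction.
	 */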

	ready_delay_us = lll_radio_rx_ready_delay_get(lll_aux->phy,
						      PHY_FLAGS_S8);

	/* Calculate the aux offset from start of the scan window */
	aux_offset_us += ftr->radio_end_us;
	aux_offset_us -= PDU_AC_US(pdu->len, phy, ftr->phy_flags);
	aux_offset_us -= EVENT_TICKER_RES_MARGIN_US;
	aux_offset_us -= EVENT_JITTER_US;
	aux_offset_us -= ready_delay_us;
	aux_offset_us -= window_widening_us;
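
	/* aux_offset_us now holds the receive window start relative to the
	 * scanner's anchor: the AuxPtr offset counts from the start of the
	 * PDU that carried it, so the PDU airtime is subtracted from the
	 * end-of-air timestamp, and jitter, ticker resolution margin, radio
	 * ready delay and window widening are subtracted so that the window
	 * opens early enough to catch the AUX PDU in the worst case.
	 */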

	/* TODO: active_to_start feature port */
	aux->ull.ticks_active_to_start = 0;
	aux->ull.ticks_prepare_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	aux->ull.ticks_preempt_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
	aux->ull.ticks_slot =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US +
				       ready_delay_us +
				       PDU_AC_MAX_US(PDU_AC_EXT_PAYLOAD_RX_SIZE,
						     lll_aux->phy) +
				       EVENT_OVERHEAD_END_US);
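
	/* The reserved time slot spans the event start overhead, the radio rx
	 * ready delay, the airtime of a maximum-size extended advertising PDU
	 * on the selected PHY, and the event end overhead.
	 */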

	ticks_slot_offset = MAX(aux->ull.ticks_active_to_start,
				aux->ull.ticks_prepare_to_start);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}
	ticks_slot_offset += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

	ticks_aux_offset = HAL_TICKER_US_TO_TICKS(aux_offset_us);

	/* Yield the primary scan window or auxiliary or periodic sync event
	 * in ticker.
	 */
	if (ticker_yield_handle != TICKER_NULL) {
		ticker_status = ticker_yield_abs(TICKER_INSTANCE_ID_CTLR,
						 TICKER_USER_ID_ULL_HIGH,
						 ticker_yield_handle,
						 (ftr->ticks_anchor +
						  ticks_aux_offset -
						  ticks_slot_offset),
						 NULL, NULL);
		LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
			  (ticker_status == TICKER_STATUS_BUSY));
	}

	aux_handle = aux_handle_get(aux);
	ticker_status = ticker_start(TICKER_INSTANCE_ID_CTLR,
				     TICKER_USER_ID_ULL_HIGH,
				     TICKER_ID_SCAN_AUX_BASE + aux_handle,
				     ftr->ticks_anchor - ticks_slot_offset,
				     ticks_aux_offset,
				     TICKER_NULL_PERIOD,
				     TICKER_NULL_REMAINDER,
				     TICKER_NULL_LAZY,
				     (aux->ull.ticks_slot +
				      ticks_slot_overhead),
				     ticker_cb, aux, ticker_op_cb, aux);
	LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
		  (ticker_status == TICKER_STATUS_BUSY) ||
		  ((ticker_status == TICKER_STATUS_FAILURE) &&
		   IS_ENABLED(CONFIG_BT_TICKER_LOW_LAT)));

	return;

ull_scan_aux_rx_flush:
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	if (sync && (scan->periodic.state != LL_SYNC_STATE_CREATED)) {
		scan->periodic.state = LL_SYNC_STATE_IDLE;
	}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

	if (aux) {
		/* Enqueue the last rx in the aux context if possible;
		 * otherwise send it immediately since we are in sync context.
		 */
		if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || aux->rx_last) {
			/* If the scan is being disabled, rx could already be
			 * enqueued before coming here to
			 * ull_scan_aux_rx_flush. If rx is not the last in the
			 * list of received PDUs then add it; else do not add
			 * it, to avoid duplicate report generation, release,
			 * and probable infinite-loop processing of the list.
			 */
			if (unlikely(scan->is_stop)) {
				/* Add the node rx to the aux context list of
				 * node rx if not already added when coming
				 * here to ull_scan_aux_rx_flush. This handles
				 * a race condition wherein the last PDU in the
				 * chain is received while the scan is being
				 * disabled.
				 */
				if (aux->rx_last != rx) {
					aux->rx_last->rx_ftr.extra = rx;
					aux->rx_last = rx;
				}

				return;
			}

			aux->rx_last->rx_ftr.extra = rx;
			aux->rx_last = rx;
		} else {
			const struct ll_sync_set *sync;

			LL_ASSERT(sync_lll);

			ll_rx_put_sched(link, rx);

			sync = HDR_LLL2ULL(sync_lll);
			if (unlikely(sync->is_stop && sync_lll->lll_aux)) {
				return;
			}
		}

		LL_ASSERT(aux->parent);

		flush_safe(aux);

		return;
	}

	ll_rx_put(link, rx);

	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && rx_incomplete) {
		rx_release_put(rx_incomplete);
	}

	ll_rx_sched();
}

void ull_scan_aux_done(struct node_rx_event_done *done)
{
	struct ll_scan_aux_set *aux;

	/* Get reference to ULL context */
	aux = CONTAINER_OF(done->param, struct ll_scan_aux_set, ull);

	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) &&
	    !ull_scan_aux_is_valid_get(aux)) {
		struct ll_sync_set *sync;

		sync = CONTAINER_OF(done->param, struct ll_sync_set, ull);
		LL_ASSERT(ull_sync_is_valid_get(sync));

		/* Auxiliary context will be flushed by ull_scan_aux_stop() */
		if (unlikely(sync->is_stop) || !sync->lll.lll_aux) {
			return;
		}

		aux = HDR_LLL2ULL(sync->lll.lll_aux);
		LL_ASSERT(aux->parent);
	} else {
		struct ll_scan_set *scan;
		struct lll_scan *lll;

		lll = aux->parent;
		LL_ASSERT(lll);

		scan = HDR_LLL2ULL(lll);
		LL_ASSERT(ull_scan_is_valid_get(scan));

		/* Auxiliary context will be flushed by ull_scan_aux_stop() */
		if (unlikely(scan->is_stop)) {
			return;
		}
	}

	flush(aux);
}

struct ll_scan_aux_set *ull_scan_aux_set_get(uint8_t handle)
{
	if (handle >= CONFIG_BT_CTLR_SCAN_AUX_SET) {
		return NULL;
	}

	return &ll_scan_aux_pool[handle];
}

uint8_t ull_scan_aux_lll_handle_get(struct lll_scan_aux *lll)
{
	struct ll_scan_aux_set *aux;

	aux = HDR_LLL2ULL(lll);

	return aux_handle_get(aux);
}

void *ull_scan_aux_lll_parent_get(struct lll_scan_aux *lll,
				  uint8_t *is_lll_scan)
{
	struct ll_scan_aux_set *aux;

	aux = HDR_LLL2ULL(lll);

	if (is_lll_scan) {
		struct ll_scan_set *scan;
		struct lll_scan *lll;

		lll = aux->parent;
		LL_ASSERT(lll);

		scan = HDR_LLL2ULL(lll);
		*is_lll_scan = !!ull_scan_is_valid_get(scan);
	}

	return aux->parent;
}

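/* Return the aux set pointer itself if it lies within the static aux context
 * pool, NULL otherwise. Used to distinguish an aux context from the scan and
 * sync contexts that may also appear in the node rx footer param field.
 */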
struct ll_scan_aux_set *ull_scan_aux_is_valid_get(struct ll_scan_aux_set *aux)
{
	if (((uint8_t *)aux < (uint8_t *)ll_scan_aux_pool) ||
	    ((uint8_t *)aux > ((uint8_t *)ll_scan_aux_pool +
			       (sizeof(struct ll_scan_aux_set) *
				(CONFIG_BT_CTLR_SCAN_AUX_SET - 1))))) {
		return NULL;
	}

	return aux;
}

struct lll_scan_aux *ull_scan_aux_lll_is_valid_get(struct lll_scan_aux *lll)
{
	struct ll_scan_aux_set *aux;

	aux = HDR_LLL2ULL(lll);
	aux = ull_scan_aux_is_valid_get(aux);
	if (aux) {
		return &aux->lll;
	}

	return NULL;
}

void ull_scan_aux_release(memq_link_t *link, struct node_rx_hdr *rx)
{
	struct lll_scan_aux *lll_aux;
	void *param_ull;

	param_ull = HDR_LLL2ULL(rx->rx_ftr.param);

	if (ull_scan_is_valid_get(param_ull)) {
		struct lll_scan *lll;

		/* Mark the buffer for release */
		rx->type = NODE_RX_TYPE_RELEASE;

		lll = rx->rx_ftr.param;
		lll_aux = lll->lll_aux;

	} else if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) ||
		   ull_scan_aux_is_valid_get(param_ull)) {
		/* Mark the buffer for release */
		rx->type = NODE_RX_TYPE_RELEASE;

		lll_aux = rx->rx_ftr.param;

	} else if (ull_sync_is_valid_get(param_ull)) {
		struct ll_sync_set *sync;
		struct lll_sync *lll;

		sync = param_ull;

		/* Reset the data len total */
		sync->data_len = 0U;

		lll = rx->rx_ftr.param;
		lll_aux = lll->lll_aux;

		/* Change the node type so HCI can properly dispatch a report
		 * for the truncated data.
		 */
		rx->type = NODE_RX_TYPE_SYNC_REPORT;
		rx->handle = ull_sync_handle_get(sync);

		/* Dequeue will try releasing the list of node rx; set the
		 * extra pointer to NULL.
		 */
		rx->rx_ftr.extra = NULL;

	} else {
		LL_ASSERT(0);
		lll_aux = NULL;
	}

	if (lll_aux) {
		struct ll_scan_aux_set *aux;
		struct ll_scan_set *scan;
		struct lll_scan *lll;
		uint8_t is_stop;

		aux = HDR_LLL2ULL(lll_aux);
		lll = aux->parent;
		LL_ASSERT(lll);

		scan = HDR_LLL2ULL(lll);
		scan = ull_scan_is_valid_get(scan);
		if (scan) {
			is_stop = scan->is_stop;
		} else {
			struct lll_sync *sync_lll;
			struct ll_sync_set *sync;

			sync_lll = (void *)lll;
			sync = HDR_LLL2ULL(sync_lll);
			is_stop = sync->is_stop;
		}

		if (!is_stop) {
			LL_ASSERT(aux->parent);

			flush_safe(aux);

		} else if (!scan) {
			/* Sync terminate requested; enqueue the node rx so
			 * that it will be flushed by ull_scan_aux_stop().
			 */
			rx->link = link;
			if (aux->rx_last) {
				aux->rx_last->rx_ftr.extra = rx;
			} else {
				aux->rx_head = rx;
			}
			aux->rx_last = rx;

			return;
		}
	}

	ll_rx_put(link, rx);
	ll_rx_sched();
}

int ull_scan_aux_stop(struct ll_scan_aux_set *aux)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, NULL};
	uint8_t aux_handle;
	uint32_t ret;
	int err;

	/* Stop any ULL scheduling of auxiliary PDU scan */
	aux_handle = aux_handle_get(aux);
	err = ull_ticker_stop_with_mark(TICKER_ID_SCAN_AUX_BASE + aux_handle,
					aux, &aux->lll);
	if (err && (err != -EALREADY)) {
		return err;
	}

	/* Abort LLL event if ULL scheduling not used or already in prepare */
	if (err == -EALREADY) {
		err = ull_disable(&aux->lll);
		if (err && (err != -EALREADY)) {
			return err;
		}

		mfy.fp = flush;

	} else if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC)) {
		/* ULL scan auxiliary PDU reception scheduling stopped
		 * before prepare.
		 */
		mfy.fp = flush;

	} else {
		struct ll_scan_set *scan;
		struct lll_scan *lll;

		lll = aux->parent;
		LL_ASSERT(lll);

		scan = HDR_LLL2ULL(lll);
		scan = ull_scan_is_valid_get(scan);
		if (scan) {
			/* ULL scan auxiliary PDU reception scheduling stopped
			 * before prepare.
			 */
			mfy.fp = flush;
		} else {
			/* ULL sync chain reception scheduling stopped before
			 * prepare.
			 */
			mfy.fp = aux_sync_incomplete;
		}
	}

	/* Release auxiliary context in ULL execution context */
	mfy.param = aux;
	ret = mayfly_enqueue(TICKER_USER_ID_THREAD, TICKER_USER_ID_ULL_HIGH,
			     0, &mfy);
	LL_ASSERT(!ret);

	return 0;
}

static int init_reset(void)
{
	/* Initialize the scan aux pool. */
	mem_init(ll_scan_aux_pool, sizeof(struct ll_scan_aux_set),
		 sizeof(ll_scan_aux_pool) / sizeof(struct ll_scan_aux_set),
		 &scan_aux_free);
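
	/* mem_init() links each element of ll_scan_aux_pool into the
	 * scan_aux_free list; aux_acquire() and aux_release() then pop from
	 * and push to that free list.
	 */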

	return 0;
}

static inline struct ll_scan_aux_set *aux_acquire(void)
{
	return mem_acquire(&scan_aux_free);
}

static inline void aux_release(struct ll_scan_aux_set *aux)
{
	/* Clear the parent so that, while the scan is being disabled, this
	 * auxiliary context is no longer associated with it and is not
	 * flushed again.
	 */
	LL_ASSERT(aux->parent);
	aux->parent = NULL;

	mem_release(aux, &scan_aux_free);
}

static inline uint8_t aux_handle_get(struct ll_scan_aux_set *aux)
{
	return mem_index_get(aux, ll_scan_aux_pool,
			     sizeof(struct ll_scan_aux_set));
}

static inline struct ll_sync_set *sync_create_get(struct ll_scan_set *scan)
{
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	return (!scan->periodic.cancelled) ? scan->periodic.sync : NULL;
#else /* !CONFIG_BT_CTLR_SYNC_PERIODIC */
	return NULL;
#endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC */
}

static inline struct ll_sync_iso_set *
	sync_iso_create_get(struct ll_sync_set *sync)
{
#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	return sync->iso.sync_iso;
#else /* !CONFIG_BT_CTLR_SYNC_ISO */
	return NULL;
#endif /* !CONFIG_BT_CTLR_SYNC_ISO */
}

static void done_disabled_cb(void *param)
{
	struct ll_scan_aux_set *aux;

	aux = param;
	LL_ASSERT(aux->parent);

	flush(aux);
}

static void flush_safe(void *param)
{
	struct ll_scan_aux_set *aux;
	struct ull_hdr *hdr;
	uint8_t ref;

	aux = param;
	LL_ASSERT(aux->parent);

	/* ref == 0
	 * All PDUs were scheduled from LLL and there is no pending done
	 * event, we can flush here.
	 *
	 * ref == 1
	 * There is a pending done event so we need to flush from the disabled
	 * callback. Flushing here would release the aux context, and thus the
	 * ull_hdr, before the done event was processed.
	 */
	hdr = &aux->ull;
	ref = ull_ref_get(hdr);
	if (ref == 0U) {
		flush(aux);
	} else {
		/* A specific single shot scheduled aux context
		 * cannot overlap, i.e. ULL reference count
		 * shall be less than 2.
		 */
		LL_ASSERT(ref < 2U);

		LL_ASSERT(!hdr->disabled_cb);
		hdr->disabled_param = aux;
		hdr->disabled_cb = done_disabled_cb;
	}
}

static void flush(void *param)
{
	struct ll_scan_aux_set *aux;
	struct ll_scan_set *scan;
	struct node_rx_hdr *rx;
	struct lll_scan *lll;
	bool sched = false;

	/* Debug check that parent was assigned when allocated for reception of
	 * auxiliary channel PDUs.
	 */
	aux = param;
	LL_ASSERT(aux->parent);

	rx = aux->rx_head;
	if (rx) {
		aux->rx_head = NULL;

		ll_rx_put(rx->link, rx);
		sched = true;
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	rx = aux->rx_incomplete;
	if (rx) {
		aux->rx_incomplete = NULL;

		rx_release_put(rx);
		sched = true;
	}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

	if (sched) {
		ll_rx_sched();
	}

	lll = aux->parent;
	scan = HDR_LLL2ULL(lll);
	scan = ull_scan_is_valid_get(scan);
	if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || scan) {
		lll->lll_aux = NULL;
	} else {
		struct lll_sync *sync_lll;
		struct ll_sync_set *sync;

		sync_lll = aux->parent;
		sync = HDR_LLL2ULL(sync_lll);

		LL_ASSERT(sync->is_stop || sync_lll->lll_aux);
		sync_lll->lll_aux = NULL;
	}

	aux_release(aux);
}

static void rx_release_put(struct node_rx_hdr *rx)
{
	rx->type = NODE_RX_TYPE_RELEASE;

	ll_rx_put(rx->link, rx);
}

static void aux_sync_partial(void *param)
{
	struct ll_scan_aux_set *aux;
	struct node_rx_hdr *rx;

	aux = param;
	rx = aux->rx_head;
	aux->rx_head = NULL;

	LL_ASSERT(rx);
	rx->rx_ftr.aux_sched = 1U;

	ll_rx_put_sched(rx->link, rx);
}

static void aux_sync_incomplete(void *param)
{
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	struct ll_scan_aux_set *aux;

	aux = param;
	LL_ASSERT(aux->parent);

	/* ULL scheduling succeeded, hence no backup node rx is present; use
	 * the extra node rx reserved for incomplete data status generation.
	 */
	if (!aux->rx_head) {
		struct ll_sync_set *sync;
		struct node_rx_hdr *rx;
		struct lll_sync *lll;

		/* Get reference to the sync context */
		lll = aux->parent;
		LL_ASSERT(lll);
		sync = HDR_LLL2ULL(lll);

		/* Reset the data len total */
		sync->data_len = 0U;

		/* Pick the extra node rx stored in the aux context */
		rx = aux->rx_incomplete;
		LL_ASSERT(rx);
		aux->rx_incomplete = NULL;

		/* Prepare a sync report with failure */
		rx->type = NODE_RX_TYPE_SYNC_REPORT;
		rx->handle = ull_sync_handle_get(sync);
		rx->rx_ftr.param = lll;

		/* Flag chain reception failure */
		rx->rx_ftr.aux_failed = 1U;

		/* Dequeue will try releasing the list of node rx; set the
		 * extra pointer to NULL.
		 */
		rx->rx_ftr.extra = NULL;

		/* Add to the rx list; will be flushed */
		aux->rx_head = rx;
	}

	LL_ASSERT(!ull_ref_get(&aux->ull));

	flush(aux);
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
}

static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_scan_aux_prepare};
	struct ll_scan_aux_set *aux = param;
	static struct lll_prepare_param p;
	uint32_t ret;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_O(1);

	/* Increment prepare reference count */
	ref = ull_ref_inc(&aux->ull);
	LL_ASSERT(ref);

	/* Append timing parameters */
	p.ticks_at_expire = ticks_at_expire;
	p.remainder = 0; /* FIXME: remainder; */
	p.lazy = lazy;
	p.force = force;
	p.param = &aux->lll;
	mfy.param = &p;

	/* Kick LLL prepare */
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
			     0, &mfy);
	LL_ASSERT(!ret);

	DEBUG_RADIO_PREPARE_O(1);
}

static void ticker_op_cb(uint32_t status, void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, NULL};
	struct ll_sync_set *sync;
	uint32_t ret;

	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC)) {
		struct ll_scan_aux_set *aux;
		struct lll_sync *sync_lll;

		aux = param;
		sync_lll = aux->parent;
		LL_ASSERT(sync_lll);

		sync = HDR_LLL2ULL(sync_lll);
		sync = ull_sync_is_valid_get(sync);
	} else {
		sync = NULL;
	}

	if (status == TICKER_STATUS_SUCCESS) {
		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync) {
			mfy.fp = aux_sync_partial;
		} else {
			return;
		}
	} else {
		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync) {
			mfy.fp = aux_sync_incomplete;
		} else {
			struct ll_scan_aux_set *aux;

			aux = param;
			LL_ASSERT(aux->parent);

			mfy.fp = flush_safe;
		}
	}

	mfy.param = param;

	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW, TICKER_USER_ID_ULL_HIGH,
			     0, &mfy);
	LL_ASSERT(!ret);
}