/*
 * Copyright (c) 2020 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <sys/byteorder.h>
#include <sys/util.h>

#include "util/mem.h"
#include "util/memq.h"
#include "util/mayfly.h"
#include "util/util.h"

#include "hal/ticker.h"

#include "ticker/ticker.h"

#include "pdu.h"

#include "lll.h"
#include "lll/lll_vendor.h"
#include "lll_scan.h"
#include "lll_scan_aux.h"
#include "lll/lll_df_types.h"
#include "lll_sync.h"
#include "lll_sync_iso.h"

#include "ull_scan_types.h"
#include "ull_sync_types.h"

#include "ull_internal.h"
#include "ull_scan_internal.h"
#include "ull_sync_internal.h"

#define BT_DBG_ENABLED IS_ENABLED(CONFIG_BT_DEBUG_HCI_DRIVER)
#define LOG_MODULE_NAME bt_ctlr_ull_scan_aux
#include "common/log.h"
#include <soc.h>
#include "hal/debug.h"

static int init_reset(void);
static inline struct ll_scan_aux_set *aux_acquire(void);
static inline void aux_release(struct ll_scan_aux_set *aux);
static inline uint8_t aux_handle_get(struct ll_scan_aux_set *aux);
static inline struct ll_sync_set *sync_create_get(struct ll_scan_set *scan);
static void last_disabled_cb(void *param);
static void done_disabled_cb(void *param);
static void flush(struct ll_scan_aux_set *aux);
static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param);
static void ticker_op_cb(uint32_t status, void *param);
static void ticker_op_aux_failure(void *param);

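/* Pool of auxiliary scan contexts and the head of its free list, managed
 * with mem_init/mem_acquire/mem_release.
 */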
static struct ll_scan_aux_set ll_scan_aux_pool[CONFIG_BT_CTLR_SCAN_AUX_SET];
static void *scan_aux_free;

int ull_scan_aux_init(void)
{
	int err;

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

int ull_scan_aux_reset(void)
{
	int err;

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

void ull_scan_aux_setup(memq_link_t *link, struct node_rx_hdr *rx)
{
	struct pdu_adv_aux_ptr *aux_ptr;
	struct pdu_adv_com_ext_adv *p;
	uint32_t ticks_slot_overhead;
	struct lll_scan_aux *lll_aux;
	struct ll_scan_aux_set *aux;
	uint32_t window_widening_us;
	uint32_t ticks_slot_offset;
	uint32_t ticks_aux_offset;
	struct pdu_adv_ext_hdr *h;
	struct lll_sync *sync_lll;
	struct ll_scan_set *scan;
	struct ll_sync_set *sync;
	struct pdu_adv_adi *adi;
	struct node_rx_ftr *ftr;
	uint32_t ready_delay_us;
	uint32_t aux_offset_us;
	uint32_t ticker_status;
	struct lll_scan *lll;
	struct pdu_adv *pdu;
	uint8_t aux_handle;
	bool is_scan_req;
	uint8_t acad_len;
	uint8_t hdr_len;
	uint8_t *ptr;
	uint8_t phy;

	is_scan_req = false;
	ftr = &rx->rx_ftr;

	sync_lll = NULL;

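	/* Dispatch on the node rx type: primary channel reports carry the
	 * scan LLL context, auxiliary reports carry the aux, scan or sync
	 * LLL context, and sync reports carry the sync LLL context.
	 */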
	switch (rx->type) {
	case NODE_RX_TYPE_EXT_1M_REPORT:
		lll_aux = NULL;
		aux = NULL;
		lll = ftr->param;
		scan = HDR_LLL2ULL(lll);
		sync = sync_create_get(scan);
		phy = BT_HCI_LE_EXT_SCAN_PHY_1M;
		break;
	case NODE_RX_TYPE_EXT_CODED_REPORT:
		lll_aux = NULL;
		aux = NULL;
		lll = ftr->param;
		scan = HDR_LLL2ULL(lll);
		sync = sync_create_get(scan);
		phy = BT_HCI_LE_EXT_SCAN_PHY_CODED;
		break;
	case NODE_RX_TYPE_EXT_AUX_REPORT:
		if (ull_scan_aux_is_valid_get(HDR_LLL2ULL(ftr->param))) {
			/* Node has a valid aux context, so its scan was
			 * scheduled from ULL.
			 */
			lll_aux = ftr->param;
			aux = HDR_LLL2ULL(lll_aux);

			/* The aux parent is the scan LLL context, or the
			 * sync LLL context for periodic sync.
			 */
			lll = aux->parent;
		} else if (ull_scan_is_valid_get(HDR_LLL2ULL(ftr->param))) {
			/* Node does not have a valid aux context but has a
			 * valid scan set, so it was scheduled from LLL. We
			 * can retrieve the aux context from lll_scan, where
			 * it was stored when the superior PDU was handled.
			 */
			lll = ftr->param;

			lll_aux = lll->lll_aux;
			LL_ASSERT(lll_aux);

			aux = HDR_LLL2ULL(lll_aux);
			LL_ASSERT(lll == aux->parent);
		} else {
			/* If none of the above, the node is part of sync
			 * scanning.
			 */
			lll = NULL;
			sync_lll = ftr->param;

			lll_aux = sync_lll->lll_aux;
			aux = HDR_LLL2ULL(lll_aux);
		}

		if (lll) {
			scan = HDR_LLL2ULL(lll);
			sync = (void *)scan;
			scan = ull_scan_is_valid_get(scan);
			if (scan) {
				sync = NULL;
			}
		} else {
			scan = NULL;
			sync = HDR_LLL2ULL(sync_lll);
		}

		phy = lll_aux->phy;
		if (scan) {
			/* Here we are in scanner context */
			sync = sync_create_get(scan);

			/* Generate report based on the PHY scanned */
			switch (phy) {
			case PHY_1M:
				rx->type = NODE_RX_TYPE_EXT_1M_REPORT;
				break;
			case PHY_2M:
				rx->type = NODE_RX_TYPE_EXT_2M_REPORT;
				break;
			case PHY_CODED:
				rx->type = NODE_RX_TYPE_EXT_CODED_REPORT;
				break;
			default:
				LL_ASSERT(0);
				return;
			}

			/* Back up the scan requested flag as it is in a
			 * union with the `extra` struct member, which will
			 * be set to NULL in subsequent code.
			 */
			is_scan_req = !!ftr->scan_req;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		} else {
			/* Here we are in periodic sync context */
			rx->type = NODE_RX_TYPE_SYNC_REPORT;
			rx->handle = ull_sync_handle_get(sync);

			sync_lll = &sync->lll;

			/* lll_aux and aux are the auxiliary channel context;
			 * reuse the existing aux context to scan the chain,
			 * hence lll_aux and aux are not released or set to
			 * NULL.
			 */
			sync = NULL;
		}
		break;

	case NODE_RX_TYPE_SYNC_REPORT:
		{
			struct ll_sync_set *ull_sync;

			/* Set the sync handle corresponding to the LLL
			 * context passed in the node rx footer field.
			 */
			sync_lll = ftr->param;
			ull_sync = HDR_LLL2ULL(sync_lll);
			rx->handle = ull_sync_handle_get(ull_sync);

			/* FIXME: we will need lll_scan if chain was scheduled
			 *        from LLL; should we store lll_scan_set in
			 *        sync_lll instead?
			 */
			lll = NULL;
			lll_aux = NULL;
			aux = NULL;
			scan = NULL;
			sync = NULL;
			phy = sync_lll->phy;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

		}
		break;
	default:
		LL_ASSERT(0);
		return;
	}

	rx->link = link;
	ftr->extra = NULL;

	ftr->aux_w4next = 0;

	pdu = (void *)((struct node_rx_pdu *)rx)->pdu;
	p = (void *)&pdu->adv_ext_ind;
	if (!p->ext_hdr_len) {
		goto ull_scan_aux_rx_flush;
	}

	h = (void *)p->ext_hdr_adv_data;

	/* Regard the PDU as invalid if an RFU field is set; we do not know
	 * the size of this future field, hence it would cause an incorrect
	 * calculation of the offset to the ACAD field.
	 */
	if (h->rfu) {
		goto ull_scan_aux_rx_flush;
	}

	ptr = h->data;

	if (h->adv_addr) {
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		if (sync && (pdu->tx_addr == scan->per_scan.adv_addr_type) &&
		    !memcmp(ptr, scan->per_scan.adv_addr, BDADDR_SIZE)) {
			scan->per_scan.state = LL_SYNC_STATE_ADDR_MATCH;
		}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

		ptr += BDADDR_SIZE;
	}

	if (h->tgt_addr) {
		ptr += BDADDR_SIZE;
	}

	if (h->cte_info) {
		ptr += sizeof(struct pdu_cte_info);
	}

	adi = NULL;
	if (h->adi) {
		adi = (void *)ptr;
		ptr += sizeof(*adi);
	}

	aux_ptr = NULL;
	if (h->aux_ptr) {
		aux_ptr = (void *)ptr;
		ptr += sizeof(*aux_ptr);
	}

	if (h->sync_info) {
		struct pdu_adv_sync_info *si;

		si = (void *)ptr;
		ptr += sizeof(*si);

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		if (sync && adi && (adi->sid == scan->per_scan.sid) &&
		    (scan->per_scan.state == LL_SYNC_STATE_ADDR_MATCH)) {
			ull_sync_setup(scan, aux, rx, si);
		}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
	}

	if (h->tx_pwr) {
		ptr++;
	}

	/* Calculate the ACAD length: the bytes remaining in the extended
	 * header after the parsed header fields.
	 */
	hdr_len = ptr - (uint8_t *)p;
	if (hdr_len <= (p->ext_hdr_len + offsetof(struct pdu_adv_com_ext_adv,
						  ext_hdr_adv_data))) {
		acad_len = p->ext_hdr_len +
			   offsetof(struct pdu_adv_com_ext_adv,
				    ext_hdr_adv_data) -
			   hdr_len;
	} else {
		acad_len = 0U;
	}

	/* Periodic Advertising Channel Map Indication and/or Broadcast ISO
	 * synchronization
	 */
	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) &&
	    (rx->type == NODE_RX_TYPE_SYNC_REPORT) &&
	    acad_len) {
		/* Periodic Advertising Channel Map Indication */
		ull_sync_chm_update(rx->handle, ptr, acad_len);
	}

	/* Do not ULL schedule auxiliary PDU reception if there is no aux
	 * pointer, the aux pointer offset is zero, scannable advertising
	 * erroneously has an aux pointer present, or the PHY in the aux
	 * pointer is invalid.
	 */
	if (!aux_ptr || !aux_ptr->offs || is_scan_req ||
	    (aux_ptr->phy > EXT_ADV_AUX_PHY_LE_CODED)) {
		if (is_scan_req) {
			LL_ASSERT(aux && aux->rx_last);

			aux->rx_last->rx_ftr.extra = rx;
			aux->rx_last = rx;

			return;
		}

		goto ull_scan_aux_rx_flush;
	}

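	/* Acquire and initialize an aux context from the pool if this PDU
	 * does not already have one associated.
	 */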
	if (!aux) {
		aux = aux_acquire();
		if (!aux) {
			goto ull_scan_aux_rx_flush;
		}

		aux->rx_head = aux->rx_last = NULL;
		lll_aux = &aux->lll;
		lll_aux->is_chain_sched = 0U;

		ull_hdr_init(&aux->ull);
		lll_hdr_init(lll_aux, aux);

		aux->parent = lll ? (void *)lll : (void *)sync_lll;
	}

	/* In sync context we can dispatch rx immediately; in scan context we
	 * enqueue rx in the aux context and will flush them after the scan
	 * is complete.
	 */
	if (0) {
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	} else if (sync_lll) {
		ll_rx_put(link, rx);
		ll_rx_sched();
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
	} else {
		if (aux->rx_last) {
			aux->rx_last->rx_ftr.extra = rx;
		} else {
			aux->rx_head = rx;
		}
		aux->rx_last = rx;
	}

	lll_aux->chan = aux_ptr->chan_idx;
	lll_aux->phy = BIT(aux_ptr->phy);

	ftr->aux_w4next = 1;

	/* See if this was already scheduled from LLL. If so, store the aux
	 * context in the global scan struct so we can pick it up when the
	 * scanned node is received with a valid context.
	 */
	if (ftr->aux_lll_sched) {
		if (0) {
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		} else if (sync_lll) {
			sync_lll->lll_aux = lll_aux;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
		} else {
			lll->lll_aux = lll_aux;
		}

		/* Reset the auxiliary channel PDU scan state, which is
		 * otherwise done in the prepare_cb when ULL scheduling is
		 * used.
		 */
		lll_aux->state = 0U;

		return;
	}

	/* Switching to ULL scheduling to receive auxiliary PDUs */
	if (lll) {
		lll->lll_aux = NULL;
	} else {
		LL_ASSERT(sync_lll);
		/* XXX: keep lll_aux for now since a node scheduled from ULL
		 *      has sync_lll as ftr->param and we still need to
		 *      restore lll_aux somehow.
		 */
		/* sync_lll->lll_aux = NULL; */
	}

	/* Determine the window size */
	if (aux_ptr->offs_units) {
		lll_aux->window_size_us = OFFS_UNIT_300_US;
	} else {
		lll_aux->window_size_us = OFFS_UNIT_30_US;
	}

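	/* window_size_us at this point holds the offset unit; convert the
	 * AuxPtr offset field (in units of 30 us or 300 us) to microseconds.
	 */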
	aux_offset_us = (uint32_t)aux_ptr->offs * lll_aux->window_size_us;

	/* The CA field contains the clock accuracy of the advertiser:
	 * 0 - 51 ppm to 500 ppm
	 * 1 - 0 ppm to 50 ppm
	 */
	if (aux_ptr->ca) {
		window_widening_us = SCA_DRIFT_50_PPM_US(aux_offset_us);
	} else {
		window_widening_us = SCA_DRIFT_500_PPM_US(aux_offset_us);
	}

	lll_aux->window_size_us += (EVENT_TICKER_RES_MARGIN_US +
				    ((EVENT_JITTER_US + window_widening_us) << 1));

	ready_delay_us = lll_radio_rx_ready_delay_get(lll_aux->phy, 1);

	/* Calculate the aux offset from the start of the scan window. The
	 * AuxPtr offset is referenced to the start of the PDU containing it,
	 * so rewind from the radio end timestamp to the PDU start, then open
	 * the reception window early by the jitter, radio ready delay and
	 * window widening.
	 */
	aux_offset_us += ftr->radio_end_us;
	aux_offset_us -= PDU_AC_US(pdu->len, phy, ftr->phy_flags);
	aux_offset_us -= EVENT_JITTER_US;
	aux_offset_us -= ready_delay_us;
	aux_offset_us -= window_widening_us;

	/* TODO: active_to_start feature port */
	aux->ull.ticks_active_to_start = 0;
	aux->ull.ticks_prepare_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	aux->ull.ticks_preempt_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
	aux->ull.ticks_slot =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US +
				       ready_delay_us +
				       PDU_AC_MAX_US(PDU_AC_EXT_PAYLOAD_SIZE_MAX,
						     lll_aux->phy) +
				       EVENT_OVERHEAD_END_US);

	ticks_slot_offset = MAX(aux->ull.ticks_active_to_start,
				aux->ull.ticks_prepare_to_start);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}
	ticks_slot_offset += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

	ticks_aux_offset = HAL_TICKER_US_TO_TICKS(aux_offset_us);

	/* Yield the primary scan window in the ticker so that the auxiliary
	 * PDU reception can be scheduled in its place.
	 */
	if (scan) {
		uint8_t handle;

		handle = ull_scan_handle_get(scan);

		ticker_status = ticker_yield_abs(TICKER_INSTANCE_ID_CTLR,
						 TICKER_USER_ID_ULL_HIGH,
						 (TICKER_ID_SCAN_BASE + handle),
						 (ftr->ticks_anchor +
						  ticks_aux_offset -
						  ticks_slot_offset),
						 NULL, NULL);
		LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
			  (ticker_status == TICKER_STATUS_BUSY));
	}

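	/* Schedule a single-shot ticker for the auxiliary PDU reception at
	 * the calculated offset from the scan anchor.
	 */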
	aux_handle = aux_handle_get(aux);

	ticker_status = ticker_start(TICKER_INSTANCE_ID_CTLR,
				     TICKER_USER_ID_ULL_HIGH,
				     TICKER_ID_SCAN_AUX_BASE + aux_handle,
				     ftr->ticks_anchor - ticks_slot_offset,
				     ticks_aux_offset,
				     TICKER_NULL_PERIOD,
				     TICKER_NULL_REMAINDER,
				     TICKER_NULL_LAZY,
				     (aux->ull.ticks_slot +
				      ticks_slot_overhead),
				     ticker_cb, aux, ticker_op_cb, aux);
	LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
		  (ticker_status == TICKER_STATUS_BUSY));

	return;

ull_scan_aux_rx_flush:
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	if (sync) {
		scan->per_scan.state = LL_SYNC_STATE_IDLE;
	}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

	if (aux) {
		struct ull_hdr *hdr;

		/* Enqueue the last rx in the aux context if possible,
		 * otherwise send it immediately since we are in sync context.
		 */
		if (aux->rx_last) {
			aux->rx_last->rx_ftr.extra = rx;
		} else {
			LL_ASSERT(sync_lll);
			ll_rx_put(link, rx);
			ll_rx_sched();
		}

		/* ref == 0
		 * All PDUs were scheduled from LLL and there is no pending
		 * done event, we can flush here.
		 *
		 * ref == 1
		 * There is a pending done event, so we need to flush from the
		 * disabled callback. Flushing here would release the aux
		 * context, and thus the ull_hdr, before the done event is
		 * processed.
		 */
		hdr = &aux->ull;
		LL_ASSERT(ull_ref_get(hdr) < 2);
		if (ull_ref_get(hdr) == 0) {
			flush(aux);
		} else {
			LL_ASSERT(!hdr->disabled_cb);

			hdr->disabled_param = aux;
			hdr->disabled_cb = last_disabled_cb;
		}

		return;
	}

	ll_rx_put(link, rx);
	ll_rx_sched();
}

void ull_scan_aux_done(struct node_rx_event_done *done)
{
	struct ll_scan_aux_set *aux;
	struct ull_hdr *hdr;

	/* Get reference to ULL context */
	aux = CONTAINER_OF(done->param, struct ll_scan_aux_set, ull);

	if (0) {
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	} else if (!ull_scan_aux_is_valid_get(aux)) {
		struct ll_sync_set *sync;

		sync = CONTAINER_OF(done->param, struct ll_sync_set, ull);
		LL_ASSERT(ull_sync_is_valid_get(sync));
		hdr = &sync->ull;

		if (!sync->lll.lll_aux) {
			return;
		}

		aux = HDR_LLL2ULL(sync->lll.lll_aux);
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
	} else {
		/* Setup the disabled callback to flush the auxiliary PDUs */
		hdr = &aux->ull;
	}

	LL_ASSERT(!hdr->disabled_cb);
	hdr->disabled_param = aux;
	hdr->disabled_cb = done_disabled_cb;
}

uint8_t ull_scan_aux_lll_handle_get(struct lll_scan_aux *lll)
{
	return aux_handle_get((void *)lll->hdr.parent);
}

void *ull_scan_aux_lll_parent_get(struct lll_scan_aux *lll,
				  uint8_t *is_lll_scan)
{
	struct ll_scan_aux_set *aux_set;
	struct ll_scan_set *scan_set;

	aux_set = HDR_LLL2ULL(lll);
	scan_set = HDR_LLL2ULL(aux_set->parent);

	if (is_lll_scan) {
		*is_lll_scan = !!ull_scan_is_valid_get(scan_set);
	}

	return aux_set->parent;
}

struct ll_scan_aux_set *ull_scan_aux_is_valid_get(struct ll_scan_aux_set *aux)
{
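	/* The aux context is valid only if it points into the
	 * ll_scan_aux_pool array.
	 */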
	if (((uint8_t *)aux < (uint8_t *)ll_scan_aux_pool) ||
	    ((uint8_t *)aux > ((uint8_t *)ll_scan_aux_pool +
			       (sizeof(struct ll_scan_aux_set) *
				(CONFIG_BT_CTLR_SCAN_AUX_SET - 1))))) {
		return NULL;
	}

	return aux;
}

void ull_scan_aux_release(memq_link_t *link, struct node_rx_hdr *rx)
{
	struct lll_scan_aux *lll_aux;
	void *param_ull;

	param_ull = HDR_LLL2ULL(rx->rx_ftr.param);

	if (ull_scan_is_valid_get(param_ull)) {
		struct lll_scan *lll;

		/* Mark the buffer for release */
		rx->type = NODE_RX_TYPE_RELEASE;

		lll = rx->rx_ftr.param;
		lll_aux = lll->lll_aux;
	} else if (ull_scan_aux_is_valid_get(param_ull)) {
		/* Mark the buffer for release */
		rx->type = NODE_RX_TYPE_RELEASE;

		lll_aux = rx->rx_ftr.param;
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	} else if (ull_sync_is_valid_get(param_ull)) {
		struct lll_sync *lll;

		lll = rx->rx_ftr.param;
		lll_aux = lll->lll_aux;

		/* Change the node type so HCI can dispatch the report for
		 * truncated data properly.
		 */
		rx->type = NODE_RX_TYPE_SYNC_REPORT;
		rx->handle = ull_sync_handle_get(param_ull);

		/* Dequeue will try releasing the list of node rx, set the
		 * extra pointer to NULL.
		 */
		rx->rx_ftr.extra = NULL;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
	} else {
		LL_ASSERT(0);
		lll_aux = NULL;
	}

	if (lll_aux) {
		struct ll_scan_aux_set *aux;
		struct ull_hdr *hdr;

		aux = HDR_LLL2ULL(lll_aux);
		hdr = &aux->ull;

		LL_ASSERT(ull_ref_get(hdr) < 2);

		/* Flush from here or from the done event, if one is pending */
		if (ull_ref_get(hdr) == 0) {
			flush(aux);
		} else {
			LL_ASSERT(!hdr->disabled_cb);

			hdr->disabled_param = aux;
			hdr->disabled_cb = last_disabled_cb;
		}
	}

	ll_rx_put(link, rx);
	ll_rx_sched();
}

static int init_reset(void)
{
	/* Initialize the scan aux pool. */
	mem_init(ll_scan_aux_pool, sizeof(struct ll_scan_aux_set),
		 sizeof(ll_scan_aux_pool) / sizeof(struct ll_scan_aux_set),
		 &scan_aux_free);

	return 0;
}

static inline struct ll_scan_aux_set *aux_acquire(void)
{
	return mem_acquire(&scan_aux_free);
}

static inline void aux_release(struct ll_scan_aux_set *aux)
{
	mem_release(aux, &scan_aux_free);
}

static inline uint8_t aux_handle_get(struct ll_scan_aux_set *aux)
{
	return mem_index_get(aux, ll_scan_aux_pool,
			     sizeof(struct ll_scan_aux_set));
}

static inline struct ll_sync_set *sync_create_get(struct ll_scan_set *scan)
{
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	return scan->per_scan.sync;
#else /* !CONFIG_BT_CTLR_SYNC_PERIODIC */
	return NULL;
#endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC */
}

static void last_disabled_cb(void *param)
{
	flush(param);
}

static void done_disabled_cb(void *param)
{
	struct ll_scan_aux_set *aux;

	aux = param;
	LL_ASSERT(ull_scan_aux_is_valid_get(aux));

	aux = ull_scan_aux_is_valid_get(aux);
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	if (!aux) {
		struct lll_sync *sync_lll;

		sync_lll = param;
		LL_ASSERT(sync_lll->lll_aux);
		aux = HDR_LLL2ULL(sync_lll->lll_aux);
	}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

	flush(aux);
}

static void flush(struct ll_scan_aux_set *aux)
{
	struct node_rx_hdr *rx;

	/* Nodes are enqueued only in scan context, so send them now */
	rx = aux->rx_head;
	if (rx) {
		struct lll_scan *lll;

		lll = aux->parent;
		lll->lll_aux = NULL;

		ll_rx_put(rx->link, rx);
		ll_rx_sched();
	} else {
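		/* No nodes were enqueued, i.e. we are in sync context; just
		 * detach the aux context from its parent sync LLL context.
		 */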
		struct lll_sync *lll;

		lll = aux->parent;
		lll->lll_aux = NULL;
	}

	aux_release(aux);
}

static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_scan_aux_prepare};
	struct ll_scan_aux_set *aux = param;
	static struct lll_prepare_param p;
	uint32_t ret;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_O(1);

	/* Increment prepare reference count */
	ref = ull_ref_inc(&aux->ull);
	LL_ASSERT(ref);

	/* Append timing parameters */
	p.ticks_at_expire = ticks_at_expire;
	p.remainder = 0; /* FIXME: remainder; */
	p.lazy = lazy;
	p.force = force;
	p.param = &aux->lll;
	mfy.param = &p;

	/* Kick LLL prepare */
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
			     0, &mfy);
	LL_ASSERT(!ret);

	DEBUG_RADIO_PREPARE_O(1);
}

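/* Ticker operation completion callback: on failure, defer flushing the aux
 * context to ULL high priority context via a mayfly.
 */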
static void ticker_op_cb(uint32_t status, void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, ticker_op_aux_failure};
	uint32_t ret;

	if (status == TICKER_STATUS_SUCCESS) {
		return;
	}

	mfy.param = param;

	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW, TICKER_USER_ID_ULL_HIGH,
			     0, &mfy);
	LL_ASSERT(!ret);
}

static void ticker_op_aux_failure(void *param)
{
	flush(param);
}