1 /*
2 * Copyright (c) 2020 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/sys/byteorder.h>
8 #include <zephyr/sys/slist.h>
9 #include <zephyr/sys/util.h>
10
11 #include "util/mem.h"
12 #include "util/memq.h"
13 #include "util/mayfly.h"
14 #include "util/util.h"
15 #include "util/dbuf.h"
16
17 #include "hal/ticker.h"
18 #include "hal/ccm.h"
19
20 #include "ticker/ticker.h"
21
22 #include "pdu_df.h"
23 #include "lll/pdu_vendor.h"
24 #include "pdu.h"
25
26 #include "lll.h"
27 #include "lll/lll_vendor.h"
28 #include "lll_scan.h"
29 #include "lll_scan_aux.h"
30 #include "lll/lll_df_types.h"
31 #include "lll_conn.h"
32 #include "lll_conn_iso.h"
33 #include "lll_sync.h"
34 #include "lll_sync_iso.h"
35 #include "lll/lll_adv_types.h"
36 #include "lll_adv.h"
37 #include "lll/lll_adv_pdu.h"
38
39 #include "ll_sw/ull_tx_queue.h"
40
41 #include "isoal.h"
42 #include "ull_scan_types.h"
43 #include "ull_conn_types.h"
44 #include "ull_iso_types.h"
45 #include "ull_conn_iso_types.h"
46 #include "ull_sync_types.h"
47 #include "ull_adv_types.h"
48 #include "ull_adv_internal.h"
49
50 #include "ull_internal.h"
51 #include "ull_scan_internal.h"
52 #include "ull_conn_internal.h"
53 #include "ull_sync_internal.h"
54 #include "ull_sync_iso_internal.h"
55 #include "ull_df_internal.h"
56
57 #include <zephyr/bluetooth/hci_types.h>
58
59 #include <soc.h>
60 #include "hal/debug.h"
61
/* Forward declarations of handlers shared by both the aux-context-pool and
 * the chain-based implementations selected further below.
 */
static int init_reset(void);
static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param);
static void ticker_op_cb(uint32_t status, void *param);
static void flush_safe(void *param);
static void done_disabled_cb(void *param);
69
#if !defined(CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS)

/* Handlers specific to the per-context aux set implementation. */
static inline struct ll_scan_aux_set *aux_acquire(void);
static inline void aux_release(struct ll_scan_aux_set *aux);
static inline uint8_t aux_handle_get(struct ll_scan_aux_set *aux);
static void flush(void *param);
static void aux_sync_incomplete(void *param);

/* Auxiliary context pool used for reception of PDUs at aux offsets, common for
 * both Extended Advertising and Periodic Advertising.
 * Increasing the count allows simultaneous reception of interleaved chain PDUs
 * from multiple advertisers.
 */
static struct ll_scan_aux_set ll_scan_aux_pool[CONFIG_BT_CTLR_SCAN_AUX_SET];
static void *scan_aux_free;

#else /* CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */

/* Handlers specific to the chain-list implementation. */
static inline struct ll_scan_aux_chain *aux_chain_acquire(void);
static inline void aux_chain_release(struct ll_scan_aux_chain *chain);
struct ll_scan_aux_chain *scan_aux_chain_is_valid_get(struct ll_scan_aux_chain *chain);
struct ll_scan_aux_chain *lll_scan_aux_chain_is_valid_get(struct lll_scan_aux *lll);
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
static void aux_sync_incomplete(struct ll_scan_aux_chain *chain);
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
static void flush(struct ll_scan_aux_chain *chain);
static void chain_start_ticker(struct ll_scan_aux_chain *chain, bool replace);
static bool chain_insert_in_sched_list(struct ll_scan_aux_chain *chain);
static void chain_remove_from_list(struct ll_scan_aux_chain **head,
				   struct ll_scan_aux_chain *chain);
static void chain_append_to_list(struct ll_scan_aux_chain **head, struct ll_scan_aux_chain *chain);
static bool chain_is_in_list(struct ll_scan_aux_chain *head, struct ll_scan_aux_chain *chain);

/* Auxiliary context pool used for reception of PDUs at aux offsets, common for
 * both Extended Advertising and Periodic Advertising.
 * Increasing the count allows simultaneous reception of interleaved chain PDUs
 * from multiple advertisers.
 */
static struct ll_scan_aux_chain ll_scan_aux_pool[CONFIG_BT_CTLR_SCAN_AUX_CHAIN_COUNT];
static struct ll_scan_aux_set scan_aux_set;
static void *scan_aux_free;

/* Signals completion of a requested aux stop to the thread context. */
static K_SEM_DEFINE(sem_scan_aux_stop, 0, 1);

#endif /* CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
115
int ull_scan_aux_init(void)
{
	/* All initialization work is the (re-)creation of the aux context
	 * free pool; propagate its result directly.
	 */
	return init_reset();
}
127
int ull_scan_aux_reset(void)
{
	/* Resetting is identical to initialization: rebuild the aux context
	 * free pool from scratch.
	 */
	return init_reset();
}
139
/* Mark a node rx as a release buffer and put it on the RX queue towards the
 * host thread; scheduling of RX processing is left to the caller.
 */
static void rx_release_put(struct node_rx_pdu *rx)
{
	rx->hdr.type = NODE_RX_TYPE_RELEASE;

	ll_rx_put(rx->hdr.link, rx);
}
146
sync_create_get(struct ll_scan_set * scan)147 static inline struct ll_sync_set *sync_create_get(struct ll_scan_set *scan)
148 {
149 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
150 return (!scan->periodic.cancelled) ? scan->periodic.sync : NULL;
151 #else /* !CONFIG_BT_CTLR_SYNC_PERIODIC */
152 return NULL;
153 #endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC */
154 }
155
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
/* Return the pending BIG sync create context associated with a periodic
 * sync set, or NULL when BIG sync is not supported or not requested.
 */
static inline struct ll_sync_iso_set *
sync_iso_create_get(struct ll_sync_set *sync)
{
#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	return sync->iso.sync_iso;
#else /* !CONFIG_BT_CTLR_SYNC_ISO */
	return NULL;
#endif /* !CONFIG_BT_CTLR_SYNC_ISO */
}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
167
168 #if !defined(CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS)
/* Process a node rx received by the scan or periodic sync LLL contexts:
 * generate the extended/periodic advertising report towards the host and,
 * when the PDU carries a valid aux pointer, setup ULL scheduled (or continue
 * LLL scheduled) reception of the pointed-to auxiliary channel PDU
 * (AUX_ADV_IND, AUX_CHAIN_IND or AUX_SCAN_RSP).
 *
 * Ownership of rx (and its memq link) is consumed: the node is put towards
 * the host, enqueued into the aux context for a deferred flush, or released.
 */
void ull_scan_aux_setup(memq_link_t *link, struct node_rx_pdu *rx)
{
	struct node_rx_pdu *rx_incomplete;
	struct ll_sync_iso_set *sync_iso;
	struct pdu_adv_aux_ptr *aux_ptr;
	struct pdu_adv_com_ext_adv *p;
	uint32_t ticks_slot_overhead;
	struct lll_scan_aux *lll_aux;
	struct ll_scan_aux_set *aux;
	uint8_t ticker_yield_handle;
	uint32_t window_widening_us;
	uint32_t ticks_slot_offset;
	uint32_t ticks_aux_offset;
	struct pdu_adv_ext_hdr *h;
	struct lll_sync *sync_lll;
	struct ll_scan_set *scan;
	struct ll_sync_set *sync;
	struct pdu_adv_adi *adi;
	struct node_rx_ftr *ftr;
	uint32_t ready_delay_us;
	uint16_t window_size_us;
	uint32_t aux_offset_us;
	uint32_t ticker_status;
	struct lll_scan *lll;
	struct pdu_adv *pdu;
	uint8_t hdr_buf_len;
	uint8_t aux_handle;
	bool is_scan_req;
	uint8_t acad_len;
	uint8_t data_len;
	uint8_t hdr_len;
	uint32_t pdu_us;
	uint8_t phy_aux;
	uint8_t *ptr;
	uint8_t phy;

	is_scan_req = false;
	ftr = &rx->rx_ftr;

	/* Classify the node by its origin (primary channel report, auxiliary
	 * channel report or periodic sync report) and resolve the related
	 * scan/sync/aux contexts plus the ticker to be yielded, if any.
	 */
	switch (rx->hdr.type) {
	case NODE_RX_TYPE_EXT_1M_REPORT:
		lll_aux = NULL;
		aux = NULL;
		sync_lll = NULL;
		sync_iso = NULL;
		rx_incomplete = NULL;

		lll = ftr->param;
		LL_ASSERT(!lll->lll_aux);

		scan = HDR_LLL2ULL(lll);
		sync = sync_create_get(scan);
		phy = BT_HCI_LE_EXT_SCAN_PHY_1M;

		ticker_yield_handle = TICKER_ID_SCAN_BASE +
				      ull_scan_handle_get(scan);
		break;

#if defined(CONFIG_BT_CTLR_PHY_CODED)
	case NODE_RX_TYPE_EXT_CODED_REPORT:
		lll_aux = NULL;
		aux = NULL;
		sync_lll = NULL;
		sync_iso = NULL;
		rx_incomplete = NULL;

		lll = ftr->param;
		LL_ASSERT(!lll->lll_aux);

		scan = HDR_LLL2ULL(lll);
		sync = sync_create_get(scan);
		phy = BT_HCI_LE_EXT_SCAN_PHY_CODED;

		ticker_yield_handle = TICKER_ID_SCAN_BASE +
				      ull_scan_handle_get(scan);
		break;
#endif /* CONFIG_BT_CTLR_PHY_CODED */

	case NODE_RX_TYPE_EXT_AUX_REPORT:
		sync_iso = NULL;
		rx_incomplete = NULL;
		if (ull_scan_aux_is_valid_get(HDR_LLL2ULL(ftr->param))) {
			sync_lll = NULL;

			/* Node has valid aux context so its scan was scheduled
			 * from ULL.
			 */
			lll_aux = ftr->param;
			aux = HDR_LLL2ULL(lll_aux);

			/* aux parent will be NULL for periodic sync */
			lll = aux->parent;
			LL_ASSERT(lll);

			ticker_yield_handle = TICKER_ID_SCAN_AUX_BASE +
					      aux_handle_get(aux);

		} else if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) ||
			   ull_scan_is_valid_get(HDR_LLL2ULL(ftr->param))) {
			sync_lll = NULL;

			/* Node that does not have valid aux context but has
			 * valid scan set was scheduled from LLL.
			 */
			lll = ftr->param;

			/* We can not retrieve aux context that was stored in
			 * lll_scan when superior PDU was handled, as it may be
			 * reset to NULL before this node rx is processed here.
			 * The reset happens when new extended advertising chain
			 * is being received before we process the node here.
			 */
			lll_aux = ftr->lll_aux;
			LL_ASSERT(lll_aux);

			aux = HDR_LLL2ULL(lll_aux);
			LL_ASSERT(lll == aux->parent);

			ticker_yield_handle = TICKER_NULL;

		} else {
			lll = NULL;

			/* If none of the above, node is part of sync scanning
			 */
			sync_lll = ftr->param;

			/* We can not retrieve aux context that was stored in
			 * lll_sync when superior PDU was handled, as it may be
			 * reset to NULL before this node rx is processed here.
			 * The reset happens when new Periodic Advertising chain
			 * is being received before we process the node here.
			 */
			lll_aux = ftr->lll_aux;
			LL_ASSERT(lll_aux);

			aux = HDR_LLL2ULL(lll_aux);
			LL_ASSERT(sync_lll == aux->parent);

			ticker_yield_handle = TICKER_NULL;
		}

		if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || lll) {
			scan = HDR_LLL2ULL(lll);
			sync = (void *)scan;
			scan = ull_scan_is_valid_get(scan);
			if (scan) {
				sync = NULL;
			}
		} else {
			scan = NULL;
			sync = HDR_LLL2ULL(sync_lll);
		}

		phy = lll_aux->phy;
		if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || scan) {
			/* Here we are scanner context */
			sync = sync_create_get(scan);

			/* Generate report based on PHY scanned */
			switch (phy) {
			case PHY_1M:
				rx->hdr.type = NODE_RX_TYPE_EXT_1M_REPORT;
				break;
			case PHY_2M:
				rx->hdr.type = NODE_RX_TYPE_EXT_2M_REPORT;
				break;
#if defined(CONFIG_BT_CTLR_PHY_CODED)
			case PHY_CODED:
				rx->hdr.type = NODE_RX_TYPE_EXT_CODED_REPORT;
				break;
#endif /* CONFIG_BT_CTLR_PHY_CODED */
			default:
				LL_ASSERT(0);
				return;
			}

			/* Backup scan requested flag as it is in union with
			 * `extra` struct member which will be set to NULL
			 * in subsequent code.
			 */
			is_scan_req = !!ftr->scan_req;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		} else {
			/* Here we are periodic sync context */
			rx->hdr.type = NODE_RX_TYPE_SYNC_REPORT;
			rx->hdr.handle = ull_sync_handle_get(sync);

			/* Check if we need to create BIG sync */
			sync_iso = sync_iso_create_get(sync);

			/* lll_aux and aux are auxiliary channel context,
			 * reuse the existing aux context to scan the chain.
			 * hence lll_aux and aux are not released or set to NULL.
			 */
			sync = NULL;
		}
		break;

	case NODE_RX_TYPE_SYNC_REPORT:
	{
		struct ll_sync_set *ull_sync;

		/* set the sync handle corresponding to the LLL context
		 * passed in the node rx footer field.
		 */
		sync_lll = ftr->param;
		LL_ASSERT(!sync_lll->lll_aux);

		ull_sync = HDR_LLL2ULL(sync_lll);
		rx->hdr.handle = ull_sync_handle_get(ull_sync);

		/* Check if we need to create BIG sync */
		sync_iso = sync_iso_create_get(ull_sync);

		/* FIXME: we will need lll_scan if chain was scheduled
		 *        from LLL; should we store lll_scan_set in
		 *        sync_lll instead?
		 */
		lll = NULL;
		lll_aux = NULL;
		aux = NULL;
		scan = NULL;
		sync = NULL;
		phy = sync_lll->phy;

		/* backup extra node_rx supplied for generating
		 * incomplete report
		 */
		rx_incomplete = ftr->extra;

		ticker_yield_handle = TICKER_ID_SCAN_SYNC_BASE +
				      ull_sync_handle_get(ull_sync);
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

	}
		break;
	default:
		LL_ASSERT(0);
		return;
	}

	rx->hdr.link = link;
	ftr->extra = NULL;

	ftr->aux_sched = 0U;

	/* Parse the common extended advertising payload format header.
	 * A PDU with no payload or no extended header has no aux pointer,
	 * hence only accumulate data length and flush.
	 */
	pdu = (void *)rx->pdu;
	p = (void *)&pdu->adv_ext_ind;
	if (!pdu->len || !p->ext_hdr_len) {
		if (pdu->len) {
			data_len = pdu->len - PDU_AC_EXT_HEADER_SIZE_MIN;
		} else {
			data_len = 0U;
		}

		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync_lll) {
			struct ll_sync_set *sync_set;

			sync_set = HDR_LLL2ULL(sync_lll);
			ftr->aux_data_len = sync_set->data_len + data_len;
			sync_set->data_len = 0U;
		} else if (aux) {
			aux->data_len += data_len;
			ftr->aux_data_len = aux->data_len;
		} else {
			ftr->aux_data_len = data_len;
		}

		goto ull_scan_aux_rx_flush;
	}

	h = (void *)p->ext_hdr_adv_data;

	/* Note: The extended header contains a RFU flag that could potentially cause incorrect
	 * calculation of offset to ACAD field if it gets used to add a new header field; However,
	 * from discussion in BT errata ES-8080 it seems clear that BT SIG is aware that the RFU
	 * bit can not be used to add a new field since existing implementations will not be able
	 * to calculate the start of ACAD in that case
	 */

	ptr = h->data;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	bool is_aux_addr_match = false;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

	if (h->adv_addr) {
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		/* Check if Periodic Advertising Synchronization to be created
		 */
		if (sync && (scan->periodic.state != LL_SYNC_STATE_CREATED)) {
#if defined(CONFIG_BT_CTLR_PRIVACY)
			uint8_t rl_idx = ftr->rl_idx;
#else /* !CONFIG_BT_CTLR_PRIVACY */
			uint8_t rl_idx = 0U;
#endif /* !CONFIG_BT_CTLR_PRIVACY */

			/* Check address and update internal state */
			is_aux_addr_match =
				ull_sync_setup_addr_check(sync, scan->periodic.filter_policy,
							  pdu->tx_addr, ptr, rl_idx);
			if (is_aux_addr_match) {
				scan->periodic.state = LL_SYNC_STATE_ADDR_MATCH;
			} else {
				scan->periodic.state = LL_SYNC_STATE_IDLE;
			}
		}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

		ptr += BDADDR_SIZE;
	}

	if (h->tgt_addr) {
		ptr += BDADDR_SIZE;
	}

	if (h->cte_info) {
		ptr += sizeof(struct pdu_cte_info);
	}

	adi = NULL;
	if (h->adi) {
		adi = (void *)ptr;
		ptr += sizeof(*adi);
	}

	aux_ptr = NULL;
	if (h->aux_ptr) {
		aux_ptr = (void *)ptr;
		ptr += sizeof(*aux_ptr);
	}

	if (h->sync_info) {
		struct pdu_adv_sync_info *si;

		si = (void *)ptr;
		ptr += sizeof(*si);

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		/* Check if Periodic Advertising Synchronization to be created.
		 * Setup synchronization if address and SID match in the
		 * Periodic Advertiser List or with the explicitly supplied.
		 *
		 * is_aux_addr_match, device address in auxiliary channel PDU;
		 * scan->periodic.param has not been assigned yet.
		 * Otherwise, address was in primary channel PDU and we are now
		 * checking SID (in SyncInfo) in auxiliary channel PDU.
		 */
		if (sync && aux && (is_aux_addr_match || (scan->periodic.param == aux)) && adi &&
		    ull_sync_setup_sid_match(sync, scan, PDU_ADV_ADI_SID_GET(adi))) {
			ull_sync_setup(scan, aux->lll.phy, rx, si);
		}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
	}

	if (h->tx_pwr) {
		ptr++;
	}

	/* Calculate ACAD Len */
	hdr_len = ptr - (uint8_t *)p;
	hdr_buf_len = PDU_AC_EXT_HEADER_SIZE_MIN + p->ext_hdr_len;
	if (hdr_len > hdr_buf_len) {
		/* FIXME: Handle invalid header length */
		acad_len = 0U;
	} else {
		acad_len = hdr_buf_len - hdr_len;
		hdr_len += acad_len;
	}

	/* calculate total data length */
	if (hdr_len < pdu->len) {
		data_len = pdu->len - hdr_len;
	} else {
		data_len = 0U;
	}

	/* Periodic Advertising Channel Map Indication and/or Broadcast ISO
	 * synchronization
	 */
	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) &&
	    (rx->hdr.type == NODE_RX_TYPE_SYNC_REPORT) &&
	    acad_len) {
		/* Periodic Advertising Channel Map Indication */
		ull_sync_chm_update(rx->hdr.handle, ptr, acad_len);

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
		struct ll_sync_set *sync_set;
		struct pdu_big_info *bi;
		uint8_t bi_size;

		sync_set = HDR_LLL2ULL(sync_lll);

		/* Provide encryption information for BIG sync creation */
		bi_size = ptr[PDU_ADV_DATA_HEADER_LEN_OFFSET] -
			  PDU_ADV_DATA_HEADER_TYPE_SIZE;
		sync_set->enc = (bi_size == PDU_BIG_INFO_ENCRYPTED_SIZE);

		/* Store number of BISes in the BIG */
		bi = (void *)&ptr[PDU_ADV_DATA_HEADER_DATA_OFFSET];
		sync_set->num_bis = PDU_BIG_INFO_NUM_BIS_GET(bi);

		/* Broadcast ISO synchronize */
		if (sync_iso) {
			ull_sync_iso_setup(sync_iso, rx, ptr, acad_len);
		}
#endif /* CONFIG_BT_CTLR_SYNC_ISO */
	}

	/* Do not ULL schedule auxiliary PDU reception if no aux pointer
	 * or aux pointer is zero or scannable advertising has erroneous aux
	 * pointer being present or PHY in the aux pointer is invalid or unsupported.
	 */
	if (!aux_ptr || !PDU_ADV_AUX_PTR_OFFSET_GET(aux_ptr) || is_scan_req ||
	    (PDU_ADV_AUX_PTR_PHY_GET(aux_ptr) > EXT_ADV_AUX_PHY_LE_CODED) ||
	    (!IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED) &&
	     PDU_ADV_AUX_PTR_PHY_GET(aux_ptr) == EXT_ADV_AUX_PHY_LE_CODED)) {
		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync_lll) {
			struct ll_sync_set *sync_set;

			sync_set = HDR_LLL2ULL(sync_lll);
			ftr->aux_data_len = sync_set->data_len + data_len;
			sync_set->data_len = 0U;
		} else if (aux) {
			aux->data_len += data_len;
			ftr->aux_data_len = aux->data_len;
		} else {
			ftr->aux_data_len = data_len;
		}

		if (is_scan_req) {
			LL_ASSERT(aux && aux->rx_last);

			aux->rx_last->rx_ftr.extra = rx;
			aux->rx_last = rx;

			return;
		}

		goto ull_scan_aux_rx_flush;
	}

	/* Determine the window size */
	if (aux_ptr->offs_units) {
		window_size_us = OFFS_UNIT_300_US;
	} else {
		window_size_us = OFFS_UNIT_30_US;
	}

	/* Calculate received aux offset we need to have ULL schedule a reception */
	aux_offset_us = (uint32_t)PDU_ADV_AUX_PTR_OFFSET_GET(aux_ptr) * window_size_us;

	/* Skip reception if invalid aux offset */
	pdu_us = PDU_AC_US(pdu->len, phy, ftr->phy_flags);
	if (unlikely(!AUX_OFFSET_IS_VALID(aux_offset_us, window_size_us, pdu_us))) {
		goto ull_scan_aux_rx_flush;
	}

	/* CA field contains the clock accuracy of the advertiser;
	 * 0 - 51 ppm to 500 ppm
	 * 1 - 0 ppm to 50 ppm
	 */
	if (aux_ptr->ca) {
		window_widening_us = SCA_DRIFT_50_PPM_US(aux_offset_us);
	} else {
		window_widening_us = SCA_DRIFT_500_PPM_US(aux_offset_us);
	}

	phy_aux = BIT(PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
	ready_delay_us = lll_radio_rx_ready_delay_get(phy_aux, PHY_FLAGS_S8);

	/* Calculate the aux offset from start of the scan window */
	aux_offset_us += ftr->radio_end_us;
	aux_offset_us -= pdu_us;
	aux_offset_us -= EVENT_TICKER_RES_MARGIN_US;
	aux_offset_us -= EVENT_JITTER_US;
	aux_offset_us -= ready_delay_us;
	aux_offset_us -= window_widening_us;

	ticks_aux_offset = HAL_TICKER_US_TO_TICKS(aux_offset_us);

	/* Check if too late to ULL schedule an auxiliary PDU reception */
	if (!ftr->aux_lll_sched) {
		uint32_t ticks_at_expire;
		uint32_t overhead_us;
		uint32_t ticks_now;
		uint32_t diff;

		/* CPU execution overhead to setup the radio for reception plus the
		 * minimum prepare tick offset. And allow one additional event in
		 * between as overhead (say, an advertising event in between got closed
		 * when reception for auxiliary PDU is being setup).
		 */
		overhead_us = (EVENT_OVERHEAD_END_US + EVENT_OVERHEAD_START_US +
			       HAL_TICKER_TICKS_TO_US(HAL_TICKER_CNTR_CMP_OFFSET_MIN)) << 1;

		ticks_now = ticker_ticks_now_get();
		ticks_at_expire = ftr->ticks_anchor + ticks_aux_offset -
				  HAL_TICKER_US_TO_TICKS(overhead_us);
		diff = ticker_ticks_diff_get(ticks_now, ticks_at_expire);
		if ((diff & BIT(HAL_TICKER_CNTR_MSBIT)) == 0U) {
			goto ull_scan_aux_rx_flush;
		}
	}

	/* No aux context yet (reception was from primary channel or periodic
	 * sync report); acquire one for receiving the chain.
	 */
	if (!aux) {
		aux = aux_acquire();
		if (!aux) {
			/* As LLL scheduling has been used and will fail due to
			 * non-allocation of aux context, a sync report with
			 * aux_failed flag set will be generated. Let the
			 * current sync report be set as partial, and the
			 * sync report corresponding to ull_scan_aux_release
			 * have the incomplete data status.
			 */
			if (ftr->aux_lll_sched) {
				ftr->aux_sched = 1U;
			}

			if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) &&
			    sync_lll) {
				struct ll_sync_set *sync_set;

				sync_set = HDR_LLL2ULL(sync_lll);
				ftr->aux_data_len = sync_set->data_len + data_len;
				sync_set->data_len = 0U;

			}

			goto ull_scan_aux_rx_flush;
		}

		aux->rx_head = aux->rx_last = NULL;
		aux->data_len = data_len;
		lll_aux = &aux->lll;
		lll_aux->is_chain_sched = 0U;

		ull_hdr_init(&aux->ull);
		lll_hdr_init(lll_aux, aux);

		aux->parent = lll ? (void *)lll : (void *)sync_lll;
#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
		if (lll) {
			lll_aux->hdr.score = lll->scan_aux_score;
		}
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		/* Store the aux context that has Periodic Advertising
		 * Synchronization address match.
		 */
		if (sync && (scan->periodic.state == LL_SYNC_STATE_ADDR_MATCH)) {
			scan->periodic.param = aux;
		}

		/* Store the node rx allocated for incomplete report, if needed.
		 */
		aux->rx_incomplete = rx_incomplete;
		rx_incomplete = NULL;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

	} else if (!(IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync_lll)) {
		aux->data_len += data_len;

		/* Flush auxiliary PDU receptions and stop any more ULL
		 * scheduling if accumulated data length exceeds configured
		 * maximum supported.
		 */
		if (aux->data_len >= CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX) {
			/* If LLL has already scheduled, then let it proceed.
			 *
			 * TODO: LLL to check accumulated data length and
			 *       stop further reception.
			 *       Currently LLL will schedule as long as there
			 *       are free node rx available.
			 */
			if (!ftr->aux_lll_sched) {
				goto ull_scan_aux_rx_flush;
			}
		}
	}

	/* In sync context we can dispatch rx immediately, in scan context we
	 * enqueue rx in aux context and will flush them after scan is complete.
	 */
	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync_lll) {
		struct ll_sync_set *sync_set;

		sync_set = HDR_LLL2ULL(sync_lll);
		sync_set->data_len += data_len;
		ftr->aux_data_len = sync_set->data_len;

		/* Flush auxiliary PDU receptions and stop any more ULL
		 * scheduling if accumulated data length exceeds configured
		 * maximum supported.
		 */
		if (sync_set->data_len >= CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX) {
			/* If LLL has already scheduled, then let it proceed.
			 *
			 * TODO: LLL to check accumulated data length and
			 *       stop further reception.
			 *       Currently LLL will schedule as long as there
			 *       are free node rx available.
			 */
			if (!ftr->aux_lll_sched) {
				sync_set->data_len = 0U;
				goto ull_scan_aux_rx_flush;
			}
		}
	} else {
		if (aux->rx_last) {
			aux->rx_last->rx_ftr.extra = rx;
		} else {
			aux->rx_head = rx;
		}
		aux->rx_last = rx;

		ftr->aux_data_len = aux->data_len;
	}

	/* Initialize the channel index and PHY for the Auxiliary PDU reception.
	 */
	lll_aux->chan = aux_ptr->chan_idx;
	lll_aux->phy = phy_aux;

	/* See if this was already scheduled from LLL. If so, store aux context
	 * in global scan struct so we can pick it when scanned node is received
	 * with a valid context.
	 */
	if (ftr->aux_lll_sched) {
		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync_lll) {
			/* Associate Sync context with the Aux context so that
			 * it can continue reception in LLL scheduling.
			 */
			sync_lll->lll_aux = lll_aux;

			/* AUX_ADV_IND/AUX_CHAIN_IND PDU reception is being
			 * setup
			 */
			ftr->aux_sched = 1U;

			/* In sync context, dispatch immediately */
			ll_rx_put_sched(link, rx);
		} else {
			/* check scan context is not already using LLL
			 * scheduling, or receiving a chain then it will
			 * reuse the aux context.
			 */
			LL_ASSERT(!lll->lll_aux || (lll->lll_aux == lll_aux));

			/* Associate Scan context with the Aux context so that
			 * it can continue reception in LLL scheduling.
			 */
			lll->lll_aux = lll_aux;

			/* AUX_ADV_IND/AUX_CHAIN_IND PDU reception is being
			 * setup
			 */
			ftr->aux_sched = 1U;
		}

		/* Reset auxiliary channel PDU scan state which otherwise is
		 * done in the prepare_cb when ULL scheduling is used.
		 */
		lll_aux->state = 0U;

		return;
	}

	/* Switching to ULL scheduling to receive auxiliary PDUs */
	if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || lll) {
		LL_ASSERT(scan);

		/* Do not ULL schedule if scan disable requested */
		if (unlikely(scan->is_stop)) {
			goto ull_scan_aux_rx_flush;
		}
	} else {
		struct ll_sync_set *sync_set;

		LL_ASSERT(sync_lll &&
			  (!sync_lll->lll_aux || sync_lll->lll_aux == lll_aux));

		/* Do not ULL schedule if sync terminate requested */
		sync_set = HDR_LLL2ULL(sync_lll);
		if (unlikely(sync_set->is_stop)) {
			goto ull_scan_aux_rx_flush;
		}

		/* Associate the auxiliary context with sync context, we do this
		 * for ULL scheduling also in constrast to how extended
		 * advertising only associates when LLL scheduling is used.
		 * Each Periodic Advertising chain is received by unique sync
		 * context, hence LLL and ULL scheduling is always associated
		 * with same unique sync context.
		 */
		sync_lll->lll_aux = lll_aux;

		/* Backup the node rx to be dispatch on successfully ULL
		 * scheduling setup.
		 */
		aux->rx_head = rx;
	}

	/* TODO: active_to_start feature port */
	aux->ull.ticks_active_to_start = 0;
	aux->ull.ticks_prepare_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	aux->ull.ticks_preempt_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
	aux->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(
		EVENT_OVERHEAD_START_US + ready_delay_us +
		PDU_AC_MAX_US(PDU_AC_EXT_PAYLOAD_RX_SIZE, lll_aux->phy) +
		EVENT_OVERHEAD_END_US);

	ticks_slot_offset = MAX(aux->ull.ticks_active_to_start,
				aux->ull.ticks_prepare_to_start);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}
	ticks_slot_offset += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

	/* Initialize the window size for the Auxiliary PDU reception. */
	lll_aux->window_size_us = window_size_us;
	lll_aux->window_size_us += ((EVENT_TICKER_RES_MARGIN_US + EVENT_JITTER_US +
				     window_widening_us) << 1);

#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	/* disable ticker job, in order to chain yield and start to reduce
	 * CPU use by reducing successive calls to ticker_job().
	 */
	mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 0);
#endif

	/* Yield the primary scan window or auxiliary or periodic sync event
	 * in ticker.
	 */
	if (ticker_yield_handle != TICKER_NULL) {
		ticker_status = ticker_yield_abs(TICKER_INSTANCE_ID_CTLR,
						 TICKER_USER_ID_ULL_HIGH,
						 ticker_yield_handle,
						 (ftr->ticks_anchor +
						  ticks_aux_offset -
						  ticks_slot_offset),
						 NULL, NULL);
		LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
			  (ticker_status == TICKER_STATUS_BUSY));
	}

	aux_handle = aux_handle_get(aux);
	ticker_status = ticker_start(TICKER_INSTANCE_ID_CTLR,
				     TICKER_USER_ID_ULL_HIGH,
				     TICKER_ID_SCAN_AUX_BASE + aux_handle,
				     ftr->ticks_anchor - ticks_slot_offset,
				     ticks_aux_offset,
				     TICKER_NULL_PERIOD,
				     TICKER_NULL_REMAINDER,
				     TICKER_NULL_LAZY,
				     (aux->ull.ticks_slot +
				      ticks_slot_overhead),
				     ticker_cb, aux, ticker_op_cb, aux);
	LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
		  (ticker_status == TICKER_STATUS_BUSY) ||
		  ((ticker_status == TICKER_STATUS_FAILURE) &&
		   IS_ENABLED(CONFIG_BT_TICKER_LOW_LAT)));

#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	/* enable ticker job, queued ticker operation will be handled
	 * thereafter.
	 */
	mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1);
#endif

	return;

ull_scan_aux_rx_flush:
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	if (sync && (scan->periodic.state != LL_SYNC_STATE_CREATED)) {
		scan->periodic.state = LL_SYNC_STATE_IDLE;
		scan->periodic.param = NULL;
	}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

	if (aux) {
		/* Enqueue last rx in aux context if possible, otherwise send
		 * immediately since we are in sync context.
		 */
		if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || aux->rx_last) {
			LL_ASSERT(scan);

			/* If scan is being disabled, rx could already be
			 * enqueued before coming here to ull_scan_aux_rx_flush.
			 * Check if rx not the last in the list of received PDUs
			 * then add it, else do not add it, to avoid duplicate
			 * report generation, release and probable infinite loop
			 * processing of the list.
			 */
			if (unlikely(scan->is_stop)) {
				/* Add the node rx to aux context list of node
				 * rx if not already added when coming here to
				 * ull_scan_aux_rx_flush. This is handling a
				 * race condition where in the last PDU in
				 * chain is received and at the same time scan
				 * is being disabled.
				 */
				if (aux->rx_last != rx) {
					aux->rx_last->rx_ftr.extra = rx;
					aux->rx_last = rx;
				}

				return;
			}

			aux->rx_last->rx_ftr.extra = rx;
			aux->rx_last = rx;
		} else {
			const struct ll_sync_set *sync_set;

			LL_ASSERT(sync_lll);

			ll_rx_put_sched(link, rx);

			sync_set = HDR_LLL2ULL(sync_lll);
			if (unlikely(sync_set->is_stop && sync_lll->lll_aux)) {
				return;
			}
		}

		LL_ASSERT(aux->parent);

		flush_safe(aux);

		return;
	}

	ll_rx_put(link, rx);

	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && rx_incomplete) {
		rx_release_put(rx_incomplete);
	}

	ll_rx_sched();
}
1016
/* Event done handler for an auxiliary scan (or periodic sync) event: flush
 * the accumulated node rx list of the aux context unless a stop is in
 * progress, in which case ull_scan_aux_stop() owns the flush.
 *
 * Note: done->param may reference either an aux context or, for periodic
 * sync events, a sync context; the CONTAINER_OF plus pool-bounds check below
 * disambiguates the two.
 */
void ull_scan_aux_done(struct node_rx_event_done *done)
{
	struct ll_scan_aux_set *aux;

	/* Get reference to ULL context */
	aux = CONTAINER_OF(done->param, struct ll_scan_aux_set, ull);

	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) &&
	    !ull_scan_aux_is_valid_get(aux)) {
		struct ll_sync_set *sync;

		/* Not an aux context: done event is from a periodic sync
		 * event; resolve the aux context via the sync's LLL.
		 */
		sync = CONTAINER_OF(done->param, struct ll_sync_set, ull);
		LL_ASSERT(ull_sync_is_valid_get(sync));

		/* Auxiliary context will be flushed by ull_scan_aux_stop() */
		if (unlikely(sync->is_stop) || !sync->lll.lll_aux) {
			return;
		}

		aux = HDR_LLL2ULL(sync->lll.lll_aux);
		LL_ASSERT(aux->parent);
	} else {
		struct ll_scan_set *scan;
		struct lll_scan *lll;

		lll = aux->parent;
		LL_ASSERT(lll);

		scan = HDR_LLL2ULL(lll);
		LL_ASSERT(ull_scan_is_valid_get(scan));

		/* Auxiliary context will be flushed by ull_scan_aux_stop() */
		if (unlikely(scan->is_stop)) {
			return;
		}
	}

	flush(aux);
}
1056
ull_scan_aux_set_get(uint8_t handle)1057 struct ll_scan_aux_set *ull_scan_aux_set_get(uint8_t handle)
1058 {
1059 if (handle >= CONFIG_BT_CTLR_SCAN_AUX_SET) {
1060 return NULL;
1061 }
1062
1063 return &ll_scan_aux_pool[handle];
1064 }
1065
/* Return the pool handle of the aux context that embeds the given LLL
 * context.
 */
uint8_t ull_scan_aux_lll_handle_get(struct lll_scan_aux *lll)
{
	struct ll_scan_aux_set *aux_set = HDR_LLL2ULL(lll);

	return aux_handle_get(aux_set);
}
1074
/* Return the parent LLL context (scan or sync) of an auxiliary context.
 *
 * When is_lll_scan is non-NULL, it is set to non-zero if the parent is a
 * scan context, zero otherwise.
 */
void *ull_scan_aux_lll_parent_get(struct lll_scan_aux *lll,
				  uint8_t *is_lll_scan)
{
	struct ll_scan_aux_set *aux = HDR_LLL2ULL(lll);

	if (is_lll_scan != NULL) {
		struct lll_scan *parent_lll = aux->parent;
		struct ll_scan_set *parent_scan;

		LL_ASSERT(parent_lll);

		/* Parent is a scan context only if it maps into the scan
		 * context pool.
		 */
		parent_scan = HDR_LLL2ULL(parent_lll);
		*is_lll_scan = (ull_scan_is_valid_get(parent_scan) != NULL);
	}

	return aux->parent;
}
1095
ull_scan_aux_is_valid_get(struct ll_scan_aux_set * aux)1096 struct ll_scan_aux_set *ull_scan_aux_is_valid_get(struct ll_scan_aux_set *aux)
1097 {
1098 if (((uint8_t *)aux < (uint8_t *)ll_scan_aux_pool) ||
1099 ((uint8_t *)aux > ((uint8_t *)ll_scan_aux_pool +
1100 (sizeof(struct ll_scan_aux_set) *
1101 (CONFIG_BT_CTLR_SCAN_AUX_SET - 1))))) {
1102 return NULL;
1103 }
1104
1105 return aux;
1106 }
1107
ull_scan_aux_lll_is_valid_get(struct lll_scan_aux * lll)1108 struct lll_scan_aux *ull_scan_aux_lll_is_valid_get(struct lll_scan_aux *lll)
1109 {
1110 struct ll_scan_aux_set *aux;
1111
1112 aux = HDR_LLL2ULL(lll);
1113 aux = ull_scan_aux_is_valid_get(aux);
1114 if (aux) {
1115 return &aux->lll;
1116 }
1117
1118 return NULL;
1119 }
1120
/* Release a node rx and, when assigned, the auxiliary scan context used for
 * an auxiliary/chain PDU reception that was aborted or failed.
 *
 * The node rx footer param identifies the originating context: a scan
 * context (LLL scheduled reception aborted), an aux context (ULL scheduled
 * reception aborted), or a sync context (chain reception under periodic
 * sync).
 */
void ull_scan_aux_release(memq_link_t *link, struct node_rx_pdu *rx)
{
	struct lll_scan_aux *lll_aux;
	void *param_ull;

	param_ull = HDR_LLL2ULL(rx->rx_ftr.param);

	if (ull_scan_is_valid_get(param_ull)) {
		/* Release aux context when LLL scheduled auxiliary PDU
		 * reception is aborted on duration expire or aborted in the
		 * unreserved time space.
		 */
		struct lll_scan *lll;

		/* Mark the node rx buffer for release */
		rx->hdr.type = NODE_RX_TYPE_RELEASE;

		lll = rx->rx_ftr.param;
		lll_aux = rx->rx_ftr.lll_aux;

		/* Under race condition when LLL scheduling a reception of
		 * auxiliary PDU, a scan aux context may be assigned late and
		 * the node rx releasing the aux context will not have it.
		 * Release the scan aux context assigned in the scan context.
		 */
		if (!lll_aux) {
			lll_aux = lll->lll_aux;
		}

	} else if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) ||
		   ull_scan_aux_is_valid_get(param_ull)) {
		/* Release aux context when ULL scheduled auxiliary PDU
		 * reception is aborted.
		 */

		/* Mark the node rx buffer for release */
		rx->hdr.type = NODE_RX_TYPE_RELEASE;

		lll_aux = rx->rx_ftr.param;

	} else if (ull_sync_is_valid_get(param_ull)) {
		struct ll_sync_set *sync;
		struct lll_sync *lll;

		sync = param_ull;

		/* reset data len total */
		sync->data_len = 0U;

		/* Release aux context in case of chain PDU reception, otherwise
		 * lll_aux is NULL.
		 */
		lll = rx->rx_ftr.param;
		lll_aux = rx->rx_ftr.lll_aux;

		/* Under race condition when LLL scheduling a reception of
		 * auxiliary PDU, a scan aux context may be assigned late and
		 * the node rx releasing the aux context will not have it.
		 * Release the scan aux context assigned in the sync context.
		 */
		if (!lll_aux) {
			lll_aux = lll->lll_aux;
		}

		/* Change node type so HCI can dispatch report for truncated
		 * data properly.
		 */
		rx->hdr.type = NODE_RX_TYPE_SYNC_REPORT;
		rx->hdr.handle = ull_sync_handle_get(sync);

		/* Dequeue will try releasing list of node rx, set the extra
		 * pointer to NULL.
		 */
		rx->rx_ftr.extra = NULL;

	} else {
		/* Unreachable: param must be a scan, aux or sync context */
		LL_ASSERT(0);
		lll_aux = NULL;
	}

	if (lll_aux) {
		struct ll_scan_aux_set *aux;
		struct ll_scan_set *scan;
		struct lll_scan *lll;
		uint8_t is_stop;

		aux = HDR_LLL2ULL(lll_aux);
		lll = aux->parent;
		LL_ASSERT(lll);

		/* Determine whether the parent (scan or sync context) is
		 * being stopped.
		 */
		scan = HDR_LLL2ULL(lll);
		scan = ull_scan_is_valid_get(scan);
		if (scan) {
			is_stop = scan->is_stop;
		} else if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC)) {
			struct lll_sync *sync_lll;
			struct ll_sync_set *sync;

			sync_lll = (void *)lll;
			sync = HDR_LLL2ULL(sync_lll);
			is_stop = sync->is_stop;
		} else {
			LL_ASSERT(0);
			return;
		}

		if (!is_stop) {
			LL_ASSERT(aux->parent);

			/* Flush now, or defer via disabled callback when a
			 * done event is still pending (see flush_safe()).
			 */
			flush_safe(aux);

		} else if (!scan) {
			/* Sync terminate requested, enqueue node rx so that it
			 * be flushed by ull_scan_aux_stop().
			 */
			rx->hdr.link = link;
			if (aux->rx_last) {
				aux->rx_last->rx_ftr.extra = rx;
			} else {
				aux->rx_head = rx;
			}
			aux->rx_last = rx;

			return;
		}
	}

	/* Dispatch the node rx towards the Host */
	ll_rx_put(link, rx);
	ll_rx_sched();
}
1251
/* Stop an auxiliary scan context.
 *
 * Stops any ULL scheduled ticker for the context, aborts an in-flight LLL
 * event when the context is already in prepare, and defers the context
 * flush/release to ULL high execution context via mayfly.
 *
 * Returns 0 on success, otherwise the error from ticker stop or LLL
 * disable.
 */
int ull_scan_aux_stop(struct ll_scan_aux_set *aux)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, NULL};
	uint8_t aux_handle;
	uint32_t ret;
	int err;

	/* Stop any ULL scheduling of auxiliary PDU scan */
	aux_handle = aux_handle_get(aux);
	err = ull_ticker_stop_with_mark(TICKER_ID_SCAN_AUX_BASE + aux_handle,
					aux, &aux->lll);
	if (err && (err != -EALREADY)) {
		return err;
	}

	/* Abort LLL event if ULL scheduling not used or already in prepare */
	if (err == -EALREADY) {
		err = ull_disable(&aux->lll);
		if (err && (err != -EALREADY)) {
			return err;
		}

		mfy.fp = flush;

	} else if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC)) {
		/* ULL scan auxiliary PDU reception scheduling stopped
		 * before prepare.
		 */
		mfy.fp = flush;

	} else {
		struct ll_scan_set *scan;
		struct lll_scan *lll;

		lll = aux->parent;
		LL_ASSERT(lll);

		/* Distinguish a scan parent from a periodic sync parent */
		scan = HDR_LLL2ULL(lll);
		scan = ull_scan_is_valid_get(scan);
		if (scan) {
			/* ULL scan auxiliary PDU reception scheduling stopped
			 * before prepare.
			 */
			mfy.fp = flush;
		} else {
			/* ULL sync chain reception scheduling stopped before
			 * prepare; generate an incomplete report instead.
			 */
			mfy.fp = aux_sync_incomplete;
		}
	}

	/* Release auxiliary context in ULL execution context */
	mfy.param = aux;
	ret = mayfly_enqueue(TICKER_USER_ID_THREAD, TICKER_USER_ID_ULL_HIGH,
			     0, &mfy);
	LL_ASSERT(!ret);

	return 0;
}
1313
init_reset(void)1314 static int init_reset(void)
1315 {
1316 /* Initialize adv aux pool. */
1317 mem_init(ll_scan_aux_pool, sizeof(struct ll_scan_aux_set),
1318 sizeof(ll_scan_aux_pool) / sizeof(struct ll_scan_aux_set),
1319 &scan_aux_free);
1320
1321 return 0;
1322 }
1323
aux_acquire(void)1324 static inline struct ll_scan_aux_set *aux_acquire(void)
1325 {
1326 return mem_acquire(&scan_aux_free);
1327 }
1328
/* Return an auxiliary scan context to the free pool. */
static inline void aux_release(struct ll_scan_aux_set *aux)
{
	/* Clear the parent so that when the scan is being disabled this
	 * auxiliary context is no longer associated with it and is not
	 * disabled along with it.
	 */
	LL_ASSERT(aux->parent);
	aux->parent = NULL;

	mem_release(aux, &scan_aux_free);
}
1339
aux_handle_get(struct ll_scan_aux_set * aux)1340 static inline uint8_t aux_handle_get(struct ll_scan_aux_set *aux)
1341 {
1342 return mem_index_get(aux, ll_scan_aux_pool,
1343 sizeof(struct ll_scan_aux_set));
1344 }
1345
/* Disabled callback registered by flush_safe(): invoked once the pending
 * done event has been processed, so the aux context can now be flushed and
 * released safely.
 */
static void done_disabled_cb(void *param)
{
	struct ll_scan_aux_set *aux;

	aux = param;
	LL_ASSERT(aux->parent);

	flush(aux);
}
1355
/* Flush the auxiliary context, deferring via the ULL header disabled
 * callback when a done event is still pending for the context.
 */
static void flush_safe(void *param)
{
	struct ll_scan_aux_set *aux;
	struct ull_hdr *hdr;
	uint8_t ref;

	aux = param;
	LL_ASSERT(aux->parent);

	/* ref == 0
	 * All PDUs were scheduled from LLL and there is no pending done
	 * event, we can flush here.
	 *
	 * ref == 1
	 * There is pending done event so we need to flush from disabled
	 * callback. Flushing here would release aux context and thus
	 * ull_hdr before done event was processed.
	 */
	hdr = &aux->ull;
	ref = ull_ref_get(hdr);
	if (ref == 0U) {
		flush(aux);
	} else {
		/* A specific single shot scheduled aux context
		 * cannot overlap, i.e. ULL reference count
		 * shall be less than 2.
		 */
		LL_ASSERT(ref < 2U);

		/* Defer the flush to done_disabled_cb() */
		LL_ASSERT(!hdr->disabled_cb);
		hdr->disabled_param = aux;
		hdr->disabled_cb = done_disabled_cb;
	}
}
1390
/* Flush the auxiliary context: dispatch any enqueued node rx towards the
 * Host, release the spare incomplete-report node rx, propagate the
 * scheduling score to the parent scan context, and release the context
 * back to the pool.
 *
 * Shall only be called when no done event is pending for the context; use
 * flush_safe() otherwise.
 */
static void flush(void *param)
{
	struct ll_scan_aux_set *aux;
	struct ll_scan_set *scan;
	struct node_rx_pdu *rx;
	struct lll_scan *lll;
	bool sched = false;

	/* Debug check that parent was assigned when allocated for reception of
	 * auxiliary channel PDUs.
	 */
	aux = param;
	LL_ASSERT(aux->parent);

	/* Dispatch any enqueued node rx list towards the Host */
	rx = aux->rx_head;
	if (rx) {
		aux->rx_head = NULL;

		ll_rx_put(rx->hdr.link, rx);
		sched = true;
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	/* Release the unused node rx reserved for incomplete report */
	rx = aux->rx_incomplete;
	if (rx) {
		aux->rx_incomplete = NULL;

		rx_release_put(rx);
		sched = true;
	}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

	if (sched) {
		ll_rx_sched();
	}

	lll = aux->parent;
	scan = HDR_LLL2ULL(lll);
	scan = ull_scan_is_valid_get(scan);
	if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || scan) {
#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
		/* Propagate the channel selection score back to the parent
		 * scan context for subsequent scheduling decisions.
		 */
		lll->scan_aux_score = aux->lll.hdr.score;
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
	}

	aux_release(aux);
}
1438
aux_sync_partial(void * param)1439 static void aux_sync_partial(void *param)
1440 {
1441 struct ll_scan_aux_set *aux;
1442 struct node_rx_pdu *rx;
1443
1444 aux = param;
1445 rx = aux->rx_head;
1446 aux->rx_head = NULL;
1447
1448 LL_ASSERT(rx);
1449 rx->rx_ftr.aux_sched = 1U;
1450
1451 ll_rx_put_sched(rx->hdr.link, rx);
1452 }
1453
/* Generate a periodic advertising report with incomplete (failed) data
 * status when chain PDU reception is not continued, then flush and release
 * the aux context. Compiled out (no-op) without CONFIG_BT_CTLR_SYNC_PERIODIC.
 */
static void aux_sync_incomplete(void *param)
{
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	struct ll_scan_aux_set *aux;

	aux = param;
	LL_ASSERT(aux->parent);

	/* ULL scheduling succeeded hence no backup node rx present, use the
	 * extra node rx reserved for incomplete data status generation.
	 */
	if (!aux->rx_head) {
		struct ll_sync_set *sync;
		struct node_rx_pdu *rx;
		struct lll_sync *lll;

		/* get reference to sync context */
		lll = aux->parent;
		LL_ASSERT(lll);
		sync = HDR_LLL2ULL(lll);

		/* reset data len total */
		sync->data_len = 0U;

		/* pick extra node rx stored in aux context */
		rx = aux->rx_incomplete;
		LL_ASSERT(rx);
		aux->rx_incomplete = NULL;

		/* prepare sync report with failure */
		rx->hdr.type = NODE_RX_TYPE_SYNC_REPORT;
		rx->hdr.handle = ull_sync_handle_get(sync);
		rx->rx_ftr.param = lll;

		/* flag chain reception failure */
		rx->rx_ftr.aux_failed = 1U;

		/* Dequeue will try releasing list of node rx,
		 * set the extra pointer to NULL.
		 */
		rx->rx_ftr.extra = NULL;

		/* add to rx list, will be flushed */
		aux->rx_head = rx;
	}

	/* No pending done event expected here; flush directly */
	LL_ASSERT(!ull_ref_get(&aux->ull));

	flush(aux);
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
}
1505
/* Ticker expiry callback for ULL scheduled auxiliary PDU reception.
 *
 * Increments the prepare reference count and enqueues a mayfly to run
 * lll_scan_aux_prepare in LLL context. ticks_drift is unused; remainder is
 * intentionally not forwarded (see FIXME below). The static mayfly and
 * prepare param are safe as the aux event is single shot.
 */
static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_scan_aux_prepare};
	struct ll_scan_aux_set *aux = param;
	static struct lll_prepare_param p;
	uint32_t ret;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_O(1);

	/* Increment prepare reference count */
	ref = ull_ref_inc(&aux->ull);
	LL_ASSERT(ref);

	/* Append timing parameters */
	p.ticks_at_expire = ticks_at_expire;
	p.remainder = 0; /* FIXME: remainder; */
	p.lazy = lazy;
	p.force = force;
	p.param = &aux->lll;
	mfy.param = &p;

	/* Kick LLL prepare */
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
			     0, &mfy);
	LL_ASSERT(!ret);

	DEBUG_RADIO_PREPARE_O(1);
}
1538
/* Ticker start operation status callback for ULL scheduled auxiliary PDU
 * reception.
 *
 * Runs in ULL low context; defers the actual handling to ULL high via
 * mayfly. On success for a periodic sync chain, dispatch the enqueued node
 * rx as a partial report; on failure, generate an incomplete report (sync
 * parent) or flush the aux context (scan parent). Success with a scan
 * parent needs no action.
 */
static void ticker_op_cb(uint32_t status, void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, NULL};
	struct ll_sync_set *sync;
	uint32_t ret;

	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC)) {
		struct ll_scan_aux_set *aux;
		struct lll_sync *sync_lll;

		/* Determine if the aux context parent is a sync context */
		aux = param;
		sync_lll = aux->parent;
		LL_ASSERT(sync_lll);

		sync = HDR_LLL2ULL(sync_lll);
		sync = ull_sync_is_valid_get(sync);
	} else {
		sync = NULL;
	}

	if (status == TICKER_STATUS_SUCCESS) {
		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync) {
			mfy.fp = aux_sync_partial;
		} else {
			return;
		}
	} else {
		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync) {
			mfy.fp = aux_sync_incomplete;
		} else {
			struct ll_scan_aux_set *aux;

			aux = param;
			LL_ASSERT(aux->parent);

			mfy.fp = flush_safe;
		}
	}

	mfy.param = param;

	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW, TICKER_USER_ID_ULL_HIGH,
			     0, &mfy);
	LL_ASSERT(!ret);
}
1585
1586 #else /* CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
/* NOTE: BT_CTLR_SCAN_AUX_USE_CHAINS is an alternative new design with less RAM
 * usage for supporting Extended Scanning of simultaneous interleaved
 * Extended Advertising chains.
 *
 * TODO: As the previous design has a Bluetooth Qualified Design Listing by
 * Nordic Semiconductor ASA, both implementations are present in this file,
 * and default builds use CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS=n. Remove the old
 * implementation when we have a new Bluetooth Qualified Design Listing
 * with the new Extended Scanning and Periodic Sync implementation.
 */
ull_scan_aux_setup(memq_link_t * link,struct node_rx_pdu * rx)1597 void ull_scan_aux_setup(memq_link_t *link, struct node_rx_pdu *rx)
1598 {
1599 struct node_rx_pdu *rx_incomplete;
1600 struct ll_sync_iso_set *sync_iso;
1601 struct ll_scan_aux_chain *chain;
1602 struct pdu_adv_aux_ptr *aux_ptr;
1603 struct pdu_adv_com_ext_adv *p;
1604 struct lll_scan_aux *lll_aux;
1605 uint32_t window_widening_us;
1606 uint32_t ticks_aux_offset;
1607 struct pdu_adv_ext_hdr *h;
1608 struct lll_sync *sync_lll;
1609 struct ll_scan_set *scan;
1610 struct ll_sync_set *sync;
1611 struct pdu_adv_adi *adi;
1612 struct node_rx_ftr *ftr;
1613 uint32_t ready_delay_us;
1614 uint16_t window_size_us;
1615 uint32_t aux_offset_us;
1616 struct lll_scan *lll;
1617 struct pdu_adv *pdu;
1618 uint8_t hdr_buf_len;
1619 bool is_scan_req;
1620 uint8_t acad_len;
1621 uint8_t data_len;
1622 uint8_t hdr_len;
1623 uint32_t pdu_us;
1624 uint8_t phy_aux;
1625 uint8_t *ptr;
1626 uint8_t phy;
1627
1628 is_scan_req = false;
1629 ftr = &rx->rx_ftr;
1630
1631 switch (rx->hdr.type) {
1632 case NODE_RX_TYPE_EXT_1M_REPORT:
1633 lll_aux = NULL;
1634 chain = NULL;
1635 sync_lll = NULL;
1636 sync_iso = NULL;
1637 rx_incomplete = NULL;
1638
1639 lll = ftr->param;
1640 LL_ASSERT(!lll->lll_aux);
1641
1642 scan = HDR_LLL2ULL(lll);
1643 sync = sync_create_get(scan);
1644 phy = BT_HCI_LE_EXT_SCAN_PHY_1M;
1645 break;
1646
1647 #if defined(CONFIG_BT_CTLR_PHY_CODED)
1648 case NODE_RX_TYPE_EXT_CODED_REPORT:
1649 lll_aux = NULL;
1650 chain = NULL;
1651 sync_lll = NULL;
1652 sync_iso = NULL;
1653 rx_incomplete = NULL;
1654
1655 lll = ftr->param;
1656 LL_ASSERT(!lll->lll_aux);
1657
1658 scan = HDR_LLL2ULL(lll);
1659 sync = sync_create_get(scan);
1660 phy = BT_HCI_LE_EXT_SCAN_PHY_CODED;
1661 break;
1662 #endif /* CONFIG_BT_CTLR_PHY_CODED */
1663
1664 case NODE_RX_TYPE_EXT_AUX_REPORT:
1665 sync_iso = NULL;
1666 rx_incomplete = NULL;
1667 if (lll_scan_aux_chain_is_valid_get(ftr->param)) {
1668 sync_lll = NULL;
1669
1670 /* Node has valid chain context so its scan was scheduled
1671 * from ULL.
1672 */
1673 lll_aux = ftr->param;
1674 chain = CONTAINER_OF(lll_aux, struct ll_scan_aux_chain, lll);
1675
1676 /* chain parent will be NULL for periodic sync */
1677 lll = chain->parent;
1678 LL_ASSERT(lll);
1679
1680 } else if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) ||
1681 ull_scan_is_valid_get(HDR_LLL2ULL(ftr->param))) {
1682 sync_lll = NULL;
1683
1684 /* Node that does not have valid chain context but has
1685 * valid scan set was scheduled from LLL. We can
1686 * retrieve chain context from lll_scan as it was stored
1687 * there when superior PDU was handled.
1688 */
1689 lll = ftr->param;
1690
1691 lll_aux = lll->lll_aux;
1692 LL_ASSERT(lll_aux);
1693
1694 chain = CONTAINER_OF(lll_aux, struct ll_scan_aux_chain, lll);
1695 LL_ASSERT(lll == chain->parent);
1696
1697 } else {
1698 lll = NULL;
1699
1700 /* If none of the above, node is part of sync scanning
1701 */
1702 sync_lll = ftr->param;
1703
1704 lll_aux = sync_lll->lll_aux;
1705 LL_ASSERT(lll_aux);
1706
1707 chain = CONTAINER_OF(lll_aux, struct ll_scan_aux_chain, lll);
1708 LL_ASSERT(sync_lll == chain->parent);
1709 }
1710
1711 if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || lll) {
1712 scan = HDR_LLL2ULL(lll);
1713 sync = (void *)scan;
1714 scan = ull_scan_is_valid_get(scan);
1715 if (scan) {
1716 sync = NULL;
1717 }
1718 } else {
1719 scan = NULL;
1720 sync = HDR_LLL2ULL(sync_lll);
1721 }
1722
1723 phy = lll_aux->phy;
1724 if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || scan) {
1725 /* Here we are scanner context */
1726 sync = sync_create_get(scan);
1727
1728 /* Generate report based on PHY scanned */
1729 switch (phy) {
1730 case PHY_1M:
1731 rx->hdr.type = NODE_RX_TYPE_EXT_1M_REPORT;
1732 break;
1733 case PHY_2M:
1734 rx->hdr.type = NODE_RX_TYPE_EXT_2M_REPORT;
1735 break;
1736 #if defined(CONFIG_BT_CTLR_PHY_CODED)
1737 case PHY_CODED:
1738 rx->hdr.type = NODE_RX_TYPE_EXT_CODED_REPORT;
1739 break;
1740 #endif /* CONFIG_BT_CTLR_PHY_CODED */
1741 default:
1742 LL_ASSERT(0);
1743 return;
1744 }
1745
1746 /* Backup scan requested flag as it is in union with
1747 * `extra` struct member which will be set to NULL
1748 * in subsequent code.
1749 */
1750 is_scan_req = !!ftr->scan_req;
1751
1752 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
1753 } else {
1754 /* Here we are periodic sync context */
1755 rx->hdr.type = NODE_RX_TYPE_SYNC_REPORT;
1756 rx->hdr.handle = ull_sync_handle_get(sync);
1757
1758 /* Check if we need to create BIG sync */
1759 sync_iso = sync_iso_create_get(sync);
1760
1761 /* lll_aux and aux are auxiliary channel context,
1762 * reuse the existing aux context to scan the chain.
1763 * hence lll_aux and aux are not released or set to NULL.
1764 */
1765 sync = NULL;
1766 }
1767 break;
1768
1769 case NODE_RX_TYPE_SYNC_REPORT:
1770 {
1771 struct ll_sync_set *ull_sync;
1772
1773 /* set the sync handle corresponding to the LLL context
1774 * passed in the node rx footer field.
1775 */
1776 sync_lll = ftr->param;
1777 LL_ASSERT(!sync_lll->lll_aux);
1778
1779 ull_sync = HDR_LLL2ULL(sync_lll);
1780 rx->hdr.handle = ull_sync_handle_get(ull_sync);
1781
1782 /* Check if we need to create BIG sync */
1783 sync_iso = sync_iso_create_get(ull_sync);
1784
1785 /* FIXME: we will need lll_scan if chain was scheduled
1786 * from LLL; should we store lll_scan_set in
1787 * sync_lll instead?
1788 */
1789 lll = NULL;
1790 lll_aux = NULL;
1791 chain = NULL;
1792 scan = NULL;
1793 sync = NULL;
1794 phy = sync_lll->phy;
1795
1796 /* backup extra node_rx supplied for generating
1797 * incomplete report
1798 */
1799 rx_incomplete = ftr->extra;
1800 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
1801
1802 }
1803 break;
1804 default:
1805 LL_ASSERT(0);
1806 return;
1807 }
1808
1809 rx->hdr.link = link;
1810 ftr->extra = NULL;
1811
1812 ftr->aux_sched = 0U;
1813
1814 if (chain) {
1815 chain->aux_sched = 0U;
1816
1817 if (!is_scan_req) {
1818 /* Remove chain from active list */
1819 chain_remove_from_list(&scan_aux_set.active_chains, chain);
1820
1821 /* Reset LLL scheduled flag */
1822 chain->is_lll_sched = 0U;
1823 }
1824 }
1825
1826 pdu = (void *)rx->pdu;
1827 p = (void *)&pdu->adv_ext_ind;
1828 if (!pdu->len || !p->ext_hdr_len) {
1829 if (pdu->len) {
1830 data_len = pdu->len - PDU_AC_EXT_HEADER_SIZE_MIN;
1831 } else {
1832 data_len = 0U;
1833 }
1834
1835 if (chain) {
1836 chain->data_len += data_len;
1837 ftr->aux_data_len = chain->data_len;
1838 } else {
1839 ftr->aux_data_len = data_len;
1840 }
1841
1842 goto ull_scan_aux_rx_flush;
1843 }
1844
1845 h = (void *)p->ext_hdr_adv_data;
1846
1847 /* Note: The extended header contains a RFU flag that could potentially cause incorrect
1848 * calculation of offset to ACAD field if it gets used to add a new header field; However,
1849 * from discussion in BT errata ES-8080 it seems clear that BT SIG is aware that the RFU
1850 * bit can not be used to add a new field since existing implementations will not be able
1851 * to calculate the start of ACAD in that case
1852 */
1853
1854 ptr = h->data;
1855
1856 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
1857 bool is_aux_addr_match = false;
1858 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
1859
1860 if (h->adv_addr) {
1861 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
1862 /* Check if Periodic Advertising Synchronization to be created
1863 */
1864 if (sync && (scan->periodic.state != LL_SYNC_STATE_CREATED)) {
1865 #if defined(CONFIG_BT_CTLR_PRIVACY)
1866 uint8_t rl_idx = ftr->rl_idx;
1867 #else /* !CONFIG_BT_CTLR_PRIVACY */
1868 uint8_t rl_idx = 0U;
1869 #endif /* !CONFIG_BT_CTLR_PRIVACY */
1870
1871 /* Check address and update internal state */
1872 is_aux_addr_match =
1873 ull_sync_setup_addr_check(sync, scan->periodic.filter_policy,
1874 pdu->tx_addr, ptr, rl_idx);
1875 if (is_aux_addr_match) {
1876 scan->periodic.state = LL_SYNC_STATE_ADDR_MATCH;
1877 } else {
1878 scan->periodic.state = LL_SYNC_STATE_IDLE;
1879 }
1880 }
1881 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
1882
1883 ptr += BDADDR_SIZE;
1884 }
1885
1886 if (h->tgt_addr) {
1887 ptr += BDADDR_SIZE;
1888 }
1889
1890 if (h->cte_info) {
1891 ptr += sizeof(struct pdu_cte_info);
1892 }
1893
1894 adi = NULL;
1895 if (h->adi) {
1896 adi = (void *)ptr;
1897 ptr += sizeof(*adi);
1898 }
1899
1900 aux_ptr = NULL;
1901 if (h->aux_ptr) {
1902 aux_ptr = (void *)ptr;
1903 ptr += sizeof(*aux_ptr);
1904 }
1905
1906 if (h->sync_info) {
1907 struct pdu_adv_sync_info *si;
1908
1909 si = (void *)ptr;
1910 ptr += sizeof(*si);
1911
1912 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
1913 /* Check if Periodic Advertising Synchronization to be created.
1914 * Setup synchronization if address and SID match in the
1915 * Periodic Advertiser List or with the explicitly supplied.
1916 *
1917 * is_aux_addr_match, device address in auxiliary channel PDU;
1918 * scan->periodic.param has not been assigned yet.
1919 * Otherwise, address was in primary channel PDU and we are now
1920 * checking SID (in SyncInfo) in auxiliary channel PDU.
1921 */
1922 if (sync && chain && (is_aux_addr_match || (scan->periodic.param == chain)) &&
1923 adi && ull_sync_setup_sid_match(sync, scan, PDU_ADV_ADI_SID_GET(adi))) {
1924 ull_sync_setup(scan, chain->lll.phy, rx, si);
1925 }
1926 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
1927 }
1928
1929 if (h->tx_pwr) {
1930 ptr++;
1931 }
1932
1933 /* Calculate ACAD Len */
1934 hdr_len = ptr - (uint8_t *)p;
1935 hdr_buf_len = PDU_AC_EXT_HEADER_SIZE_MIN + p->ext_hdr_len;
1936 if (hdr_len > hdr_buf_len) {
1937 /* FIXME: Handle invalid header length */
1938 acad_len = 0U;
1939 } else {
1940 acad_len = hdr_buf_len - hdr_len;
1941 hdr_len += acad_len;
1942 }
1943
1944 /* calculate and set total data length */
1945 if (hdr_len < pdu->len) {
1946 data_len = pdu->len - hdr_len;
1947 } else {
1948 data_len = 0U;
1949 }
1950
1951 if (chain) {
1952 chain->data_len += data_len;
1953 ftr->aux_data_len = chain->data_len;
1954 } else {
1955 ftr->aux_data_len = data_len;
1956 }
1957
1958 /* Periodic Advertising Channel Map Indication and/or Broadcast ISO
1959 * synchronization
1960 */
1961 if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) &&
1962 (rx->hdr.type == NODE_RX_TYPE_SYNC_REPORT) &&
1963 acad_len) {
1964 /* Periodic Advertising Channel Map Indication */
1965 ull_sync_chm_update(rx->hdr.handle, ptr, acad_len);
1966
1967 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
1968 struct ll_sync_set *sync_set;
1969 struct pdu_big_info *bi;
1970 uint8_t bi_size;
1971
1972 sync_set = HDR_LLL2ULL(sync_lll);
1973
1974 /* Provide encryption information for BIG sync creation */
1975 bi_size = ptr[PDU_ADV_DATA_HEADER_LEN_OFFSET] -
1976 PDU_ADV_DATA_HEADER_TYPE_SIZE;
1977 sync_set->enc = (bi_size == PDU_BIG_INFO_ENCRYPTED_SIZE);
1978
1979 /* Store number of BISes in the BIG */
1980 bi = (void *)&ptr[PDU_ADV_DATA_HEADER_DATA_OFFSET];
1981 sync_set->num_bis = PDU_BIG_INFO_NUM_BIS_GET(bi);
1982
1983 /* Broadcast ISO synchronize */
1984 if (sync_iso) {
1985 ull_sync_iso_setup(sync_iso, rx, ptr, acad_len);
1986 }
1987 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
1988 }
1989
1990 /* Do not ULL schedule auxiliary PDU reception if no aux pointer
1991 * or aux pointer is zero or scannable advertising has erroneous aux
1992 * pointer being present or PHY in the aux pointer is invalid or unsupported
1993 * or if scanning and scan has been stopped
1994 */
1995 if (!aux_ptr || !PDU_ADV_AUX_PTR_OFFSET_GET(aux_ptr) || is_scan_req ||
1996 (PDU_ADV_AUX_PTR_PHY_GET(aux_ptr) > EXT_ADV_AUX_PHY_LE_CODED) ||
1997 (!IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED) &&
1998 PDU_ADV_AUX_PTR_PHY_GET(aux_ptr) == EXT_ADV_AUX_PHY_LE_CODED)) {
1999
2000 if (is_scan_req) {
2001 LL_ASSERT(chain && chain->rx_last);
2002
2003 chain->rx_last->rx_ftr.extra = rx;
2004 chain->rx_last = rx;
2005
2006 return;
2007 }
2008
2009 goto ull_scan_aux_rx_flush;
2010 }
2011
2012 /* Determine the window size */
2013 if (aux_ptr->offs_units) {
2014 window_size_us = OFFS_UNIT_300_US;
2015 } else {
2016 window_size_us = OFFS_UNIT_30_US;
2017 }
2018
2019 /* Calculate received aux offset we need to have ULL schedule a reception */
2020 aux_offset_us = (uint32_t)PDU_ADV_AUX_PTR_OFFSET_GET(aux_ptr) * window_size_us;
2021
2022 /* Skip reception if invalid aux offset */
2023 pdu_us = PDU_AC_US(pdu->len, phy, ftr->phy_flags);
2024 if (unlikely(!AUX_OFFSET_IS_VALID(aux_offset_us, window_size_us, pdu_us))) {
2025 goto ull_scan_aux_rx_flush;
2026 }
2027
2028 /* CA field contains the clock accuracy of the advertiser;
2029 * 0 - 51 ppm to 500 ppm
2030 * 1 - 0 ppm to 50 ppm
2031 */
2032 if (aux_ptr->ca) {
2033 window_widening_us = SCA_DRIFT_50_PPM_US(aux_offset_us);
2034 } else {
2035 window_widening_us = SCA_DRIFT_500_PPM_US(aux_offset_us);
2036 }
2037
2038 phy_aux = BIT(PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
2039 ready_delay_us = lll_radio_rx_ready_delay_get(phy_aux, PHY_FLAGS_S8);
2040
2041 /* Calculate the aux offset from start of the scan window */
2042 aux_offset_us += ftr->radio_end_us;
2043 aux_offset_us -= pdu_us;
2044 aux_offset_us -= EVENT_TICKER_RES_MARGIN_US;
2045 aux_offset_us -= EVENT_JITTER_US;
2046 aux_offset_us -= ready_delay_us;
2047 aux_offset_us -= window_widening_us;
2048
2049 ticks_aux_offset = HAL_TICKER_US_TO_TICKS(aux_offset_us);
2050
2051 /* Check if too late to ULL schedule an auxiliary PDU reception */
2052 if (!ftr->aux_lll_sched) {
2053 uint32_t ticks_at_expire;
2054 uint32_t overhead_us;
2055 uint32_t ticks_now;
2056 uint32_t diff;
2057
2058 /* CPU execution overhead to setup the radio for reception plus the
2059 * minimum prepare tick offset. And allow one additional event in
2060 * between as overhead (say, an advertising event in between got closed
2061 * when reception for auxiliary PDU is being setup).
2062 */
2063 overhead_us = (EVENT_OVERHEAD_END_US + EVENT_OVERHEAD_START_US +
2064 HAL_TICKER_TICKS_TO_US(HAL_TICKER_CNTR_CMP_OFFSET_MIN)) << 1;
2065
2066 ticks_now = ticker_ticks_now_get();
2067 ticks_at_expire = ftr->ticks_anchor + ticks_aux_offset -
2068 HAL_TICKER_US_TO_TICKS(overhead_us);
2069 diff = ticker_ticks_diff_get(ticks_now, ticks_at_expire);
2070 if ((diff & BIT(HAL_TICKER_CNTR_MSBIT)) == 0U) {
2071 goto ull_scan_aux_rx_flush;
2072 }
2073 }
2074
2075 if (!chain) {
2076 chain = aux_chain_acquire();
2077 if (!chain) {
2078 /* As LLL scheduling has been used and will fail due to
2079 * non-allocation of a new chain context, a sync report with
2080 * aux_failed flag set will be generated. Let the
2081 * current sync report be set as partial, and the
2082 * sync report corresponding to ull_scan_aux_release
2083 * have the incomplete data status.
2084 */
2085 if (ftr->aux_lll_sched) {
2086 ftr->aux_sched = 1U;
2087 }
2088
2089 goto ull_scan_aux_rx_flush;
2090 }
2091
2092 chain->rx_head = chain->rx_last = NULL;
2093 chain->data_len = data_len;
2094 chain->is_lll_sched = ftr->aux_lll_sched;
2095 lll_aux = &chain->lll;
2096 lll_aux->is_chain_sched = 0U;
2097
2098 lll_hdr_init(lll_aux, &scan_aux_set);
2099
2100 chain->parent = lll ? (void *)lll : (void *)sync_lll;
2101 #if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
2102 if (lll) {
2103 lll_aux->hdr.score = lll->scan_aux_score;
2104 }
2105 #endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
2106
2107 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
2108 /* Store the chain context that has Periodic Advertising
2109 * Synchronization address match.
2110 */
2111 if (sync && (scan->periodic.state == LL_SYNC_STATE_ADDR_MATCH)) {
2112 scan->periodic.param = chain;
2113 }
2114
2115 if (sync_lll) {
2116 struct ll_sync_set *sync_set = HDR_LLL2ULL(sync_lll);
2117
2118 sync_set->rx_incomplete = rx_incomplete;
2119 rx_incomplete = NULL;
2120 }
2121 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
2122
2123 /* See if this was already scheduled from LLL. If so, store aux context
2124 * in global scan/sync struct so we can pick it when scanned node is received
2125 * with a valid context.
2126 */
2127 if (ftr->aux_lll_sched) {
2128
2129 if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync_lll) {
2130 sync_lll->lll_aux = lll_aux;
2131 } else {
2132 lll->lll_aux = lll_aux;
2133 }
2134
2135 /* Reset auxiliary channel PDU scan state which otherwise is
2136 * done in the prepare_cb when ULL scheduling is used.
2137 */
2138 lll_aux->state = 0U;
2139 }
2140 } else if (chain->data_len >= CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX) {
2141
2142 /* Flush auxiliary PDU receptions and stop any more ULL
2143 * scheduling if accumulated data length exceeds configured
2144 * maximum supported.
2145 */
2146
2147 /* If LLL has already scheduled, then let it proceed.
2148 *
2149 * TODO: LLL to check accumulated data length and
2150 * stop further reception.
2151 * Currently LLL will schedule as long as there
2152 * are free node rx available.
2153 */
2154 if (!ftr->aux_lll_sched) {
2155 goto ull_scan_aux_rx_flush;
2156 }
2157 }
2158
2159 /* In sync context we can dispatch rx immediately, in scan context we
2160 * enqueue rx in aux context and will flush them after scan is complete.
2161 */
2162 if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || scan) {
2163 if (chain->rx_last) {
2164 chain->rx_last->rx_ftr.extra = rx;
2165 } else {
2166 chain->rx_head = rx;
2167 }
2168 chain->rx_last = rx;
2169 }
2170
2171 /* Initialize the channel index and PHY for the Auxiliary PDU reception.
2172 */
2173 lll_aux->chan = aux_ptr->chan_idx;
2174 lll_aux->phy = phy_aux;
2175
2176 if (ftr->aux_lll_sched) {
2177 /* AUX_ADV_IND/AUX_CHAIN_IND PDU reception is being setup */
2178 ftr->aux_sched = 1U;
2179 chain->aux_sched = 1U;
2180
2181 chain->next = scan_aux_set.active_chains;
2182 scan_aux_set.active_chains = chain;
2183
2184 if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync_lll) {
2185 /* In sync context, dispatch immediately */
2186 ll_rx_put_sched(link, rx);
2187 }
2188
2189 return;
2190 }
2191
2192 /* Switching to ULL scheduling to receive auxiliary PDUs */
2193 if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || lll) {
2194 LL_ASSERT(scan);
2195
2196 /* Do not ULL schedule if scan disable requested */
2197 if (unlikely(scan->is_stop)) {
2198 goto ull_scan_aux_rx_flush;
2199 }
2200
2201 /* Remove auxiliary context association with scan context so
2202 * that LLL can differentiate it to being ULL scheduling.
2203 */
2204 if (lll->lll_aux == &chain->lll) {
2205 lll->lll_aux = NULL;
2206 }
2207 } else {
2208 struct ll_sync_set *sync_set;
2209
2210 LL_ASSERT(sync_lll &&
2211 (!sync_lll->lll_aux || sync_lll->lll_aux == lll_aux));
2212
2213 /* Do not ULL schedule if sync terminate requested */
2214 sync_set = HDR_LLL2ULL(sync_lll);
2215 if (unlikely(sync_set->is_stop)) {
2216 goto ull_scan_aux_rx_flush;
2217 }
2218
2219 /* Associate the auxiliary context with sync context, we do this
2220 	 * for ULL scheduling also in contrast to how extended
2221 * advertising only associates when LLL scheduling is used.
2222 * Each Periodic Advertising chain is received by unique sync
2223 * context, hence LLL and ULL scheduling is always associated
2224 * with same unique sync context.
2225 */
2226 sync_lll->lll_aux = lll_aux;
2227
2228 }
2229
2230 lll_aux->window_size_us = window_size_us;
2231 lll_aux->window_size_us += ((EVENT_TICKER_RES_MARGIN_US + EVENT_JITTER_US +
2232 window_widening_us) << 1);
2233
2234 chain->ticker_ticks = (ftr->ticks_anchor + ticks_aux_offset) & HAL_TICKER_CNTR_MASK;
2235
2236 if (!chain_insert_in_sched_list(chain)) {
2237 /* Failed to add chain - flush */
2238 goto ull_scan_aux_rx_flush;
2239 }
2240
2241 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
2242 if (sync_lll) {
2243 /* In sync context, dispatch immediately */
2244 rx->rx_ftr.aux_sched = 1U;
2245 chain->aux_sched = 1U;
2246 ll_rx_put_sched(link, rx);
2247 }
2248 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
2249
2250 return;
2251
2252 ull_scan_aux_rx_flush:
2253 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
2254 if (sync && (scan->periodic.state != LL_SYNC_STATE_CREATED)) {
2255 scan->periodic.state = LL_SYNC_STATE_IDLE;
2256 scan->periodic.param = NULL;
2257 }
2258 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
2259
2260 if (chain) {
2261 /* Enqueue last rx in chain context if possible, otherwise send
2262 * immediately since we are in sync context.
2263 */
2264 if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || chain->rx_last) {
2265 LL_ASSERT(scan);
2266
2267 /* rx could already be enqueued before coming here -
2268 * check if rx not the last in the list of received PDUs
2269 * then add it, else do not add it, to avoid duplicate
2270 * report generation, release and probable infinite loop
2271 * processing of the list.
2272 */
2273 if (chain->rx_last != rx) {
2274 chain->rx_last->rx_ftr.extra = rx;
2275 chain->rx_last = rx;
2276 }
2277 } else {
2278 LL_ASSERT(sync_lll);
2279
2280 ll_rx_put_sched(link, rx);
2281 }
2282
2283 LL_ASSERT(chain->parent);
2284
2285 flush_safe(chain);
2286
2287 return;
2288 }
2289
2290 ll_rx_put(link, rx);
2291
2292 if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && rx_incomplete) {
2293 rx_release_put(rx_incomplete);
2294 }
2295
2296 ll_rx_sched();
2297 }
2298
ull_scan_aux_done(struct node_rx_event_done * done)2299 void ull_scan_aux_done(struct node_rx_event_done *done)
2300 {
2301 struct ll_scan_aux_chain *chain;
2302
2303 /* Get reference to chain */
2304 chain = CONTAINER_OF(done->extra.lll, struct ll_scan_aux_chain, lll);
2305 LL_ASSERT(scan_aux_chain_is_valid_get(chain));
2306
2307 /* Remove chain from active list */
2308 chain_remove_from_list(&scan_aux_set.active_chains, chain);
2309
2310 flush(chain);
2311 }
2312
lll_scan_aux_chain_is_valid_get(struct lll_scan_aux * lll)2313 struct ll_scan_aux_chain *lll_scan_aux_chain_is_valid_get(struct lll_scan_aux *lll)
2314 {
2315 return scan_aux_chain_is_valid_get(CONTAINER_OF(lll, struct ll_scan_aux_chain, lll));
2316 }
2317
/* Return the parent LLL context (scan or periodic sync) of an auxiliary
 * scan context.
 *
 * When is_lll_scan is non-NULL it is set to non-zero if the parent is a
 * scan context, zero if it is a periodic sync context.
 */
void *ull_scan_aux_lll_parent_get(struct lll_scan_aux *lll,
				  uint8_t *is_lll_scan)
{
	struct ll_scan_aux_chain *chain;

	chain = CONTAINER_OF(lll, struct ll_scan_aux_chain, lll);

	if (is_lll_scan != NULL) {
		struct lll_scan *parent_lll;
		struct ll_scan_set *parent_ull;

		parent_lll = chain->parent;
		LL_ASSERT(parent_lll);

		/* Parent is a scan context only when its ULL header lies in
		 * the scan set pool.
		 */
		parent_ull = HDR_LLL2ULL(parent_lll);
		*is_lll_scan = !!ull_scan_is_valid_get(parent_ull);
	}

	return chain->parent;
}
2338
scan_aux_chain_is_valid_get(struct ll_scan_aux_chain * chain)2339 struct ll_scan_aux_chain *scan_aux_chain_is_valid_get(struct ll_scan_aux_chain *chain)
2340 {
2341 if (((uint8_t *)chain < (uint8_t *)ll_scan_aux_pool) ||
2342 ((uint8_t *)chain > ((uint8_t *)ll_scan_aux_pool +
2343 (sizeof(struct ll_scan_aux_chain) *
2344 (CONFIG_BT_CTLR_SCAN_AUX_CHAIN_COUNT - 1))))) {
2345 return NULL;
2346 }
2347
2348 return chain;
2349 }
2350
ull_scan_aux_lll_is_valid_get(struct lll_scan_aux * lll)2351 struct lll_scan_aux *ull_scan_aux_lll_is_valid_get(struct lll_scan_aux *lll)
2352 {
2353 struct ll_scan_aux_chain *chain;
2354
2355 chain = CONTAINER_OF(lll, struct ll_scan_aux_chain, lll);
2356 chain = scan_aux_chain_is_valid_get(chain);
2357 if (chain) {
2358 return &chain->lll;
2359 }
2360
2361 return NULL;
2362 }
2363
/* Release a node rx held by an auxiliary scan/sync reception.
 *
 * rx->rx_ftr.param may reference a scan LLL context, the aux set itself,
 * or a periodic sync LLL context; the corresponding auxiliary LLL context
 * is resolved from it. When the resolved chain's parent is not being
 * stopped, the chain is unlinked and flushed here. Finally the node rx is
 * queued towards the host for release (or as a truncated sync report).
 */
void ull_scan_aux_release(memq_link_t *link, struct node_rx_pdu *rx)
{
	struct lll_scan_aux *lll_aux;
	void *param_ull;

	/* ULL header of the LLL object stored in the node rx footer */
	param_ull = HDR_LLL2ULL(rx->rx_ftr.param);

	/* Mark for buffer for release */
	rx->hdr.type = NODE_RX_TYPE_RELEASE;

	if (ull_scan_is_valid_get(param_ull)) {
		/* Parent is a scan context; pick its aux association */
		struct lll_scan *lll;

		lll = rx->rx_ftr.param;
		lll_aux = lll->lll_aux;

	} else if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) ||
		   param_ull == &scan_aux_set) {
		/* The node rx directly references an aux LLL context */

		lll_aux = rx->rx_ftr.param;

	} else if (ull_sync_is_valid_get(param_ull)) {
		/* Parent is a periodic sync context */
		struct lll_sync *lll;

		lll = rx->rx_ftr.param;
		lll_aux = lll->lll_aux;

		if (!lll_aux) {
			struct ll_sync_set *sync = param_ull;

			/* Change node type so HCI can dispatch report for truncated
			 * data properly.
			 */
			rx->hdr.type = NODE_RX_TYPE_SYNC_REPORT;
			rx->hdr.handle = ull_sync_handle_get(sync);

			/* Dequeue will try releasing list of node rx, set the extra
			 * pointer to NULL.
			 */
			rx->rx_ftr.extra = NULL;
		}
	} else {
		/* Unexpected parent object type */
		LL_ASSERT(0);
		lll_aux = NULL;
	}

	if (lll_aux) {
		struct ll_scan_aux_chain *chain;
		struct ll_scan_set *scan;
		struct lll_scan *lll;
		uint8_t is_stop;

		chain = CONTAINER_OF(lll_aux, struct ll_scan_aux_chain, lll);
		lll = chain->parent;
		LL_ASSERT(lll);

		/* Determine whether the parent (scan or sync) is stopping;
		 * if so, the stop path owns flushing of the chain.
		 */
		scan = HDR_LLL2ULL(lll);
		scan = ull_scan_is_valid_get(scan);
		if (scan) {
			is_stop = scan->is_stop;
		} else {
			struct lll_sync *sync_lll;
			struct ll_sync_set *sync;

			sync_lll = (void *)lll;
			sync = HDR_LLL2ULL(sync_lll);
			is_stop = sync->is_stop;
		}

		if (!is_stop) {
			LL_ASSERT(chain->parent);

			/* Remove chain from active list and flush */
			chain_remove_from_list(&scan_aux_set.active_chains, chain);
			flush(chain);
		}
	}

	ll_rx_put(link, rx);
	ll_rx_sched();
}
2445
scan_aux_stop_all_chains_for_parent(void * parent)2446 static void scan_aux_stop_all_chains_for_parent(void *parent)
2447 {
2448 static memq_link_t link;
2449 static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
2450 struct ll_scan_aux_chain *curr = scan_aux_set.sched_chains;
2451 struct ll_scan_aux_chain *prev = NULL;
2452 bool ticker_stopped = false;
2453 bool disabling = false;
2454
2455 if (curr && curr->parent == parent) {
2456 uint8_t ticker_status;
2457
2458 /* Scheduled head is about to be removed - stop running ticker */
2459 ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
2460 TICKER_ID_SCAN_AUX, NULL, NULL);
2461 LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
2462 (ticker_status == TICKER_STATUS_BUSY));
2463 ticker_stopped = true;
2464 }
2465
2466 while (curr) {
2467 if (curr->parent == parent) {
2468 if (curr == scan_aux_set.sched_chains) {
2469 scan_aux_set.sched_chains = curr->next;
2470 flush(curr);
2471 curr = scan_aux_set.sched_chains;
2472 } else {
2473 prev->next = curr->next;
2474 flush(curr);
2475 curr = prev->next;
2476 }
2477 } else {
2478 prev = curr;
2479 curr = curr->next;
2480 }
2481 }
2482
2483 if (ticker_stopped && scan_aux_set.sched_chains) {
2484 /* Start ticker using new head */
2485 chain_start_ticker(scan_aux_set.sched_chains, false);
2486 }
2487
2488 /* Check active chains */
2489 prev = NULL;
2490 curr = scan_aux_set.active_chains;
2491 while (curr) {
2492 if (curr->parent == parent) {
2493 struct ll_scan_aux_chain *chain = curr;
2494 uint32_t ret;
2495
2496 if (curr == scan_aux_set.active_chains) {
2497 scan_aux_set.active_chains = curr->next;
2498 curr = scan_aux_set.active_chains;
2499 } else {
2500 prev->next = curr->next;
2501 curr = prev->next;
2502 }
2503
2504 if (chain->is_lll_sched || ull_ref_get(&scan_aux_set.ull) == 0) {
2505 /* Disable called by parent disable or race with scan stop */
2506 flush(chain);
2507 } else {
2508 /* Flush on disabled callback */
2509 chain->next = scan_aux_set.flushing_chains;
2510 scan_aux_set.flushing_chains = chain;
2511 scan_aux_set.ull.disabled_cb = done_disabled_cb;
2512
2513 /* Call lll_disable */
2514 disabling = true;
2515 mfy.param = &curr->lll;
2516 ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, 0,
2517 &mfy);
2518 LL_ASSERT(!ret);
2519 }
2520 } else {
2521 prev = curr;
2522 curr = curr->next;
2523 }
2524 }
2525
2526 if (!disabling) {
2527 /* Signal completion */
2528 k_sem_give(&sem_scan_aux_stop);
2529 }
2530 }
2531
2532 /* Stops all chains with the given parent */
ull_scan_aux_stop(void * parent)2533 int ull_scan_aux_stop(void *parent)
2534 {
2535 static memq_link_t link;
2536 static struct mayfly mfy = {0, 0, &link, NULL, scan_aux_stop_all_chains_for_parent};
2537 uint32_t ret;
2538
2539 /* Stop chains in ULL execution context */
2540 mfy.param = parent;
2541 ret = mayfly_enqueue(TICKER_USER_ID_THREAD, TICKER_USER_ID_ULL_HIGH, 0, &mfy);
2542 LL_ASSERT(!ret);
2543
2544 /* Wait for chains to be stopped before returning */
2545 (void)k_sem_take(&sem_scan_aux_stop, K_FOREVER);
2546
2547 return 0;
2548 }
2549
init_reset(void)2550 static int init_reset(void)
2551 {
2552 ull_hdr_init(&scan_aux_set.ull);
2553 scan_aux_set.sched_chains = NULL;
2554 scan_aux_set.active_chains = NULL;
2555
2556 /* Initialize scan aux chains pool */
2557 mem_init(ll_scan_aux_pool, sizeof(struct ll_scan_aux_chain),
2558 sizeof(ll_scan_aux_pool) / sizeof(struct ll_scan_aux_chain),
2559 &scan_aux_free);
2560
2561 return 0;
2562 }
2563
aux_chain_acquire(void)2564 static inline struct ll_scan_aux_chain *aux_chain_acquire(void)
2565 {
2566 return mem_acquire(&scan_aux_free);
2567 }
2568
aux_chain_release(struct ll_scan_aux_chain * chain)2569 static inline void aux_chain_release(struct ll_scan_aux_chain *chain)
2570 {
2571 /* Clear the parent so that when scan is being disabled then this
2572 * auxiliary context shall not associate itself from being disable.
2573 */
2574 LL_ASSERT(chain->parent);
2575 chain->parent = NULL;
2576
2577 mem_release(chain, &scan_aux_free);
2578 }
2579
/* ULL disabled callback: flush every chain queued on the flushing list.
 *
 * flush() unlinks the chain it is given from flushing_chains, so repeatedly
 * flushing the head drains the list and the loop terminates. Afterwards the
 * callback is disarmed and a pending ull_scan_aux_stop() waiter, if any, is
 * released.
 */
static void done_disabled_cb(void *param)
{
	ARG_UNUSED(param);

	while (scan_aux_set.flushing_chains) {
		flush(scan_aux_set.flushing_chains);
	}

	scan_aux_set.ull.disabled_cb = NULL;

	/* Release semaphore if it is locked */
	if (k_sem_count_get(&sem_scan_aux_stop) == 0) {
		k_sem_give(&sem_scan_aux_stop);
	}
}
2595
flush_safe(void * param)2596 static void flush_safe(void *param)
2597 {
2598 struct ll_scan_aux_chain *chain;
2599
2600 chain = param;
2601 LL_ASSERT(chain->parent);
2602
2603 if (chain_is_in_list(scan_aux_set.flushing_chains, chain)) {
2604 /* Chain already marked for flushing */
2605 return;
2606 }
2607
2608 /* If chain is active we need to flush from disabled callback */
2609 if (chain_is_in_list(scan_aux_set.active_chains, chain) &&
2610 ull_ref_get(&scan_aux_set.ull)) {
2611
2612 chain->next = scan_aux_set.flushing_chains;
2613 scan_aux_set.flushing_chains = chain;
2614 scan_aux_set.ull.disabled_cb = done_disabled_cb;
2615 } else {
2616 flush(chain);
2617 }
2618 }
2619
/* Flush a chain: forward or release every node rx it holds, dissociate it
 * from its parent scan/sync context and return it to the free pool.
 *
 * Must only be called when the chain is not referenced by a running LLL
 * event; use flush_safe() when that is not guaranteed.
 */
static void flush(struct ll_scan_aux_chain *chain)
{
	struct ll_scan_set *scan;
	struct node_rx_pdu *rx;
	struct lll_scan *lll;
	bool sched = false;

	/* Debug check that parent was assigned when allocated for reception of
	 * auxiliary channel PDUs.
	 */
	LL_ASSERT(chain->parent);

	/* Chain is being flushed now - remove from flushing_chains if present */
	chain_remove_from_list(&scan_aux_set.flushing_chains, chain);

	/* Non-NULL scan hereafter means the parent is a scan context,
	 * NULL means a periodic sync context.
	 */
	lll = chain->parent;
	scan = HDR_LLL2ULL(lll);
	scan = ull_scan_is_valid_get(scan);

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	if (!scan && chain->aux_sched) {
		/* Send incomplete sync message */
		aux_sync_incomplete(chain);
	}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

	/* Forward the accumulated node rx list towards the host */
	rx = chain->rx_head;
	if (rx) {
		chain->rx_head = NULL;

		ll_rx_put(rx->hdr.link, rx);
		sched = true;
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	if (!scan) {
		struct ll_sync_set *sync = HDR_LLL2ULL(lll);

		/* Release the spare incomplete-report node rx, if unused */
		rx = sync->rx_incomplete;
		if (rx) {
			sync->rx_incomplete = NULL;

			rx_release_put(rx);
			sched = true;
		}
	}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */

	if (sched) {
		ll_rx_sched();
	}

	/* Dissociate the auxiliary context from its parent */
	if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || scan) {
		if (lll->lll_aux == &chain->lll) {
			lll->lll_aux = NULL;
		}
#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
		/* Propagate the aux channel score back to the scan context */
		lll->scan_aux_score = chain->lll.hdr.score;
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */
	} else {
		struct lll_sync *sync_lll;
		struct ll_sync_set *sync;

		sync_lll = chain->parent;
		sync = HDR_LLL2ULL(sync_lll);

		LL_ASSERT(sync->is_stop || sync_lll->lll_aux);
		sync_lll->lll_aux = NULL;
	}

	aux_chain_release(chain);
}
2692
2693 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
aux_sync_incomplete(struct ll_scan_aux_chain * chain)2694 static void aux_sync_incomplete(struct ll_scan_aux_chain *chain)
2695 {
2696 struct ll_sync_set *sync;
2697 struct node_rx_pdu *rx;
2698 struct lll_sync *lll;
2699
2700 LL_ASSERT(chain->parent);
2701
2702 /* get reference to sync context */
2703 lll = chain->parent;
2704 LL_ASSERT(lll);
2705 sync = HDR_LLL2ULL(lll);
2706
2707 /* pick extra node rx stored in sync context */
2708 rx = sync->rx_incomplete;
2709 LL_ASSERT(rx);
2710 sync->rx_incomplete = NULL;
2711
2712 /* prepare sync report with failure */
2713 rx->hdr.type = NODE_RX_TYPE_SYNC_REPORT;
2714 rx->hdr.handle = ull_sync_handle_get(sync);
2715 rx->rx_ftr.param = lll;
2716
2717 /* flag chain reception failure */
2718 rx->rx_ftr.aux_failed = 1U;
2719
2720 /* Dequeue will try releasing list of node rx,
2721 * set the extra pointer to NULL.
2722 */
2723 rx->rx_ftr.extra = NULL;
2724
2725 /* add to rx list, will be flushed */
2726 chain->rx_head = rx;
2727 }
2728 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
2729
/* Program the single-shot TICKER_ID_SCAN_AUX ticker for the given chain.
 *
 * With slot reservation (non slot-agnostic ticker) the appropriate parent
 * ticker slot - primary scan window, previous aux event, or periodic sync
 * event - is yielded first. When replace is true the currently programmed
 * aux ticker is stopped before starting (the scheduled list head changed).
 */
static void chain_start_ticker(struct ll_scan_aux_chain *chain, bool replace)
{
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	uint8_t ticker_yield_handle = TICKER_NULL;
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
	uint32_t ticks_slot_overhead;
	uint32_t ticks_slot_offset;
	uint32_t ready_delay_us;
	uint8_t ticker_status;

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	/* Pick which parent ticker slot to yield for this aux event */
	if (ull_scan_is_valid_get(HDR_LLL2ULL(chain->parent))) {
		if (chain->rx_head == chain->rx_last) {
			/* First aux reception in this chain - yield the
			 * primary scan window.
			 */
			struct ll_scan_set *scan = HDR_LLL2ULL(chain->parent);

			ticker_yield_handle = TICKER_ID_SCAN_BASE +
					      ull_scan_handle_get(scan);
		} else {
			/* Subsequent AUX_CHAIN_IND - yield the previous aux */
			ticker_yield_handle = TICKER_ID_SCAN_AUX;
		}
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
	} else {
		/* Periodic sync context */
		struct ll_sync_set *ull_sync = HDR_LLL2ULL(chain->parent);

		ticker_yield_handle = TICKER_ID_SCAN_SYNC_BASE +
				      ull_sync_handle_get(ull_sync);
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
	}
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */

	ready_delay_us = lll_radio_rx_ready_delay_get(chain->lll.phy, PHY_FLAGS_S8);

	/* TODO: active_to_start feature port */
	scan_aux_set.ull.ticks_active_to_start = 0;
	scan_aux_set.ull.ticks_prepare_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	scan_aux_set.ull.ticks_preempt_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
	/* Reserve a slot large enough for the maximum-size auxiliary PDU on
	 * this PHY.
	 */
	scan_aux_set.ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(
		EVENT_OVERHEAD_START_US + ready_delay_us +
		PDU_AC_MAX_US(PDU_AC_EXT_PAYLOAD_RX_SIZE, chain->lll.phy) +
		EVENT_OVERHEAD_END_US);

	ticks_slot_offset = MAX(scan_aux_set.ull.ticks_active_to_start,
				scan_aux_set.ull.ticks_prepare_to_start);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}
	ticks_slot_offset += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	/* disable ticker job, in order to chain yield and start to reduce
	 * CPU use by reducing successive calls to ticker_job().
	 */
	mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 0);
#endif

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	/* Yield the primary scan window or auxiliary or periodic sync event
	 * in ticker.
	 */
	if (ticker_yield_handle != TICKER_NULL) {
		ticker_status = ticker_yield_abs(TICKER_INSTANCE_ID_CTLR,
						 TICKER_USER_ID_ULL_HIGH,
						 ticker_yield_handle,
						 (chain->ticker_ticks -
						  ticks_slot_offset),
						 NULL, NULL);
		LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
			  (ticker_status == TICKER_STATUS_BUSY));
	}
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */

	/* Replace a previously programmed aux ticker when the scheduled
	 * list head changed.
	 */
	if (replace) {
		ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
					    TICKER_ID_SCAN_AUX, NULL, NULL);
		LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
			  (ticker_status == TICKER_STATUS_BUSY));
	}

	ticker_status = ticker_start(TICKER_INSTANCE_ID_CTLR,
				     TICKER_USER_ID_ULL_HIGH,
				     TICKER_ID_SCAN_AUX,
				     chain->ticker_ticks - ticks_slot_offset,
				     0,
				     TICKER_NULL_PERIOD,
				     TICKER_NULL_REMAINDER,
				     TICKER_NULL_LAZY,
				     (scan_aux_set.ull.ticks_slot +
				      ticks_slot_overhead),
				     ticker_cb, chain, ticker_op_cb, chain);
#if defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
		  (ticker_status == TICKER_STATUS_BUSY));
#else
	/* With slot reservation a start may legitimately fail on collision
	 * when the low latency ticker is used; ticker_op_cb handles it.
	 */
	LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
		  (ticker_status == TICKER_STATUS_BUSY) ||
		  ((ticker_status == TICKER_STATUS_FAILURE) &&
		   IS_ENABLED(CONFIG_BT_TICKER_LOW_LAT)));
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */

#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	/* enable ticker job, queued ticker operation will be handled
	 * thereafter.
	 */
	mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1);
#endif
}
2841
/* Ticker expiry callback for TICKER_ID_SCAN_AUX.
 *
 * Moves the scheduled list head (which must be the expiring chain) to the
 * active list, queues an LLL prepare for it, and programs the ticker for
 * the next scheduled chain, if any. ticks_drift is unused; remainder is
 * not yet passed to LLL (see FIXME below).
 */
static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_scan_aux_prepare};
	struct ll_scan_aux_chain *chain = param;
	static struct lll_prepare_param p;
	uint32_t ret;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_O(1);

	/* Increment prepare reference count */
	ref = ull_ref_inc(&scan_aux_set.ull);
	LL_ASSERT(ref);

	/* The chain should always be the first in the sched_chains list */
	LL_ASSERT(scan_aux_set.sched_chains == chain);

	/* Move chain to active list */
	chain_remove_from_list(&scan_aux_set.sched_chains, chain);
	chain_append_to_list(&scan_aux_set.active_chains, chain);

	/* Append timing parameters */
	p.ticks_at_expire = ticks_at_expire;
	p.remainder = 0; /* FIXME: remainder; */
	p.lazy = lazy;
	p.force = force;
	p.param = &chain->lll;
	mfy.param = &p;

	/* Kick LLL prepare */
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
			     0, &mfy);
	LL_ASSERT(!ret);

	if (scan_aux_set.sched_chains) {
		/* Start ticker for next chain */
		chain_start_ticker(scan_aux_set.sched_chains, false);
	}

	DEBUG_RADIO_PREPARE_O(1);
}
2886
2887 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
ticker_start_failed(void * param)2888 static void ticker_start_failed(void *param)
2889 {
2890 struct ll_scan_aux_chain *chain;
2891
2892 /* Ticker start failed, so remove this chain from scheduled chains */
2893 chain = param;
2894 chain_remove_from_list(&scan_aux_set.sched_chains, chain);
2895
2896 flush(chain);
2897
2898 if (scan_aux_set.sched_chains) {
2899 chain_start_ticker(scan_aux_set.sched_chains, false);
2900 }
2901 }
2902 #endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
2903
/* Ticker operation status callback for the aux ticker start.
 *
 * With a slot-agnostic ticker a failure is unexpected - assert. With slot
 * reservation a start may fail on collision; defer cleanup of the failed
 * chain (param) to the ULL high priority context via ticker_start_failed.
 */
static void ticker_op_cb(uint32_t status, void *param)
{
#if defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
#else /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, ticker_start_failed};
	uint32_t ret;

	if (status == TICKER_STATUS_SUCCESS) {
		return;
	}

	/* Handle the failed start in ULL high priority context */
	mfy.param = param;

	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW, TICKER_USER_ID_ULL_HIGH,
			     1, &mfy);
	LL_ASSERT(!ret);
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
}
2924
chain_ticker_ticks_diff(uint32_t ticks_a,uint32_t ticks_b)2925 static int32_t chain_ticker_ticks_diff(uint32_t ticks_a, uint32_t ticks_b)
2926 {
2927 if ((ticks_a - ticks_b) & BIT(HAL_TICKER_CNTR_MSBIT)) {
2928 return -ticker_ticks_diff_get(ticks_b, ticks_a);
2929 } else {
2930 return ticker_ticks_diff_get(ticks_a, ticks_b);
2931 }
2932 }
2933
2934 /* Sorted insertion into sched list, starting/replacing the ticker when needed
2935 * Returns:
2936 * - false for no insertion (conflict with existing entry)
2937 * - true for inserted
2938 */
chain_insert_in_sched_list(struct ll_scan_aux_chain * chain)2939 static bool chain_insert_in_sched_list(struct ll_scan_aux_chain *chain)
2940 {
2941 struct ll_scan_aux_chain *curr = scan_aux_set.sched_chains;
2942 struct ll_scan_aux_chain *prev = NULL;
2943 uint32_t ticks_min_delta;
2944
2945 if (!scan_aux_set.sched_chains) {
2946 chain->next = NULL;
2947 scan_aux_set.sched_chains = chain;
2948 chain_start_ticker(chain, false);
2949 return true;
2950 }
2951
2952 /* Find insertion point */
2953 while (curr && chain_ticker_ticks_diff(chain->ticker_ticks, curr->ticker_ticks) > 0) {
2954 prev = curr;
2955 curr = curr->next;
2956 }
2957
2958 /* Check for conflict with existing entry */
2959 ticks_min_delta = HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_END_US + EVENT_OVERHEAD_START_US);
2960 if ((prev &&
2961 ticker_ticks_diff_get(chain->ticker_ticks, prev->ticker_ticks) < ticks_min_delta) ||
2962 (curr &&
2963 ticker_ticks_diff_get(curr->ticker_ticks, chain->ticker_ticks) < ticks_min_delta)) {
2964 return false;
2965 }
2966
2967 if (prev) {
2968 chain->next = prev->next;
2969 prev->next = chain;
2970 } else {
2971 chain->next = scan_aux_set.sched_chains;
2972 scan_aux_set.sched_chains = chain;
2973 chain_start_ticker(chain, true);
2974 }
2975
2976 return true;
2977 }
2978
chain_remove_from_list(struct ll_scan_aux_chain ** head,struct ll_scan_aux_chain * chain)2979 static void chain_remove_from_list(struct ll_scan_aux_chain **head, struct ll_scan_aux_chain *chain)
2980 {
2981 struct ll_scan_aux_chain *curr = *head;
2982 struct ll_scan_aux_chain *prev = NULL;
2983
2984 while (curr && curr != chain) {
2985 prev = curr;
2986 curr = curr->next;
2987 }
2988
2989 if (curr) {
2990 if (prev) {
2991 prev->next = curr->next;
2992 } else {
2993 *head = curr->next;
2994 }
2995 }
2996
2997 chain->next = NULL;
2998 }
2999
chain_append_to_list(struct ll_scan_aux_chain ** head,struct ll_scan_aux_chain * chain)3000 static void chain_append_to_list(struct ll_scan_aux_chain **head, struct ll_scan_aux_chain *chain)
3001 {
3002 struct ll_scan_aux_chain *prev = *head;
3003
3004 if (!*head) {
3005 chain->next = NULL;
3006 *head = chain;
3007 return;
3008 }
3009
3010 while (prev->next) {
3011 prev = prev->next;
3012 }
3013
3014 prev->next = chain;
3015 }
3016
chain_is_in_list(struct ll_scan_aux_chain * head,struct ll_scan_aux_chain * chain)3017 static bool chain_is_in_list(struct ll_scan_aux_chain *head, struct ll_scan_aux_chain *chain)
3018 {
3019 while (head) {
3020 if (head == chain) {
3021 return true;
3022 }
3023 head = head->next;
3024 }
3025 return false;
3026 }
3027 #endif /* CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
3028