1 /*
2 * Copyright (c) 2020 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6 #include <stdint.h>
7
8 #include <zephyr/toolchain.h>
9 #include <zephyr/sys/util.h>
10 #include <zephyr/sys/byteorder.h>
11
12 #include "hal/ccm.h"
13 #include "hal/radio.h"
14 #include "hal/ticker.h"
15 #include "hal/radio_df.h"
16
17 #include "util/util.h"
18 #include "util/memq.h"
19 #include "util/dbuf.h"
20
21 #include "pdu_df.h"
22 #include "pdu_vendor.h"
23 #include "pdu.h"
24
25 #include "lll.h"
26 #include "lll_vendor.h"
27 #include "lll_clock.h"
28 #include "lll_chan.h"
29 #include "lll_df_types.h"
30 #include "lll_scan.h"
31 #include "lll_sync.h"
32
33 #include "lll_internal.h"
34 #include "lll_tim_internal.h"
35 #include "lll_prof_internal.h"
36 #include "lll_scan_internal.h"
37
38 #include "lll_df.h"
39 #include "lll_df_internal.h"
40
41 #include "ll_feat.h"
42
43 #include <zephyr/bluetooth/hci_types.h>
44
45 #include <soc.h>
46 #include "hal/debug.h"
47
48 static int init_reset(void);
49 static void prepare(void *param);
50 static int create_prepare_cb(struct lll_prepare_param *p);
51 static int prepare_cb(struct lll_prepare_param *p);
52 static int prepare_cb_common(struct lll_prepare_param *p, uint8_t chan_idx);
53 static int is_abort_cb(void *next, void *curr, lll_prepare_cb_t *resume_cb);
54 static void abort_cb(struct lll_prepare_param *prepare_param, void *param);
55 static int isr_rx(struct lll_sync *lll, uint8_t node_type, uint8_t crc_ok,
56 uint8_t phy_flags_rx, uint8_t cte_ready, uint8_t rssi_ready,
57 enum sync_status status);
58 static void isr_rx_adv_sync_estab(void *param);
59 static void isr_rx_adv_sync(void *param);
60 static void isr_rx_aux_chain(void *param);
61 static void isr_rx_done_cleanup(struct lll_sync *lll, uint8_t crc_ok, bool sync_term);
62 static void isr_done(void *param);
63 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
64 static int iq_report_create_put(struct lll_sync *lll, uint8_t rssi_ready,
65 uint8_t packet_status);
66 static int iq_report_incomplete_create_put(struct lll_sync *lll);
67 static void iq_report_incomplete_release_put(struct lll_sync *lll);
68 static bool is_max_cte_reached(uint8_t max_cte_count, uint8_t cte_count);
69 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
70 static uint8_t data_channel_calc(struct lll_sync *lll);
71 static enum sync_status sync_filtrate_by_cte_type(uint8_t cte_type_mask, uint8_t filter_policy);
72
/* Count of completed receptions within the current sync event; reset to
 * zero during event preparation and set on successful PDU reception in
 * the RX ISRs.
 */
static uint8_t trx_cnt;
74
/* Initialize the periodic sync LLL module.
 *
 * @return Zero on success, otherwise the error from init_reset().
 */
int lll_sync_init(void)
{
	return init_reset();
}
86
/* Reset the periodic sync LLL module.
 *
 * @return Zero on success, otherwise the error from init_reset().
 */
int lll_sync_reset(void)
{
	return init_reset();
}
98
lll_sync_create_prepare(void * param)99 void lll_sync_create_prepare(void *param)
100 {
101 int err;
102
103 prepare(param);
104
105 /* Invoke common pipeline handling of prepare */
106 err = lll_prepare(is_abort_cb, abort_cb, create_prepare_cb, 0, param);
107 LL_ASSERT(!err || err == -EINPROGRESS);
108 }
109
lll_sync_prepare(void * param)110 void lll_sync_prepare(void *param)
111 {
112 int err;
113
114 prepare(param);
115
116 /* Invoke common pipeline handling of prepare */
117 err = lll_prepare(is_abort_cb, abort_cb, prepare_cb, 0, param);
118 LL_ASSERT(!err || err == -EINPROGRESS);
119 }
120
prepare(void * param)121 static void prepare(void *param)
122 {
123 struct lll_prepare_param *p;
124 struct lll_sync *lll;
125 int err;
126
127 /* Request to start HF Clock */
128 err = lll_hfclock_on();
129 LL_ASSERT(err >= 0);
130
131 p = param;
132
133 lll = p->param;
134
135 lll->lazy_prepare = p->lazy;
136
137 /* Accumulate window widening */
138 lll->window_widening_prepare_us += lll->window_widening_periodic_us *
139 (lll->lazy_prepare + 1U);
140 if (lll->window_widening_prepare_us > lll->window_widening_max_us) {
141 lll->window_widening_prepare_us = lll->window_widening_max_us;
142 }
143 }
144
/* Set up the radio for reception of an auxiliary (chain) PDU on the
 * channel and PHY carried in the scan aux context.
 *
 * @param lll     Periodic sync LLL context supplying access address, CRC
 *                init value and, when enabled, the DF (CTE) configuration.
 * @param lll_aux Scan aux context supplying the aux channel and PHY.
 */
void lll_sync_aux_prepare_cb(struct lll_sync *lll,
			     struct lll_scan_aux *lll_aux)
{
	struct node_rx_pdu *node_rx;

	/* Initialize Trx count */
	trx_cnt = 0U;

	/* Start setting up Radio h/w */
	radio_reset();

	radio_phy_set(lll_aux->phy, PHY_FLAGS_S8);
	radio_pkt_configure(RADIO_PKT_CONF_LENGTH_8BIT, LL_EXT_OCTETS_RX_MAX,
			    RADIO_PKT_CONF_PHY(lll_aux->phy));

	/* Peek a free receive buffer for the incoming PDU; must not fail
	 * at this point.
	 */
	node_rx = ull_pdu_rx_alloc_peek(1);
	LL_ASSERT(node_rx);

	radio_pkt_rx_set(node_rx->pdu);

	/* Set access address for sync */
	radio_aa_set(lll->access_addr);
	radio_crc_configure(PDU_CRC_POLYNOMIAL,
			    sys_get_le24(lll->crc_init));

	lll_chan_set(lll_aux->chan);

	/* Chain PDU receptions are handled by the aux chain ISR */
	radio_isr_set(isr_rx_aux_chain, lll);

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	struct lll_df_sync_cfg *cfg;

	cfg = lll_df_sync_cfg_latest_get(&lll->df_cfg, NULL);

	if (cfg->is_enabled) {
		int err;

		/* Prepare additional node for reporting insufficient memory for IQ samples
		 * reports.
		 */
		err = lll_df_iq_report_no_resources_prepare(lll);
		if (!err) {
			err = lll_df_conf_cte_rx_enable(cfg->slot_durations, cfg->ant_sw_len,
							cfg->ant_ids, lll_aux->chan,
							CTE_INFO_IN_PAYLOAD, lll_aux->phy);
			if (err) {
				lll->is_cte_incomplete = true;
			}
		} else {
			lll->is_cte_incomplete = true;
		}
		cfg->cte_count = 0;
	} else {
		/* If CTE reception is disabled, release additional node allocated to report
		 * insufficient memory for IQ samples.
		 */
		iq_report_incomplete_release_put(lll);
	}
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
	radio_switch_complete_and_disable();
}
206
207 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
lll_sync_cte_is_allowed(uint8_t cte_type_mask,uint8_t filter_policy,uint8_t rx_cte_time,uint8_t rx_cte_type)208 enum sync_status lll_sync_cte_is_allowed(uint8_t cte_type_mask, uint8_t filter_policy,
209 uint8_t rx_cte_time, uint8_t rx_cte_type)
210 {
211 bool cte_ok;
212
213 if (cte_type_mask == BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_NO_FILTERING) {
214 return SYNC_STAT_ALLOWED;
215 }
216
217 if (rx_cte_time > 0) {
218 if ((cte_type_mask & BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_NO_CTE) != 0) {
219 cte_ok = false;
220 } else {
221 switch (rx_cte_type) {
222 case BT_HCI_LE_AOA_CTE:
223 cte_ok = !(cte_type_mask &
224 BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_NO_AOA);
225 break;
226 case BT_HCI_LE_AOD_CTE_1US:
227 cte_ok = !(cte_type_mask &
228 BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_NO_AOD_1US);
229 break;
230 case BT_HCI_LE_AOD_CTE_2US:
231 cte_ok = !(cte_type_mask &
232 BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_NO_AOD_2US);
233 break;
234 default:
235 /* Unknown or forbidden CTE type */
236 cte_ok = false;
237 }
238 }
239 } else {
240 /* If there is no CTEInfo in advertising PDU, Radio will not parse the S0 byte and
241 * CTESTATUS register will hold zeros only.
242 * Zero value in CTETime field of CTESTATUS may be used to distinguish between PDU
243 * that includes CTEInfo or not. Allowed range for CTETime is 2-20.
244 */
245 if ((cte_type_mask & BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_ONLY_CTE) != 0) {
246 cte_ok = false;
247 } else {
248 cte_ok = true;
249 }
250 }
251
252 if (!cte_ok) {
253 return filter_policy ? SYNC_STAT_CONT_SCAN : SYNC_STAT_TERM;
254 }
255
256 return SYNC_STAT_ALLOWED;
257 }
258 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */
259
/* Common initialization shared by lll_sync_init() and lll_sync_reset().
 * There is currently no module state to (re)initialize.
 */
static int init_reset(void)
{
	return 0;
}
264
/* Prepare callback for a sync-establishment radio event.
 *
 * Folds accumulated prepare latency into the event counter, selects the
 * data channel, runs the common radio setup and, when enabled, configures
 * CTE reception. Arms isr_rx_adv_sync_estab() as the radio ISR.
 *
 * @param p Prepare parameters handed down by the LLL pipeline.
 *
 * @return Always zero; a failure from prepare_cb_common() means the event
 *         was already cancelled internally.
 */
static int create_prepare_cb(struct lll_prepare_param *p)
{
	uint16_t event_counter;
	struct lll_sync *lll;
	uint8_t chan_idx;
	int err;

	DEBUG_RADIO_START_O(1);

	lll = p->param;

	/* Calculate the current event latency */
	lll->skip_event = lll->skip_prepare + lll->lazy_prepare;

	/* Calculate the current event counter value */
	event_counter = lll->event_counter + lll->skip_event;

	/* Reset accumulated latencies */
	lll->skip_prepare = 0U;

	chan_idx = data_channel_calc(lll);

	/* Update event counter to next value */
	lll->event_counter = (event_counter + 1U);

	err = prepare_cb_common(p, chan_idx);
	if (err) {
		DEBUG_RADIO_START_O(1);

		return 0;
	}

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	struct lll_df_sync_cfg *cfg;

	cfg = lll_df_sync_cfg_latest_get(&lll->df_cfg, NULL);
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	/* NOTE: the if (false) head lets the conditionally compiled
	 * else-if/else branches chain correctly for every Kconfig
	 * combination.
	 */
	if (false) {
#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	} else if (cfg->is_enabled) {
		/* In case of call in create_prepare_cb, new sync event starts hence discard
		 * previous incomplete state.
		 */
		lll->is_cte_incomplete = false;

		/* Prepare additional node for reporting insufficient IQ report nodes issue */
		err = lll_df_iq_report_no_resources_prepare(lll);
		if (!err) {
			err = lll_df_conf_cte_rx_enable(cfg->slot_durations, cfg->ant_sw_len,
							cfg->ant_ids, chan_idx,
							CTE_INFO_IN_PAYLOAD, lll->phy);
			if (err) {
				lll->is_cte_incomplete = true;
			}
		} else {
			lll->is_cte_incomplete = true;
		}

		cfg->cte_count = 0;
	} else {
		/* If CTE reception is disabled, release additional node allocated to report
		 * insufficient memory for IQ samples.
		 */
		iq_report_incomplete_release_put(lll);
#else
	} else {
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
		if (IS_ENABLED(CONFIG_BT_CTLR_DF)) {
			/* Disable CTE reception and sampling in Radio */
			radio_df_cte_inline_set_enabled(false);
		}
	}

	radio_switch_complete_and_disable();

	/* RSSI enable must be called after radio_switch_XXX function because it clears
	 * RADIO->SHORTS register, thus disables all other shortcuts.
	 */
	radio_rssi_measure();

	radio_isr_set(isr_rx_adv_sync_estab, lll);

	DEBUG_RADIO_START_O(1);

	return 0;
}
352
/* Prepare callback for an event on an already established periodic sync.
 *
 * Mirrors create_prepare_cb(): folds accumulated latency into the event
 * counter, selects the data channel, runs the common radio setup and
 * optionally configures CTE reception. Arms isr_rx_adv_sync() as the
 * radio ISR.
 *
 * @param p Prepare parameters handed down by the LLL pipeline.
 *
 * @return Always zero; a failure from prepare_cb_common() means the event
 *         was already cancelled internally.
 */
static int prepare_cb(struct lll_prepare_param *p)
{
	uint16_t event_counter;
	struct lll_sync *lll;
	uint8_t chan_idx;
	int err;

	DEBUG_RADIO_START_O(1);

	lll = p->param;

	/* Calculate the current event latency */
	lll->skip_event = lll->skip_prepare + lll->lazy_prepare;

	/* Calculate the current event counter value */
	event_counter = lll->event_counter + lll->skip_event;

	/* Reset accumulated latencies */
	lll->skip_prepare = 0U;

	chan_idx = data_channel_calc(lll);

	/* Update event counter to next value */
	lll->event_counter = (event_counter + 1U);

	err = prepare_cb_common(p, chan_idx);
	if (err) {
		DEBUG_RADIO_START_O(1);

		return 0;
	}

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	struct lll_df_sync_cfg *cfg;

	cfg = lll_df_sync_cfg_latest_get(&lll->df_cfg, NULL);

	if (cfg->is_enabled) {
		/* In case of call in prepare, new sync event starts hence discard previous
		 * incomplete state.
		 */
		lll->is_cte_incomplete = false;

		/* Prepare additional node for reporting insufficient IQ report nodes issue */
		err = lll_df_iq_report_no_resources_prepare(lll);
		if (!err) {
			err = lll_df_conf_cte_rx_enable(cfg->slot_durations, cfg->ant_sw_len,
							cfg->ant_ids, chan_idx,
							CTE_INFO_IN_PAYLOAD, lll->phy);
			if (err) {
				lll->is_cte_incomplete = true;
			}
		} else {
			lll->is_cte_incomplete = true;
		}
		cfg->cte_count = 0;
	} else {
		/* If CTE reception is disabled, release additional node allocated to report
		 * insufficient memory for IQ samples.
		 */
		iq_report_incomplete_release_put(lll);
	}
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	radio_switch_complete_and_disable();

	/* RSSI enable must be called after radio_switch_XXX function because it clears
	 * RADIO->SHORTS register, thus disables all other shortcuts.
	 */
	radio_rssi_measure();

	radio_isr_set(isr_rx_adv_sync, lll);

	DEBUG_RADIO_START_O(1);

	return 0;
}
430
/* Radio and timer setup common to create_prepare_cb() and prepare_cb().
 *
 * Applies window widening, programs PHY, packet format, access address,
 * CRC and channel, starts the radio timer at the prepare expiry and
 * configures the header complete timeout that bounds the wait for an
 * access address.
 *
 * @param p        Prepare parameters from the LLL pipeline.
 * @param chan_idx Data channel index to receive on.
 *
 * @return Zero on success, -ECANCELED when the preempt overhead check
 *         fails and the event is cancelled.
 */
static int prepare_cb_common(struct lll_prepare_param *p, uint8_t chan_idx)
{
	struct node_rx_pdu *node_rx;
	uint32_t ticks_at_event;
	uint32_t ticks_at_start;
	uint32_t remainder_us;
	struct lll_sync *lll;
	struct ull_hdr *ull;
	uint32_t remainder;
	uint32_t hcto;
	uint32_t ret;

	lll = p->param;

	/* Current window widening */
	lll->window_widening_event_us += lll->window_widening_prepare_us;
	lll->window_widening_prepare_us = 0;
	if (lll->window_widening_event_us > lll->window_widening_max_us) {
		lll->window_widening_event_us = lll->window_widening_max_us;
	}

	/* Reset chain PDU being scheduled by lll_sync context */
	lll->is_aux_sched = 0U;

	/* Initialize Trx count */
	trx_cnt = 0U;

	/* Start setting up Radio h/w */
	radio_reset();

	radio_phy_set(lll->phy, PHY_FLAGS_S8);
	radio_pkt_configure(RADIO_PKT_CONF_LENGTH_8BIT, LL_EXT_OCTETS_RX_MAX,
			    RADIO_PKT_CONF_PHY(lll->phy));
	radio_aa_set(lll->access_addr);
	radio_crc_configure(PDU_CRC_POLYNOMIAL,
			    sys_get_le24(lll->crc_init));

	lll_chan_set(chan_idx);

	/* Peek a free receive buffer for the incoming PDU; must not fail */
	node_rx = ull_pdu_rx_alloc_peek(1);
	LL_ASSERT(node_rx);

	radio_pkt_rx_set(node_rx->pdu);

	ticks_at_event = p->ticks_at_expire;
	ull = HDR_LLL2ULL(lll);
	ticks_at_event += lll_event_offset_get(ull);

	ticks_at_start = ticks_at_event;
	ticks_at_start += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

	remainder = p->remainder;
	remainder_us = radio_tmr_start(0, ticks_at_start, remainder);

	radio_tmr_aa_capture();

	/* Header complete timeout: jitter, ticker resolution margin and
	 * window widening applied on both sides of the anchor, plus the
	 * sync window size, rx ready delay, address duration and rx chain
	 * delay.
	 */
	hcto = remainder_us +
	       ((EVENT_JITTER_US + EVENT_TICKER_RES_MARGIN_US + lll->window_widening_event_us)
		<< 1) +
	       lll->window_size_event_us;
	hcto += radio_rx_ready_delay_get(lll->phy, PHY_FLAGS_S8);
	hcto += addr_us_get(lll->phy);
	hcto += radio_rx_chain_delay_get(lll->phy, PHY_FLAGS_S8);
	radio_tmr_hcto_configure(hcto);

	radio_tmr_end_capture();

#if defined(HAL_RADIO_GPIO_HAVE_LNA_PIN)
	radio_gpio_lna_setup();

	radio_gpio_pa_lna_enable(remainder_us +
				 radio_rx_ready_delay_get(lll->phy,
							  PHY_FLAGS_S8) -
				 HAL_RADIO_GPIO_LNA_OFFSET);
#endif /* HAL_RADIO_GPIO_HAVE_LNA_PIN */

#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED) && \
	(EVENT_OVERHEAD_PREEMPT_US <= EVENT_OVERHEAD_PREEMPT_MIN_US)
	uint32_t overhead;

	overhead = lll_preempt_calc(ull, (TICKER_ID_SCAN_SYNC_BASE + ull_sync_lll_handle_get(lll)),
				    ticks_at_event);
	/* check if preempt to start has changed */
	if (overhead) {
		LL_ASSERT_OVERHEAD(overhead);

		radio_isr_set(isr_done, lll);
		radio_disable();

		return -ECANCELED;
	}
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */

	ret = lll_prepare_done(lll);
	LL_ASSERT(!ret);

	DEBUG_RADIO_START_O(1);

	return 0;
}
531
/* Decide whether the current sync event may be aborted in favour of the
 * next prepare in the pipeline.
 *
 * @param next      LLL context of the next pending event.
 * @param curr      LLL context of the currently running sync event.
 * @param resume_cb Unused; sync events never request resume.
 *
 * @return Zero to keep the current event running, -ECANCELED to abort it.
 */
static int is_abort_cb(void *next, void *curr, lll_prepare_cb_t *resume_cb)
{
	/* Sync context shall not resume when being preempted, i.e. they
	 * shall not use -EAGAIN as return value.
	 */
	ARG_UNUSED(resume_cb);

	/* Different radio event overlap */
	if (next != curr) {
		struct lll_scan_aux *lll_aux;
		struct lll_scan *lll;

		lll = ull_scan_lll_is_valid_get(next);
		if (lll) {
			/* Do not abort current periodic sync event as next
			 * event is a scan event.
			 */
			return 0;
		}

		lll_aux = ull_scan_aux_lll_is_valid_get(next);
		if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_SKIP_ON_SCAN_AUX) &&
		    lll_aux) {
			/* Do not abort current periodic sync event as next
			 * event is a scan aux event.
			 */
			return 0;
		}

#if defined(CONFIG_BT_CTLR_SCAN_AUX_SYNC_RESERVE_MIN)
		struct lll_sync *lll_sync_next;
		struct lll_sync *lll_sync_curr;

		lll_sync_next = ull_sync_lll_is_valid_get(next);
		if (!lll_sync_next) {
			lll_sync_curr = curr;

			/* Do not abort if near supervision timeout */
			if (lll_sync_curr->forced) {
				return 0;
			}

			/* Abort current event as next event is not a
			 * scan and not a scan aux event.
			 */
			return -ECANCELED;
		}

		/* Both are sync events: use abort counts to share radio
		 * time fairly between contending sync contexts.
		 */
		lll_sync_curr = curr;
		if (lll_sync_curr->abort_count < lll_sync_next->abort_count) {
			if (lll_sync_curr->abort_count < UINT8_MAX) {
				lll_sync_curr->abort_count++;
			}

			/* Abort current event as next event has higher abort
			 * count.
			 */
			return -ECANCELED;
		}

		if (lll_sync_next->abort_count < UINT8_MAX) {
			lll_sync_next->abort_count++;
		}

#else /* !CONFIG_BT_CTLR_SCAN_AUX_SYNC_RESERVE_MIN */
		/* Abort current event as next event is not a
		 * scan and not a scan aux event.
		 */
		return -ECANCELED;
#endif /* !CONFIG_BT_CTLR_SCAN_AUX_SYNC_RESERVE_MIN */
	}

	/* Do not abort if current periodic sync event overlaps next interval
	 * or next event is a scan event.
	 */
	return 0;
}
609
/* Abort handler for sync events.
 *
 * With prepare_param == NULL the on-air event is stopped via the radio
 * and isr_done() performs cleanup. Otherwise a prepare still sitting in
 * the pipeline is being cancelled: its latency is accumulated and an
 * extra done event is emitted so ULL can evaluate sync lost.
 */
static void abort_cb(struct lll_prepare_param *prepare_param, void *param)
{
	struct event_done_extra *e;
	struct lll_sync *lll;
	int err;

	/* NOTE: This is not a prepare being cancelled */
	if (!prepare_param) {
		/* Perform event abort here.
		 * After event has been cleanly aborted, clean up resources
		 * and dispatch event done.
		 */
		radio_isr_set(isr_done, param);
		radio_disable();

		return;
	}

	/* NOTE: Else clean the top half preparations of the aborted event
	 * currently in preparation pipeline.
	 */
	err = lll_hfclock_off();
	LL_ASSERT(err >= 0);

	/* Accumulate the latency as event is aborted while being in pipeline */
	lll = prepare_param->param;
	lll->skip_prepare += (lll->lazy_prepare + 1U);

	/* Extra done event, to check sync lost */
	e = ull_event_done_extra_get();
	LL_ASSERT(e);

	e->type = EVENT_DONE_EXTRA_TYPE_SYNC;
	e->trx_cnt = 0U;
	e->crc_valid = 0U;
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING) && \
	defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
	e->sync_term = 0U;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING &&
	* CONFIG_BT_CTLR_CTEINLINE_SUPPORT
	*/

	lll_done(param);
}
654
/* Radio ISR that sets up reception of an auxiliary PDU referenced by the
 * AuxPtr of a just-received PDU.
 *
 * Derives the aux offset, offset unit (window size) and clock-accuracy
 * based window widening from the AuxPtr, reprograms the radio for the
 * aux PHY and channel, and schedules reception at the computed start
 * time with a matching header complete timeout.
 *
 * @param param node_rx carrying the AuxPtr in its rx footer.
 */
static void isr_aux_setup(void *param)
{
	struct pdu_adv_aux_ptr *aux_ptr;
	struct node_rx_pdu *node_rx;
	uint32_t window_widening_us;
	uint32_t window_size_us;
	struct node_rx_ftr *ftr;
	uint32_t aux_offset_us;
	uint32_t aux_start_us;
	struct lll_sync *lll;
	uint32_t start_us;
	uint8_t phy_aux;
	uint32_t hcto;

	lll_isr_status_reset();

	node_rx = param;
	ftr = &node_rx->rx_ftr;
	aux_ptr = ftr->aux_ptr;
	phy_aux = BIT(PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
	ftr->aux_phy = phy_aux;

	lll = ftr->param;

	/* Determine the window size */
	if (aux_ptr->offs_units) {
		window_size_us = OFFS_UNIT_300_US;
	} else {
		window_size_us = OFFS_UNIT_30_US;
	}

	/* Calculate the aux offset from start of the scan window */
	aux_offset_us = (uint32_t) PDU_ADV_AUX_PTR_OFFSET_GET(aux_ptr) * window_size_us;

	/* Calculate the window widening that needs to be deducted */
	if (aux_ptr->ca) {
		window_widening_us = SCA_DRIFT_50_PPM_US(aux_offset_us);
	} else {
		window_widening_us = SCA_DRIFT_500_PPM_US(aux_offset_us);
	}

	/* Setup radio for auxiliary PDU scan */
	radio_phy_set(phy_aux, PHY_FLAGS_S8);
	radio_pkt_configure(RADIO_PKT_CONF_LENGTH_8BIT, LL_EXT_OCTETS_RX_MAX,
			    RADIO_PKT_CONF_PHY(phy_aux));

	lll_chan_set(aux_ptr->chan_idx);

	radio_pkt_rx_set(node_rx->pdu);

	radio_isr_set(isr_rx_aux_chain, lll);

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	struct lll_df_sync_cfg *cfg;

	cfg = lll_df_sync_cfg_latest_get(&lll->df_cfg, NULL);

	if (cfg->is_enabled && is_max_cte_reached(cfg->max_cte_count, cfg->cte_count)) {
		int err;

		/* Prepare additional node for reporting insufficient memory for IQ samples
		 * reports.
		 */
		err = lll_df_iq_report_no_resources_prepare(lll);
		if (!err) {
			err = lll_df_conf_cte_rx_enable(cfg->slot_durations,
							cfg->ant_sw_len,
							cfg->ant_ids,
							aux_ptr->chan_idx,
							CTE_INFO_IN_PAYLOAD,
							PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
			if (err) {
				lll->is_cte_incomplete = true;
			}
		} else {
			lll->is_cte_incomplete = true;
		}
	} else if (!cfg->is_enabled) {
		/* If CTE reception is disabled, release additional node allocated to report
		 * insufficient memory for IQ samples.
		 */
		iq_report_incomplete_release_put(lll);
	}
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
	radio_switch_complete_and_disable();

	/* Setup radio rx on micro second offset. Note that radio_end_us stores
	 * PDU start time in this case.
	 */
	aux_start_us = ftr->radio_end_us + aux_offset_us;
	aux_start_us -= lll_radio_rx_ready_delay_get(phy_aux, PHY_FLAGS_S8);
	aux_start_us -= window_widening_us;
	aux_start_us -= EVENT_JITTER_US;

	start_us = radio_tmr_start_us(0, aux_start_us);
	LL_ASSERT(start_us == (aux_start_us + 1U));

	/* Setup header complete timeout */
	hcto = start_us;
	hcto += EVENT_JITTER_US;
	hcto += window_widening_us;
	hcto += lll_radio_rx_ready_delay_get(phy_aux, PHY_FLAGS_S8);
	hcto += window_size_us;
	hcto += radio_rx_chain_delay_get(phy_aux, PHY_FLAGS_S8);
	hcto += addr_us_get(phy_aux);
	radio_tmr_hcto_configure_abs(hcto);

	/* capture end of Rx-ed PDU, extended scan to schedule auxiliary
	 * channel chaining, create connection or to create periodic sync.
	 */
	radio_tmr_end_capture();

	/* scanner always measures RSSI */
	radio_rssi_measure();

#if defined(HAL_RADIO_GPIO_HAVE_LNA_PIN)
	radio_gpio_lna_setup();

	radio_gpio_pa_lna_enable(start_us +
				 radio_rx_ready_delay_get(phy_aux,
							  PHY_FLAGS_S8) -
				 HAL_RADIO_GPIO_LNA_OFFSET);
#endif /* HAL_RADIO_GPIO_HAVE_LNA_PIN */
}
779
780 /**
781 * @brief Common part of ISR responsible for handling PDU receive.
782 *
783 * @param lll Pointer to LLL sync object.
784 * @param node_type Type of a receive node to be set for handling by ULL.
785 * @param crc_ok Informs if received PDU has correct CRC.
786 * @param phy_flags_rx Received Coded PHY coding scheme (0 - S1, 1 - S8).
787 * @param cte_ready Informs if received PDU has CTEInfo present and IQ samples were collected.
788 * @param rssi_ready Informs if RSSI for received PDU is ready.
789 * @param status Informs about periodic advertisement synchronization status.
790 *
791 * @return Zero in case of there is no chained PDU or there is a chained PDUs but spaced long enough
792 * to schedule its reception by ULL.
793 * @return -EBUSY in case there is a chained PDU scheduled by LLL due to short spacing.
794 */
static int isr_rx(struct lll_sync *lll, uint8_t node_type, uint8_t crc_ok,
		  uint8_t phy_flags_rx, uint8_t cte_ready, uint8_t rssi_ready,
		  enum sync_status status)
{
	bool sched = false;
	int err;

	/* Check CRC and generate Periodic Advertising Report */
	if (crc_ok) {
		struct node_rx_pdu *node_rx;

		/* Verify if there are free RX buffers for:
		 * - reporting just received PDU
		 * - allocating an extra node_rx for periodic report incomplete
		 * - a buffer for receiving data in a connection
		 * - a buffer for receiving empty PDU
		 *
		 * If this is a reception of chained PDU, node_type is
		 * NODE_RX_TYPE_EXT_AUX_REPORT, then there is no need to reserve
		 * again a node_rx for periodic report incomplete.
		 */
		if (node_type != NODE_RX_TYPE_EXT_AUX_REPORT) {
			node_rx = ull_pdu_rx_alloc_peek(4);
		} else {
			node_rx = ull_pdu_rx_alloc_peek(3);
		}

		if (node_rx) {
			struct node_rx_ftr *ftr;
			struct pdu_adv *pdu;

			/* Commit the buffer that the radio received into */
			ull_pdu_rx_alloc();

			node_rx->hdr.type = node_type;

			/* Fill the rx footer for ULL processing */
			ftr = &(node_rx->rx_ftr);
			ftr->param = lll;
			ftr->lll_aux = lll->lll_aux;
			ftr->aux_failed = 0U;
			ftr->rssi = (rssi_ready) ? radio_rssi_get() :
						   BT_HCI_LE_RSSI_NOT_AVAILABLE;
			ftr->ticks_anchor = radio_tmr_start_get();
			ftr->radio_end_us = radio_tmr_end_get() -
					    radio_rx_chain_delay_get(lll->phy,
								     phy_flags_rx);
			ftr->phy_flags = phy_flags_rx;
			ftr->sync_status = status;
			ftr->sync_rx_enabled = lll->is_rx_enabled;

			if (node_type != NODE_RX_TYPE_EXT_AUX_REPORT) {
				/* Reserve the extra node_rx for periodic
				 * report incomplete.
				 */
				ftr->extra = ull_pdu_rx_alloc();
			}

			pdu = (void *)node_rx->pdu;

			/* Check for an AuxPtr and, if present, try to
			 * schedule reception of the chain PDU.
			 */
			ftr->aux_lll_sched = lll_scan_aux_setup(pdu, lll->phy,
								phy_flags_rx,
								isr_aux_setup,
								lll);
			if (ftr->aux_lll_sched) {
				if (node_type != NODE_RX_TYPE_EXT_AUX_REPORT) {
					lll->is_aux_sched = 1U;
				}

				err = -EBUSY;
			} else {
				err = 0;
			}

			ull_rx_put(node_rx->hdr.link, node_rx);

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
			if (cte_ready) {
				/* If there is a periodic advertising report generate IQ data
				 * report with valid packet_status if there were free nodes for
				 * that. Or report insufficient resources for IQ data report.
				 *
				 * Returned value is not checked because it does not matter if there
				 * is a IQ report to be send towards ULL. There is always periodic
				 * sync report to be send.
				 */
				(void)iq_report_create_put(lll, rssi_ready, BT_HCI_LE_CTE_CRC_OK);
			}
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

			sched = true;
		} else {
			/* No free buffers: a chained PDU reception reports
			 * -ENOMEM so the caller can flag the report as
			 * incomplete.
			 */
			if (node_type == NODE_RX_TYPE_EXT_AUX_REPORT) {
				err = -ENOMEM;
			} else {
				err = 0;
			}
		}
	} else {
#if defined(CONFIG_BT_CTLR_DF_SAMPLE_CTE_FOR_PDU_WITH_BAD_CRC)
		/* In case of reception of chained PDUs IQ samples report for a PDU with wrong
		 * CRC is handled by caller. It has to be that way to be sure the IQ report
		 * follows possible periodic advertising report.
		 */
		if (cte_ready && node_type != NODE_RX_TYPE_EXT_AUX_REPORT) {
			err = iq_report_create_put(lll, rssi_ready,
						   BT_HCI_LE_CTE_CRC_ERR_CTE_BASED_TIME);
			if (!err) {
				sched = true;
			}
		}
#endif /* CONFIG_BT_CTLR_DF_SAMPLE_CTE_FOR_PDU_WITH_BAD_CRC */

		err = 0;
	}

	if (sched) {
		ull_rx_sched();
	}

	return err;
}
912
/* Radio ISR for reception while periodic sync is being established.
 *
 * Reads radio status, applies CTE-type filtering via
 * sync_filtrate_by_cte_type(), saves anchor timestamps for drift
 * compensation and hands the PDU to isr_rx() as NODE_RX_TYPE_SYNC. When
 * filtering demands termination, a NODE_RX_TYPE_SYNC node with
 * SYNC_STAT_TERM is sent to ULL instead.
 */
static void isr_rx_adv_sync_estab(void *param)
{
	enum sync_status sync_ok;
	struct lll_sync *lll;
	uint8_t phy_flags_rx;
	uint8_t rssi_ready;
	uint8_t cte_ready;
	uint8_t trx_done;
	uint8_t crc_ok;
	int err;

	lll = param;

	/* Read radio status and events */
	trx_done = radio_is_done();
	if (trx_done) {
		crc_ok = radio_crc_is_valid();
		rssi_ready = radio_rssi_is_ready();
		phy_flags_rx = radio_phy_flags_rx_get();
		sync_ok = sync_filtrate_by_cte_type(lll->cte_type, lll->filter_policy);
		trx_cnt = 1U;

		if (IS_ENABLED(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)) {
			cte_ready = radio_df_cte_ready();
		} else {
			cte_ready = 0U;
		}
	} else {
		crc_ok = phy_flags_rx = rssi_ready = cte_ready = 0U;
		/* Initiated as allowed; crc_ok takes precedence during
		 * handling of PDU reception in this situation.
		 */
		sync_ok = SYNC_STAT_ALLOWED;
	}

	/* Clear radio rx status and events */
	lll_isr_rx_status_reset();

	/* No Rx */
	if (!trx_done) {
		/* TODO: Combine the early exit with above if-then-else block
		 */
#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
		LL_ASSERT(!lll->node_cte_incomplete);
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

		goto isr_rx_done;
	}

	/* Save radio ready and address capture timestamp for later use for
	 * drift compensation.
	 */
	radio_tmr_aa_save(radio_tmr_aa_get());
	radio_tmr_ready_save(radio_tmr_ready_get());

	/* Handle regular PDU reception if CTE type is acceptable */
	if (sync_ok == SYNC_STAT_ALLOWED) {
		err = isr_rx(lll, NODE_RX_TYPE_SYNC, crc_ok, phy_flags_rx,
			     cte_ready, rssi_ready, SYNC_STAT_ALLOWED);
		if (err == -EBUSY) {
			/* Chain PDU reception was scheduled by LLL; defer
			 * cleanup until the chain completes.
			 */
			return;
		}
	} else if (sync_ok == SYNC_STAT_TERM) {
		struct node_rx_pdu *node_rx;

		/* Verify if there are free RX buffers for:
		 * - reporting just received PDU
		 * - a buffer for receiving data in a connection
		 * - a buffer for receiving empty PDU
		 */
		node_rx = ull_pdu_rx_alloc_peek(3);
		if (node_rx) {
			struct node_rx_ftr *ftr;

			ull_pdu_rx_alloc();

			node_rx->hdr.type = NODE_RX_TYPE_SYNC;

			ftr = &node_rx->rx_ftr;
			ftr->param = lll;
			ftr->sync_status = SYNC_STAT_TERM;

			ull_rx_put_sched(node_rx->hdr.link, node_rx);
		}
	}

isr_rx_done:
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING) && \
	defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
	isr_rx_done_cleanup(lll, crc_ok, sync_ok != SYNC_STAT_ALLOWED);
#else
	isr_rx_done_cleanup(lll, crc_ok, false);
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING && CONFIG_BT_CTLR_CTEINLINE_SUPPORT */
}
1007
/* Radio ISR for reception on an already established periodic sync.
 *
 * Reads radio status, saves anchor timestamps for drift compensation and
 * hands the PDU to isr_rx() as NODE_RX_TYPE_SYNC_REPORT with status
 * SYNC_STAT_READY (CTE-type changes do not affect an established sync).
 */
static void isr_rx_adv_sync(void *param)
{
	struct lll_sync *lll;
	uint8_t phy_flags_rx;
	uint8_t rssi_ready;
	uint8_t cte_ready;
	uint8_t trx_done;
	uint8_t crc_ok;
	int err;

	lll = param;

	/* Read radio status and events */
	trx_done = radio_is_done();
	if (trx_done) {
		crc_ok = radio_crc_is_valid();
		rssi_ready = radio_rssi_is_ready();
		phy_flags_rx = radio_phy_flags_rx_get();
		trx_cnt = 1U;

		if (IS_ENABLED(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)) {
			cte_ready = radio_df_cte_ready();
		} else {
			cte_ready = 0U;
		}
	} else {
		crc_ok = phy_flags_rx = rssi_ready = cte_ready = 0U;
	}

	/* Clear radio rx status and events */
	lll_isr_rx_status_reset();

	/* No Rx */
	if (!trx_done) {
		/* TODO: Combine the early exit with above if-then-else block
		 */
		goto isr_rx_done;
	}

	/* Save radio ready and address capture timestamp for later use for
	 * drift compensation.
	 */
	radio_tmr_aa_save(radio_tmr_aa_get());
	radio_tmr_ready_save(radio_tmr_ready_get());

	/* When periodic advertisement is synchronized, the CTEType may change. It should not
	 * affect synchronization even when new CTE type is not allowed by sync parameters.
	 * Hence the SYNC_STAT_READY is set.
	 */
	err = isr_rx(lll, NODE_RX_TYPE_SYNC_REPORT, crc_ok, phy_flags_rx, cte_ready, rssi_ready,
		     SYNC_STAT_READY);
	if (err == -EBUSY) {
		/* Chain PDU reception was scheduled by LLL; defer cleanup
		 * until the chain completes.
		 */
		return;
	}

isr_rx_done:
	isr_rx_done_cleanup(lll, crc_ok, false);
}
1066
/* ISR for reception of an auxiliary chain PDU (AUX_CHAIN_IND) while
 * synchronized to a periodic advertising train.
 *
 * Reads the radio trx/CRC/RSSI/CTE status and forwards a completed reception
 * to isr_rx() as a NODE_RX_TYPE_EXT_AUX_REPORT. On any failure path (aux
 * context not yet assigned by ULL, no reception, CRC error, or isr_rx()
 * error) a NODE_RX_TYPE_EXT_AUX_RELEASE message is generated so that ULL
 * releases the aux context and the subsequent HCI report is flagged
 * incomplete.
 *
 * @param param Pointer to the struct lll_sync context of this event.
 */
static void isr_rx_aux_chain(void *param)
{
	struct lll_scan_aux *lll_aux;
	struct lll_sync *lll;
	uint8_t phy_flags_rx;
	uint8_t rssi_ready;
	uint8_t cte_ready;
	uint8_t trx_done;
	uint8_t crc_ok;
	int err;

	lll = param;
	lll_aux = lll->lll_aux;
	if (!lll_aux) {
		/* auxiliary context not assigned (yet) in ULL execution
		 * context, drop current reception and abort further chain PDU
		 * receptions, if any.
		 */
		lll_isr_status_reset();

		rssi_ready = 0U;
		cte_ready = 0U;
		crc_ok = 0U;
		err = 0;

		goto isr_rx_aux_chain_done;
	}

	/* Read radio status and events */
	trx_done = radio_is_done();
	if (trx_done) {
		crc_ok = radio_crc_is_valid();
		phy_flags_rx = radio_phy_flags_rx_get();
		rssi_ready = radio_rssi_is_ready();

		if (IS_ENABLED(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)) {
			cte_ready = radio_df_cte_ready();
		} else {
			cte_ready = 0U;
		}
	} else {
		crc_ok = phy_flags_rx = rssi_ready = cte_ready = 0U;
	}

	/* Clear radio rx status and events */
	lll_isr_rx_status_reset();

	/* No Rx */
	if (!trx_done) {
		/* TODO: Combine the early exit with above if-then-else block
		 */

		err = 0;

		goto isr_rx_aux_chain_done;
	}

	/* When periodic advertisement is synchronized, the CTEType may change. It should not
	 * affect synchronization even when new CTE type is not allowed by sync parameters.
	 * Hence the SYNC_STAT_READY is set.
	 */
	err = isr_rx(lll, NODE_RX_TYPE_EXT_AUX_REPORT, crc_ok, phy_flags_rx, cte_ready, rssi_ready,
		     SYNC_STAT_READY);
	if (err == -EBUSY) {
		/* Early return without cleanup: the event continues
		 * (NOTE(review): inferred from the bare return — presumably a
		 * follow-up chain PDU reception was scheduled; confirm against
		 * isr_rx()).
		 */
		return;
	}

isr_rx_aux_chain_done:
	if (!crc_ok || err) {
		struct node_rx_pdu *node_rx;

		/* Generate message to release aux context and flag the report
		 * generated thereafter by HCI as incomplete.
		 */
		node_rx = ull_pdu_rx_alloc();
		LL_ASSERT(node_rx);

		node_rx->hdr.type = NODE_RX_TYPE_EXT_AUX_RELEASE;

		node_rx->rx_ftr.param = lll;
		node_rx->rx_ftr.lll_aux = lll->lll_aux;
		node_rx->rx_ftr.aux_failed = 1U;

		ull_rx_put(node_rx->hdr.link, node_rx);

		if (!crc_ok) {
#if defined(CONFIG_BT_CTLR_DF_SAMPLE_CTE_FOR_PDU_WITH_BAD_CRC)
			/* Still report sampled IQ data for a CTE received in
			 * a PDU with bad CRC, when that feature is enabled.
			 */
			if (cte_ready) {
				(void)iq_report_create_put(lll, rssi_ready,
							   BT_HCI_LE_CTE_CRC_ERR_CTE_BASED_TIME);
			}
#endif /* CONFIG_BT_CTLR_DF_SAMPLE_CTE_FOR_PDU_WITH_BAD_CRC */
		} else {
#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
			/* Report insufficient resources for IQ data report and release additional
			 * node_rx_iq_data stored in lll_sync object, to avoid buffers leakage.
			 */
			iq_report_incomplete_create_put(lll);
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
		}

		ull_rx_sched();
	}

	if (lll->is_aux_sched) {
		/* Aux reception was scheduled from this sync event in LLL;
		 * close the sync event itself.
		 */
		lll->is_aux_sched = 0U;

		isr_rx_done_cleanup(lll, 1U, false);
	} else {
		lll_isr_cleanup(lll_aux);
	}
}
1179
/* Close a periodic sync LLL event.
 *
 * Detaches the Sync context from any Aux context, fills the done-event extra
 * with the CRC status and — when at least one PDU was received in this event
 * (trx_cnt non-zero) — with the anchor point drift data ULL uses for drift
 * compensation, then releases the event through lll_isr_cleanup().
 *
 * @param lll       Periodic sync LLL context.
 * @param crc_ok    CRC validity to report in the done event.
 * @param sync_term CTE-type based termination status; forwarded in the done
 *                  event only when both CTE type filtering and CTE inline
 *                  support are compiled in.
 */
static void isr_rx_done_cleanup(struct lll_sync *lll, uint8_t crc_ok, bool sync_term)
{
	struct event_done_extra *e;

	/* Reset Sync context association with any Aux context as the chain reception is done.
	 * By code inspection there should not be a race that ULL execution context assigns lll_aux
	 * that would be reset here, because either we are here not receiving a chain PDU or the
	 * lll_aux has been set in the node rx type NODE_RX_TYPE_EXT_AUX_RELEASE before we are here.
	 */
	lll->lll_aux = NULL;

	/* Calculate and place the drift information in done event */
	e = ull_event_done_extra_get();
	LL_ASSERT(e);

	e->type = EVENT_DONE_EXTRA_TYPE_SYNC;
	e->trx_cnt = trx_cnt;
	e->crc_valid = crc_ok;
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING) && \
	defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
	e->sync_term = sync_term;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING && CONFIG_BT_CTLR_CTEINLINE_SUPPORT */
	if (trx_cnt) {
		/* Actual air-time between event start and access address,
		 * restored from the timestamps saved at reception time.
		 */
		e->drift.preamble_to_addr_us = addr_us_get(lll->phy);
		e->drift.start_to_address_actual_us =
			radio_tmr_aa_restore() - radio_tmr_ready_restore();
		e->drift.window_widening_event_us = lll->window_widening_event_us;

		/* Reset window widening, as anchor point sync-ed */
		lll->window_widening_event_us = 0U;
		lll->window_size_event_us = 0U;

#if defined(CONFIG_BT_CTLR_SCAN_AUX_SYNC_RESERVE_MIN)
		/* Reset LLL abort count as LLL event is gracefully done and
		 * was not aborted by any other event when current event could
		 * have been using unreserved time space.
		 */
		lll->abort_count = 0U;
#endif /* CONFIG_BT_CTLR_SCAN_AUX_SYNC_RESERVE_MIN */
	}

	lll_isr_cleanup(lll);
}
1223
/* ISR for event done (or aborted/preempted) without a pending reception.
 *
 * When the event was using LLL scheduling for chain PDU reception
 * (is_aux_sched set), generates a NODE_RX_TYPE_EXT_AUX_RELEASE message so
 * ULL releases the aux context and flags the HCI report as incomplete, then
 * closes the event via isr_rx_done_cleanup().
 *
 * @param param Pointer to the struct lll_sync context of this event.
 */
static void isr_done(void *param)
{
	struct lll_sync *lll;

	lll_isr_status_reset();

	/* Generate incomplete data status and release aux context when
	 * sync event is using LLL scheduling.
	 */
	lll = param;

	/* LLL scheduling used for chain PDU reception is aborted/preempted */
	if (lll->is_aux_sched) {
		struct node_rx_pdu *node_rx;

		lll->is_aux_sched = 0U;

		/* Generate message to release aux context and flag the report
		 * generated thereafter by HCI as incomplete.
		 */
		node_rx = ull_pdu_rx_alloc();
		LL_ASSERT(node_rx);

		node_rx->hdr.type = NODE_RX_TYPE_EXT_AUX_RELEASE;

		node_rx->rx_ftr.param = lll;
		node_rx->rx_ftr.lll_aux = lll->lll_aux;
		node_rx->rx_ftr.aux_failed = 1U;

		ull_rx_put_sched(node_rx->hdr.link, node_rx);
	}

	/* Report crc_ok as valid only when something was received (trx_cnt) */
	isr_rx_done_cleanup(param, ((trx_cnt) ? 1U : 0U), false);
}
1258
1259 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
/* Populate an IQ sample report node from the Direction Finding sampling
 * results read from the radio (sample count, CTE status, PDU antenna id)
 * and from the sync context (event counter).
 *
 * @param lll            Periodic sync LLL context.
 * @param rssi_ready     Non-zero when the radio captured an RSSI value.
 * @param packet_status  HCI packet status to report (e.g. CRC ok/error).
 * @param slot_durations Sampling slot durations, copied into the report.
 * @param iq_report      Report node to populate (caller-allocated).
 */
static void iq_report_create(struct lll_sync *lll, uint8_t rssi_ready, uint8_t packet_status,
			     uint8_t slot_durations, struct node_rx_iq_report *iq_report)
{
	struct node_rx_ftr *ftr;
	uint8_t cte_info;
	uint8_t ant;

	cte_info = radio_df_cte_status_get();
	ant = radio_df_pdu_antenna_switch_pattern_get();

	iq_report->rx.hdr.type = NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT;
	iq_report->sample_count = radio_df_iq_samples_amount_get();
	iq_report->packet_status = packet_status;
	iq_report->rssi_ant_id = ant;
	/* Reinterpret the raw CTE status byte as the CTEInfo bit-field layout
	 * (NOTE(review): type-pun via cast — assumes matching layout; kept
	 * as-is from original code).
	 */
	iq_report->cte_info = *(struct pdu_cte_info *)&cte_info;
	iq_report->local_slot_durations = slot_durations;
	/* Event counter is updated to next value during event preparation, hence
	 * it has to be subtracted to store actual event counter value.
	 */
	iq_report->event_counter = lll->event_counter - 1;

	ftr = &iq_report->rx.rx_ftr;
	ftr->param = lll;
	ftr->rssi =
		((rssi_ready) ? radio_rssi_get() : BT_HCI_LE_RSSI_NOT_AVAILABLE);
}
1286
iq_report_incomplete_create(struct lll_sync * lll,struct node_rx_iq_report * iq_report)1287 static void iq_report_incomplete_create(struct lll_sync *lll, struct node_rx_iq_report *iq_report)
1288 {
1289 struct node_rx_ftr *ftr;
1290
1291 iq_report->rx.hdr.type = NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT;
1292 iq_report->sample_count = 0;
1293 iq_report->packet_status = BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES;
1294 /* Event counter is updated to next value during event preparation,
1295 * hence it has to be subtracted to store actual event counter
1296 * value.
1297 */
1298 iq_report->event_counter = lll->event_counter - 1;
1299 /* The PDU antenna is set in configuration, hence it is always
1300 * available. BT 5.3 Core Spec. does not say if this field
1301 * may be invalid in case of insufficient resources.
1302 */
1303 iq_report->rssi_ant_id = radio_df_pdu_antenna_switch_pattern_get();
1304 /* According to BT 5.3, Vol 4, Part E, section 7.7.65.21 below
1305 * fields have invalid values in case of insufficient resources.
1306 */
1307 iq_report->cte_info =
1308 (struct pdu_cte_info){.time = 0, .rfu = 0, .type = 0};
1309 iq_report->local_slot_durations = 0;
1310
1311 ftr = &iq_report->rx.rx_ftr;
1312 ftr->param = lll;
1313
1314 ftr->rssi = BT_HCI_LE_RSSI_NOT_AVAILABLE;
1315 ftr->extra = NULL;
1316 }
1317
/* Create an IQ report (regular or "insufficient resources") and put it on
 * the ULL rx queue.
 *
 * @param lll           Periodic sync LLL context.
 * @param rssi_ready    Non-zero when the radio captured an RSSI value.
 * @param packet_status HCI packet status for the report.
 *
 * @return 0 when a report was queued, -ENODATA when sampling is disabled,
 *         the configured maximum CTE count was reached, or the incomplete
 *         CTE was already reported.
 */
static int iq_report_create_put(struct lll_sync *lll, uint8_t rssi_ready, uint8_t packet_status)
{
	struct node_rx_iq_report *report;
	struct lll_df_sync_cfg *cfg;

	cfg = lll_df_sync_cfg_curr_get(&lll->df_cfg);

	/* Nothing to report: sampling disabled or max CTE count reached */
	if (!cfg->is_enabled ||
	    !is_max_cte_reached(cfg->max_cte_count, cfg->cte_count)) {
		return -ENODATA;
	}

	if (!lll->is_cte_incomplete) {
		/* Regular report with sampled IQ data */
		report = ull_df_iq_report_alloc();
		LL_ASSERT(report);

		iq_report_create(lll, rssi_ready, packet_status,
				 cfg->slot_durations, report);
	} else {
		report = lll->node_cte_incomplete;

		/* Reception of chained PDUs may be still in progress. Do not report
		 * insufficient resources multiple times.
		 */
		if (!report) {
			/* Incomplete CTE was already reported */
			return -ENODATA;
		}

		iq_report_incomplete_create(lll, report);
		lll->node_cte_incomplete = NULL;
	}

	/* Report ready, hand it over to ULL */
	ull_rx_put(report->rx.hdr.link, report);

	cfg->cte_count += 1U;

	return 0;
}
1367
iq_report_incomplete_create_put(struct lll_sync * lll)1368 static int iq_report_incomplete_create_put(struct lll_sync *lll)
1369 {
1370 struct lll_df_sync_cfg *cfg;
1371
1372 cfg = lll_df_sync_cfg_curr_get(&lll->df_cfg);
1373
1374 if (cfg->is_enabled) {
1375 struct node_rx_iq_report *iq_report;
1376
1377 iq_report = lll->node_cte_incomplete;
1378
1379 /* Reception of chained PDUs may be still in progress. Do not report
1380 * insufficient resources multiple times.
1381 */
1382 if (iq_report) {
1383 iq_report_incomplete_create(lll, iq_report);
1384
1385 lll->node_cte_incomplete = NULL;
1386 ull_rx_put(iq_report->rx.hdr.link, iq_report);
1387
1388 return 0;
1389 } else {
1390 /* Incomplete CTE was already reported */
1391 return -ENODATA;
1392 }
1393
1394 }
1395
1396 return -ENODATA;
1397 }
1398
iq_report_incomplete_release_put(struct lll_sync * lll)1399 static void iq_report_incomplete_release_put(struct lll_sync *lll)
1400 {
1401 if (lll->node_cte_incomplete) {
1402 struct node_rx_iq_report *iq_report = lll->node_cte_incomplete;
1403
1404 iq_report->rx.hdr.type = NODE_RX_TYPE_IQ_SAMPLE_REPORT_LLL_RELEASE;
1405
1406 ull_rx_put(iq_report->rx.hdr.link, iq_report);
1407 lll->node_cte_incomplete = NULL;
1408 }
1409 }
is_max_cte_reached(uint8_t max_cte_count,uint8_t cte_count)1410 static bool is_max_cte_reached(uint8_t max_cte_count, uint8_t cte_count)
1411 {
1412 return max_cte_count == BT_HCI_LE_SAMPLE_CTE_ALL || cte_count < max_cte_count;
1413 }
1414 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1415
data_channel_calc(struct lll_sync * lll)1416 static uint8_t data_channel_calc(struct lll_sync *lll)
1417 {
1418 uint8_t data_chan_count;
1419 uint8_t *data_chan_map;
1420
1421 /* Process channel map update, if any */
1422 if (lll->chm_first != lll->chm_last) {
1423 uint16_t instant_latency;
1424
1425 instant_latency = (lll->event_counter + lll->skip_event - lll->chm_instant) &
1426 EVENT_INSTANT_MAX;
1427 if (instant_latency <= EVENT_INSTANT_LATENCY_MAX) {
1428 /* At or past the instant, use channelMapNew */
1429 lll->chm_first = lll->chm_last;
1430 }
1431 }
1432
1433 /* Calculate the radio channel to use */
1434 data_chan_map = lll->chm[lll->chm_first].data_chan_map;
1435 data_chan_count = lll->chm[lll->chm_first].data_chan_count;
1436 return lll_chan_sel_2(lll->event_counter + lll->skip_event, lll->data_chan_id,
1437 data_chan_map, data_chan_count);
1438 }
1439
/* Evaluate whether the CTE of the just-received PDU (time and type read from
 * the radio's CTE inline registers) is allowed by the sync's CTE type filter.
 *
 * @param cte_type_mask Bit mask of CTE types not allowed by the sync.
 * @param filter_policy Sync CTE type filter policy.
 *
 * @return Result of lll_sync_cte_is_allowed() when CTE type filtering and
 *         CTE inline support are compiled in; SYNC_STAT_ALLOWED otherwise
 *         (the trailing return is unreachable in the former configuration).
 */
static enum sync_status sync_filtrate_by_cte_type(uint8_t cte_type_mask, uint8_t filter_policy)
{
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING) && \
	defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
	uint8_t rx_cte_time;
	uint8_t rx_cte_type;

	rx_cte_time = nrf_radio_cte_time_get(NRF_RADIO);
	rx_cte_type = nrf_radio_cte_type_get(NRF_RADIO);

	return lll_sync_cte_is_allowed(cte_type_mask, filter_policy, rx_cte_time, rx_cte_type);

#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING && CONFIG_BT_CTLR_CTEINLINE_SUPPORT */
	return SYNC_STAT_ALLOWED;
}
1455