1 /*
2 * Copyright (c) 2020 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6 #include <stdint.h>
7
8 #include <zephyr/toolchain.h>
9 #include <zephyr/sys/util.h>
10 #include <zephyr/sys/byteorder.h>
11
12 #include "hal/ccm.h"
13 #include "hal/radio.h"
14 #include "hal/ticker.h"
15 #include "hal/radio_df.h"
16
17 #include "util/util.h"
18 #include "util/memq.h"
19 #include "util/dbuf.h"
20
21 #include "pdu_df.h"
22 #include "pdu_vendor.h"
23 #include "pdu.h"
24
25 #include "lll.h"
26 #include "lll_vendor.h"
27 #include "lll_clock.h"
28 #include "lll_chan.h"
29 #include "lll_df_types.h"
30 #include "lll_scan.h"
31 #include "lll_sync.h"
32
33 #include "lll_internal.h"
34 #include "lll_tim_internal.h"
35 #include "lll_prof_internal.h"
36 #include "lll_scan_internal.h"
37
38 #include "lll_df.h"
39 #include "lll_df_internal.h"
40
41 #include "ll_feat.h"
42
43 #include <zephyr/bluetooth/hci_types.h>
44
45 #include <soc.h>
46 #include "hal/debug.h"
47
48 static int init_reset(void);
49 static void prepare(void *param);
50 static int create_prepare_cb(struct lll_prepare_param *p);
51 static int prepare_cb(struct lll_prepare_param *p);
52 static int prepare_cb_common(struct lll_prepare_param *p, uint8_t chan_idx);
53 static int is_abort_cb(void *next, void *curr, lll_prepare_cb_t *resume_cb);
54 static void abort_cb(struct lll_prepare_param *prepare_param, void *param);
55 static int isr_rx(struct lll_sync *lll, uint8_t node_type, uint8_t crc_ok,
56 uint8_t phy_flags_rx, uint8_t cte_ready, uint8_t rssi_ready,
57 enum sync_status status);
58 static void isr_rx_adv_sync_estab(void *param);
59 static void isr_rx_adv_sync(void *param);
60 static void isr_rx_aux_chain(void *param);
61 static void isr_rx_done_cleanup(struct lll_sync *lll, uint8_t crc_ok, bool sync_term);
62 static void isr_done(void *param);
63 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
64 static int iq_report_create_put(struct lll_sync *lll, uint8_t rssi_ready,
65 uint8_t packet_status);
66 static int iq_report_incomplete_create_put(struct lll_sync *lll);
67 static void iq_report_incomplete_release_put(struct lll_sync *lll);
68 static bool is_max_cte_reached(uint8_t max_cte_count, uint8_t cte_count);
69 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
70 static uint8_t data_channel_calc(struct lll_sync *lll);
71 static enum sync_status sync_filtrate_by_cte_type(uint8_t cte_type_mask, uint8_t filter_policy);
72
73 static uint8_t trx_cnt;
74
/* Initialize the LLL periodic sync module.
 *
 * Return: zero on success, otherwise the error from init_reset().
 */
int lll_sync_init(void)
{
	return init_reset();
}
86
/* Reset the LLL periodic sync module.
 *
 * Return: zero on success, otherwise the error from init_reset().
 */
int lll_sync_reset(void)
{
	return init_reset();
}
98
lll_sync_create_prepare(void * param)99 void lll_sync_create_prepare(void *param)
100 {
101 int err;
102
103 prepare(param);
104
105 /* Invoke common pipeline handling of prepare */
106 err = lll_prepare(is_abort_cb, abort_cb, create_prepare_cb, 0, param);
107 LL_ASSERT(!err || err == -EINPROGRESS);
108 }
109
lll_sync_prepare(void * param)110 void lll_sync_prepare(void *param)
111 {
112 int err;
113
114 prepare(param);
115
116 /* Invoke common pipeline handling of prepare */
117 err = lll_prepare(is_abort_cb, abort_cb, prepare_cb, 0, param);
118 LL_ASSERT(!err || err == -EINPROGRESS);
119 }
120
/* Common top-half of both prepare entry points: request the high-frequency
 * clock and accumulate the receive window widening contributed by the
 * events skipped (lazy) since the last prepare.
 */
static void prepare(void *param)
{
	struct lll_prepare_param *p;
	struct lll_sync *lll;
	int err;

	/* Request to start HF Clock */
	err = lll_hfclock_on();
	LL_ASSERT(err >= 0);

	p = param;

	lll = p->param;

	/* Accumulate window widening; each elapsed event (lazy + this one)
	 * contributes one periodic widening quantum.
	 */
	lll->window_widening_prepare_us += lll->window_widening_periodic_us *
					   (p->lazy + 1U);
	/* Clamp to the maximum permitted widening */
	if (lll->window_widening_prepare_us > lll->window_widening_max_us) {
		lll->window_widening_prepare_us = lll->window_widening_max_us;
	}
}
142
/* Set up the radio to receive an auxiliary (chain) PDU belonging to the
 * periodic sync train, using the scan aux context's PHY and channel while
 * keeping the sync train's access address and CRC init.
 *
 * Asserts if no free node_rx buffer is available for the reception.
 */
void lll_sync_aux_prepare_cb(struct lll_sync *lll,
			     struct lll_scan_aux *lll_aux)
{
	struct node_rx_pdu *node_rx;

	/* Initialize Trx count */
	trx_cnt = 0U;

	/* Start setting up Radio h/w */
	radio_reset();

	radio_phy_set(lll_aux->phy, PHY_FLAGS_S8);
	radio_pkt_configure(RADIO_PKT_CONF_LENGTH_8BIT, LL_EXT_OCTETS_RX_MAX,
			    RADIO_PKT_CONF_PHY(lll_aux->phy));

	/* Peek only; the buffer is consumed later in the rx ISR path */
	node_rx = ull_pdu_rx_alloc_peek(1);
	LL_ASSERT(node_rx);

	radio_pkt_rx_set(node_rx->pdu);

	/* Set access address for sync */
	radio_aa_set(lll->access_addr);
	radio_crc_configure(PDU_CRC_POLYNOMIAL,
			    sys_get_le24(lll->crc_init));

	lll_chan_set(lll_aux->chan);

	radio_isr_set(isr_rx_aux_chain, lll);

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	struct lll_df_sync_cfg *cfg;

	cfg = lll_df_sync_cfg_latest_get(&lll->df_cfg, NULL);

	if (cfg->is_enabled) {
		int err;

		/* Prepare additional node for reporting insufficient memory for IQ samples
		 * reports.
		 */
		err = lll_df_iq_report_no_resources_prepare(lll);
		if (!err) {
			err = lll_df_conf_cte_rx_enable(cfg->slot_durations, cfg->ant_sw_len,
							cfg->ant_ids, lll_aux->chan,
							CTE_INFO_IN_PAYLOAD, lll_aux->phy);
			if (err) {
				/* Mark sampling as incomplete so ULL can flag the report */
				lll->is_cte_incomplete = true;
			}
		} else {
			lll->is_cte_incomplete = true;
		}
		cfg->cte_count = 0;
	} else {
		/* If CTE reception is disabled, release additional node allocated to report
		 * insufficient memory for IQ samples.
		 */
		iq_report_incomplete_release_put(lll);
	}
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
	radio_switch_complete_and_disable();
}
204
205 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
/* Decide whether a received PDU's CTE passes the sync CTE-type filter.
 *
 * @param cte_type_mask  Bitmask of CTE types to reject (HCI defined).
 * @param filter_policy  Non-zero when scanning may continue after a reject.
 * @param rx_cte_time    CTETime from CTESTATUS; zero means no CTEInfo in PDU
 *                       (valid CTETime range is 2-20).
 * @param rx_cte_type    CTEType from CTESTATUS.
 *
 * @return SYNC_STAT_ALLOWED when accepted, SYNC_STAT_CONT_SCAN or
 *         SYNC_STAT_TERM when rejected, depending on filter policy.
 */
enum sync_status lll_sync_cte_is_allowed(uint8_t cte_type_mask, uint8_t filter_policy,
					 uint8_t rx_cte_time, uint8_t rx_cte_type)
{
	bool allowed;

	/* No filtering requested: everything is acceptable */
	if (cte_type_mask == BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_NO_FILTERING) {
		return SYNC_STAT_ALLOWED;
	}

	if (rx_cte_time == 0U) {
		/* If there is no CTEInfo in advertising PDU, Radio will not parse the S0
		 * byte and CTESTATUS register will hold zeros only.
		 * Zero value in CTETime field of CTESTATUS may be used to distinguish
		 * between PDU that includes CTEInfo or not. Allowed range for CTETime
		 * is 2-20.
		 */
		allowed = ((cte_type_mask &
			    BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_ONLY_CTE) == 0);
	} else if ((cte_type_mask & BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_NO_CTE) != 0) {
		/* Any CTE present is rejected */
		allowed = false;
	} else if (rx_cte_type == BT_HCI_LE_AOA_CTE) {
		allowed = !(cte_type_mask &
			    BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_NO_AOA);
	} else if (rx_cte_type == BT_HCI_LE_AOD_CTE_1US) {
		allowed = !(cte_type_mask &
			    BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_NO_AOD_1US);
	} else if (rx_cte_type == BT_HCI_LE_AOD_CTE_2US) {
		allowed = !(cte_type_mask &
			    BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_NO_AOD_2US);
	} else {
		/* Unknown or forbidden CTE type */
		allowed = false;
	}

	if (allowed) {
		return SYNC_STAT_ALLOWED;
	}

	return filter_policy ? SYNC_STAT_CONT_SCAN : SYNC_STAT_TERM;
}
256 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */
257
/* Common init/reset helper; nothing to set up at the LLL level yet. */
static int init_reset(void)
{
	/* Always succeeds */
	return 0;
}
262
/* Prepare callback used while the periodic sync is being established.
 *
 * Advances the event counter across skipped (lazy) events, selects the
 * data channel, performs the common radio setup, optionally arms CTE
 * reception, and installs the sync-establishment rx ISR.
 */
static int create_prepare_cb(struct lll_prepare_param *p)
{
	uint16_t event_counter;
	struct lll_sync *lll;
	uint8_t chan_idx;
	int err;

	DEBUG_RADIO_START_O(1);

	lll = p->param;

	/* Calculate the current event latency */
	lll->skip_event = lll->skip_prepare + p->lazy;

	/* Calculate the current event counter value */
	event_counter = lll->event_counter + lll->skip_event;

	/* Reset accumulated latencies */
	lll->skip_prepare = 0U;

	chan_idx = data_channel_calc(lll);

	/* Update event counter to next value */
	lll->event_counter = (event_counter + 1U);

	err = prepare_cb_common(p, chan_idx);
	if (err) {
		DEBUG_RADIO_START_O(1);

		/* NOTE: returns success; the common prepare has already
		 * handled cancellation of this event.
		 */
		return 0;
	}

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	struct lll_df_sync_cfg *cfg;

	cfg = lll_df_sync_cfg_latest_get(&lll->df_cfg, NULL);
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	/* NOTE: if-else chain written with a leading `if (false)` so the
	 * conditionally-compiled branches compose cleanly.
	 */
	if (false) {
#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	} else if (cfg->is_enabled) {
		/* In case of call in create_prepare_cb, new sync event starts hence discard
		 * previous incomplete state.
		 */
		lll->is_cte_incomplete = false;

		/* Prepare additional node for reporting insufficient IQ report nodes issue */
		err = lll_df_iq_report_no_resources_prepare(lll);
		if (!err) {
			err = lll_df_conf_cte_rx_enable(cfg->slot_durations, cfg->ant_sw_len,
							cfg->ant_ids, chan_idx,
							CTE_INFO_IN_PAYLOAD, lll->phy);
			if (err) {
				lll->is_cte_incomplete = true;
			}
		} else {
			lll->is_cte_incomplete = true;
		}

		cfg->cte_count = 0;
	} else {
		/* If CTE reception is disabled, release additional node allocated to report
		 * insufficient memory for IQ samples.
		 */
		iq_report_incomplete_release_put(lll);
#else
	} else {
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
		if (IS_ENABLED(CONFIG_BT_CTLR_DF)) {
			/* Disable CTE reception and sampling in Radio */
			radio_df_cte_inline_set_enabled(false);
		}
	}

	radio_switch_complete_and_disable();

	/* RSSI enable must be called after radio_switch_XXX function because it clears
	 * RADIO->SHORTS register, thus disables all other shortcuts.
	 */
	radio_rssi_measure();

	radio_isr_set(isr_rx_adv_sync_estab, lll);

	DEBUG_RADIO_START_O(1);

	return 0;
}
350
/* Prepare callback used once the periodic sync is already established.
 *
 * Mirrors create_prepare_cb() but installs the steady-state rx ISR
 * (isr_rx_adv_sync) instead of the establishment handler.
 */
static int prepare_cb(struct lll_prepare_param *p)
{
	uint16_t event_counter;
	struct lll_sync *lll;
	uint8_t chan_idx;
	int err;

	DEBUG_RADIO_START_O(1);

	lll = p->param;

	/* Calculate the current event latency */
	lll->skip_event = lll->skip_prepare + p->lazy;

	/* Calculate the current event counter value */
	event_counter = lll->event_counter + lll->skip_event;

	/* Reset accumulated latencies */
	lll->skip_prepare = 0U;

	chan_idx = data_channel_calc(lll);

	/* Update event counter to next value */
	lll->event_counter = (event_counter + 1U);

	err = prepare_cb_common(p, chan_idx);
	if (err) {
		DEBUG_RADIO_START_O(1);

		/* NOTE: returns success; the common prepare has already
		 * handled cancellation of this event.
		 */
		return 0;
	}

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	struct lll_df_sync_cfg *cfg;

	cfg = lll_df_sync_cfg_latest_get(&lll->df_cfg, NULL);

	if (cfg->is_enabled) {
		/* In case of call in prepare, new sync event starts hence discard previous
		 * incomplete state.
		 */
		lll->is_cte_incomplete = false;

		/* Prepare additional node for reporting insufficient IQ report nodes issue */
		err = lll_df_iq_report_no_resources_prepare(lll);
		if (!err) {
			err = lll_df_conf_cte_rx_enable(cfg->slot_durations, cfg->ant_sw_len,
							cfg->ant_ids, chan_idx,
							CTE_INFO_IN_PAYLOAD, lll->phy);
			if (err) {
				lll->is_cte_incomplete = true;
			}
		} else {
			lll->is_cte_incomplete = true;
		}
		cfg->cte_count = 0;
	} else {
		/* If CTE reception is disabled, release additional node allocated to report
		 * insufficient memory for IQ samples.
		 */
		iq_report_incomplete_release_put(lll);
	}
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	radio_switch_complete_and_disable();

	/* RSSI enable must be called after radio_switch_XXX function because it clears
	 * RADIO->SHORTS register, thus disables all other shortcuts.
	 */
	radio_rssi_measure();

	radio_isr_set(isr_rx_adv_sync, lll);

	DEBUG_RADIO_START_O(1);

	return 0;
}
428
/* Common radio and timer setup shared by both prepare callbacks.
 *
 * Applies accumulated window widening, configures PHY/AA/CRC/channel,
 * starts the radio timer at the event anchor, programs the header-complete
 * timeout (hcto) and optional LNA GPIO, and runs the preemption check.
 *
 * @return Zero on success, -ECANCELED when the event must be aborted due to
 *         preparation overhead exceeding the preempt margin.
 */
static int prepare_cb_common(struct lll_prepare_param *p, uint8_t chan_idx)
{
	struct node_rx_pdu *node_rx;
	uint32_t ticks_at_event;
	uint32_t ticks_at_start;
	uint32_t remainder_us;
	struct lll_sync *lll;
	struct ull_hdr *ull;
	uint32_t remainder;
	uint32_t hcto;
	uint32_t ret;

	lll = p->param;

	/* Current window widening: fold in what prepare() accumulated */
	lll->window_widening_event_us += lll->window_widening_prepare_us;
	lll->window_widening_prepare_us = 0;
	if (lll->window_widening_event_us > lll->window_widening_max_us) {
		lll->window_widening_event_us = lll->window_widening_max_us;
	}

	/* Reset chain PDU being scheduled by lll_sync context */
	lll->is_aux_sched = 0U;

	/* Initialize Trx count */
	trx_cnt = 0U;

	/* Start setting up Radio h/w */
	radio_reset();

	radio_phy_set(lll->phy, PHY_FLAGS_S8);
	radio_pkt_configure(RADIO_PKT_CONF_LENGTH_8BIT, LL_EXT_OCTETS_RX_MAX,
			    RADIO_PKT_CONF_PHY(lll->phy));
	radio_aa_set(lll->access_addr);
	radio_crc_configure(PDU_CRC_POLYNOMIAL,
			    sys_get_le24(lll->crc_init));

	lll_chan_set(chan_idx);

	/* Peek only; the buffer is consumed later in the rx ISR path */
	node_rx = ull_pdu_rx_alloc_peek(1);
	LL_ASSERT(node_rx);

	radio_pkt_rx_set(node_rx->pdu);

	ticks_at_event = p->ticks_at_expire;
	ull = HDR_LLL2ULL(lll);
	ticks_at_event += lll_event_offset_get(ull);

	ticks_at_start = ticks_at_event;
	ticks_at_start += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

	remainder = p->remainder;
	remainder_us = radio_tmr_start(0, ticks_at_start, remainder);

	/* Capture the access-address timestamp for drift compensation */
	radio_tmr_aa_capture();

	/* Header complete timeout: jitter, ticker resolution margin and the
	 * event window widening apply on both sides of the anchor, hence the
	 * shift by one (multiply by 2).
	 */
	hcto = remainder_us +
	       ((EVENT_JITTER_US + EVENT_TICKER_RES_MARGIN_US + lll->window_widening_event_us)
		<< 1) +
	       lll->window_size_event_us;
	hcto += radio_rx_ready_delay_get(lll->phy, PHY_FLAGS_S8);
	hcto += addr_us_get(lll->phy);
	hcto += radio_rx_chain_delay_get(lll->phy, PHY_FLAGS_S8);
	radio_tmr_hcto_configure(hcto);

	radio_tmr_end_capture();

#if defined(HAL_RADIO_GPIO_HAVE_LNA_PIN)
	radio_gpio_lna_setup();

	radio_gpio_pa_lna_enable(remainder_us +
				 radio_rx_ready_delay_get(lll->phy,
							  PHY_FLAGS_S8) -
				 HAL_RADIO_GPIO_LNA_OFFSET);
#endif /* HAL_RADIO_GPIO_HAVE_LNA_PIN */

#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED) && \
	(EVENT_OVERHEAD_PREEMPT_US <= EVENT_OVERHEAD_PREEMPT_MIN_US)
	uint32_t overhead;

	overhead = lll_preempt_calc(ull, (TICKER_ID_SCAN_SYNC_BASE + ull_sync_lll_handle_get(lll)),
				    ticks_at_event);
	/* check if preempt to start has changed */
	if (overhead) {
		LL_ASSERT_OVERHEAD(overhead);

		radio_isr_set(isr_done, lll);
		radio_disable();

		return -ECANCELED;
	}
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */

	ret = lll_prepare_done(lll);
	LL_ASSERT(!ret);

	DEBUG_RADIO_START_O(1);

	return 0;
}
529
/* Decide whether the current sync event yields to the next prepared event.
 *
 * @return Zero to keep running the current event, -ECANCELED to abort it in
 *         favour of the next event. Never requests a resume (-EAGAIN).
 */
static int is_abort_cb(void *next, void *curr, lll_prepare_cb_t *resume_cb)
{
	/* Sync context shall not resume when being preempted, i.e. they
	 * shall not use -EAGAIN as return value.
	 */
	ARG_UNUSED(resume_cb);

	/* Different radio event overlap */
	if (next != curr) {
		struct lll_scan_aux *lll_aux;
		struct lll_scan *lll;

		lll = ull_scan_lll_is_valid_get(next);
		if (lll) {
			/* Do not abort current periodic sync event as next
			 * event is a scan event.
			 */
			return 0;
		}

		lll_aux = ull_scan_aux_lll_is_valid_get(next);
		if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_SKIP_ON_SCAN_AUX) &&
		    lll_aux) {
			/* Do not abort current periodic sync event as next
			 * event is a scan aux event.
			 */
			return 0;
		}

#if defined(CONFIG_BT_CTLR_SCAN_AUX_SYNC_RESERVE_MIN)
		struct lll_sync *lll_sync_next;
		struct lll_sync *lll_sync_curr;

		lll_sync_next = ull_sync_lll_is_valid_get(next);
		if (!lll_sync_next) {
			/* Abort current event as next event is not a
			 * scan and not a scan aux event.
			 */
			return -ECANCELED;
		}

		/* Fairness between competing sync contexts: yield to the one
		 * that has been aborted more often, saturating the counters
		 * at UINT8_MAX.
		 */
		lll_sync_curr = curr;
		if (lll_sync_curr->abort_count < lll_sync_next->abort_count) {
			if (lll_sync_curr->abort_count < UINT8_MAX) {
				lll_sync_curr->abort_count++;
			}

			/* Abort current event as next event has higher abort
			 * count.
			 */
			return -ECANCELED;
		}

		if (lll_sync_next->abort_count < UINT8_MAX) {
			lll_sync_next->abort_count++;
		}

#else /* !CONFIG_BT_CTLR_SCAN_AUX_SYNC_RESERVE_MIN */
		/* Abort current event as next event is not a
		 * scan and not a scan aux event.
		 */
		return -ECANCELED;
#endif /* !CONFIG_BT_CTLR_SCAN_AUX_SYNC_RESERVE_MIN */
	}

	/* Do not abort if current periodic sync event overlaps next interval
	 * or next event is a scan event.
	 */
	return 0;
}
600
/* Abort the current sync event, or clean up a prepare still in the pipeline.
 *
 * @param prepare_param NULL when an in-flight radio event must be aborted;
 *                      non-NULL when a queued prepare is being cancelled.
 * @param param         The lll_sync context of the event.
 */
static void abort_cb(struct lll_prepare_param *prepare_param, void *param)
{
	struct event_done_extra *e;
	struct lll_sync *lll;
	int err;

	/* NOTE: This is not a prepare being cancelled */
	if (!prepare_param) {
		/* Perform event abort here.
		 * After event has been cleanly aborted, clean up resources
		 * and dispatch event done.
		 */
		radio_isr_set(isr_done, param);
		radio_disable();

		return;
	}

	/* NOTE: Else clean the top half preparations of the aborted event
	 * currently in preparation pipeline.
	 */
	err = lll_hfclock_off();
	LL_ASSERT(err >= 0);

	/* Accumulate the latency as event is aborted while being in pipeline */
	lll = prepare_param->param;
	lll->skip_prepare += (prepare_param->lazy + 1U);

	/* Extra done event, to check sync lost */
	e = ull_event_done_extra_get();
	LL_ASSERT(e);

	e->type = EVENT_DONE_EXTRA_TYPE_SYNC;
	e->trx_cnt = 0U;
	e->crc_valid = 0U;
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING) && \
	defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
	e->sync_term = 0U;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING &&
	* CONFIG_BT_CTLR_CTEINLINE_SUPPORT
	*/

	lll_done(param);
}
645
/* Radio ISR: set up reception of an auxiliary chain PDU pointed to by the
 * AuxPtr of the PDU just received.
 *
 * @param param node_rx of the PDU carrying the AuxPtr; its rx_ftr holds the
 *              aux_ptr and the owning lll_sync context.
 */
static void isr_aux_setup(void *param)
{
	struct pdu_adv_aux_ptr *aux_ptr;
	struct node_rx_pdu *node_rx;
	uint32_t window_widening_us;
	uint32_t window_size_us;
	struct node_rx_ftr *ftr;
	uint32_t aux_offset_us;
	uint32_t aux_start_us;
	struct lll_sync *lll;
	uint32_t start_us;
	uint8_t phy_aux;
	uint32_t hcto;

	lll_isr_status_reset();

	node_rx = param;
	ftr = &node_rx->hdr.rx_ftr;
	aux_ptr = ftr->aux_ptr;
	phy_aux = BIT(PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
	ftr->aux_phy = phy_aux;

	lll = ftr->param;

	/* Determine the window size (AuxPtr offset units: 300 us or 30 us) */
	if (aux_ptr->offs_units) {
		window_size_us = OFFS_UNIT_300_US;
	} else {
		window_size_us = OFFS_UNIT_30_US;
	}

	/* Calculate the aux offset from start of the scan window */
	aux_offset_us = (uint32_t) PDU_ADV_AUX_PTR_OFFSET_GET(aux_ptr) * window_size_us;

	/* Calculate the window widening that needs to be deducted, based on
	 * the advertiser's clock accuracy bit (ca).
	 */
	if (aux_ptr->ca) {
		window_widening_us = SCA_DRIFT_50_PPM_US(aux_offset_us);
	} else {
		window_widening_us = SCA_DRIFT_500_PPM_US(aux_offset_us);
	}

	/* Setup radio for auxiliary PDU scan */
	radio_phy_set(phy_aux, PHY_FLAGS_S8);
	radio_pkt_configure(RADIO_PKT_CONF_LENGTH_8BIT, LL_EXT_OCTETS_RX_MAX,
			    RADIO_PKT_CONF_PHY(phy_aux));

	lll_chan_set(aux_ptr->chan_idx);

	radio_pkt_rx_set(node_rx->pdu);

	radio_isr_set(isr_rx_aux_chain, lll);

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	struct lll_df_sync_cfg *cfg;

	cfg = lll_df_sync_cfg_latest_get(&lll->df_cfg, NULL);

	if (cfg->is_enabled && is_max_cte_reached(cfg->max_cte_count, cfg->cte_count)) {
		int err;

		/* Prepare additional node for reporting insufficient memory for IQ samples
		 * reports.
		 */
		err = lll_df_iq_report_no_resources_prepare(lll);
		if (!err) {
			err = lll_df_conf_cte_rx_enable(cfg->slot_durations,
							cfg->ant_sw_len,
							cfg->ant_ids,
							aux_ptr->chan_idx,
							CTE_INFO_IN_PAYLOAD,
							PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
			if (err) {
				lll->is_cte_incomplete = true;
			}
		} else {
			lll->is_cte_incomplete = true;
		}
	} else if (!cfg->is_enabled) {
		/* If CTE reception is disabled, release additional node allocated to report
		 * insufficient memory for IQ samples.
		 */
		iq_report_incomplete_release_put(lll);
	}
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
	radio_switch_complete_and_disable();

	/* Setup radio rx on micro second offset. Note that radio_end_us stores
	 * PDU start time in this case.
	 */
	aux_start_us = ftr->radio_end_us + aux_offset_us;
	aux_start_us -= lll_radio_rx_ready_delay_get(phy_aux, PHY_FLAGS_S8);
	aux_start_us -= window_widening_us;
	aux_start_us -= EVENT_JITTER_US;

	start_us = radio_tmr_start_us(0, aux_start_us);

	/* Setup header complete timeout */
	hcto = start_us;
	hcto += EVENT_JITTER_US;
	hcto += window_widening_us;
	hcto += lll_radio_rx_ready_delay_get(phy_aux, PHY_FLAGS_S8);
	hcto += window_size_us;
	hcto += radio_rx_chain_delay_get(phy_aux, PHY_FLAGS_S8);
	hcto += addr_us_get(phy_aux);
	radio_tmr_hcto_configure(hcto);

	/* capture end of Rx-ed PDU, extended scan to schedule auxiliary
	 * channel chaining, create connection or to create periodic sync.
	 */
	radio_tmr_end_capture();

	/* scanner always measures RSSI */
	radio_rssi_measure();

#if defined(HAL_RADIO_GPIO_HAVE_LNA_PIN)
	radio_gpio_lna_setup();

	radio_gpio_pa_lna_enable(start_us +
				 radio_rx_ready_delay_get(phy_aux,
							  PHY_FLAGS_S8) -
				 HAL_RADIO_GPIO_LNA_OFFSET);
#endif /* HAL_RADIO_GPIO_HAVE_LNA_PIN */
}
769
770 /**
771 * @brief Common part of ISR responsible for handling PDU receive.
772 *
773 * @param lll Pointer to LLL sync object.
774 * @param node_type Type of a receive node to be set for handling by ULL.
775 * @param crc_ok Informs if received PDU has correct CRC.
776 * @param phy_flags_rx Received Coded PHY coding scheme (0 - S1, 1 - S8).
777 * @param cte_ready Informs if received PDU has CTEInfo present and IQ samples were collected.
778 * @param rssi_ready Informs if RSSI for received PDU is ready.
779 * @param status Informs about periodic advertisement synchronization status.
780 *
781 * @return Zero in case of there is no chained PDU or there is a chained PDUs but spaced long enough
782 * to schedule its reception by ULL.
783 * @return -EBUSY in case there is a chained PDU scheduled by LLL due to short spacing.
784 */
static int isr_rx(struct lll_sync *lll, uint8_t node_type, uint8_t crc_ok,
		  uint8_t phy_flags_rx, uint8_t cte_ready, uint8_t rssi_ready,
		  enum sync_status status)
{
	bool sched = false;
	int err;

	/* Check CRC and generate Periodic Advertising Report */
	if (crc_ok) {
		struct node_rx_pdu *node_rx;

		/* Verify if there are free RX buffers for:
		 * - reporting just received PDU
		 * - allocating an extra node_rx for periodic report incomplete
		 * - a buffer for receiving data in a connection
		 * - a buffer for receiving empty PDU
		 *
		 * If this is a reception of chained PDU, node_type is
		 * NODE_RX_TYPE_EXT_AUX_REPORT, then there is no need to reserve
		 * again a node_rx for periodic report incomplete.
		 */
		if (node_type != NODE_RX_TYPE_EXT_AUX_REPORT) {
			node_rx = ull_pdu_rx_alloc_peek(4);
		} else {
			node_rx = ull_pdu_rx_alloc_peek(3);
		}

		if (node_rx) {
			struct node_rx_ftr *ftr;
			struct pdu_adv *pdu;

			/* Commit the buffer peeked above */
			ull_pdu_rx_alloc();

			node_rx->hdr.type = node_type;

			/* Fill in the footer consumed by ULL when generating
			 * the report.
			 */
			ftr = &(node_rx->hdr.rx_ftr);
			ftr->param = lll;
			ftr->aux_failed = 0U;
			ftr->rssi = (rssi_ready) ? radio_rssi_get() :
						   BT_HCI_LE_RSSI_NOT_AVAILABLE;
			ftr->ticks_anchor = radio_tmr_start_get();
			ftr->radio_end_us = radio_tmr_end_get() -
					    radio_rx_chain_delay_get(lll->phy,
								     phy_flags_rx);
			ftr->phy_flags = phy_flags_rx;
			ftr->sync_status = status;
			ftr->sync_rx_enabled = lll->is_rx_enabled;

			if (node_type != NODE_RX_TYPE_EXT_AUX_REPORT) {
				/* Extra node_rx reserved for periodic report
				 * incomplete handling by ULL.
				 */
				ftr->extra = ull_pdu_rx_alloc();
			}

			pdu = (void *)node_rx->pdu;

			/* Schedule reception of a chained PDU, if the received
			 * PDU carries an AuxPtr.
			 */
			ftr->aux_lll_sched = lll_scan_aux_setup(pdu, lll->phy,
								phy_flags_rx,
								isr_aux_setup,
								lll);
			if (ftr->aux_lll_sched) {
				if (node_type != NODE_RX_TYPE_EXT_AUX_REPORT) {
					lll->is_aux_sched = 1U;
				}

				err = -EBUSY;
			} else {
				err = 0;
			}

			ull_rx_put(node_rx->hdr.link, node_rx);

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
			if (cte_ready) {
				/* If there is a periodic advertising report generate IQ data
				 * report with valid packet_status if there were free nodes for
				 * that. Or report insufficient resources for IQ data report.
				 *
				 * Returned value is not checked because it does not matter if
				 * there is an IQ report to be sent towards ULL. There is always
				 * a periodic sync report to be sent.
				 */
				(void)iq_report_create_put(lll, rssi_ready, BT_HCI_LE_CTE_CRC_OK);
			}
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

			sched = true;
		} else {
			/* No free buffers: only a chained PDU reception is
			 * reported as an error to the caller.
			 */
			if (node_type == NODE_RX_TYPE_EXT_AUX_REPORT) {
				err = -ENOMEM;
			} else {
				err = 0;
			}
		}
	} else {
#if defined(CONFIG_BT_CTLR_DF_SAMPLE_CTE_FOR_PDU_WITH_BAD_CRC)
		/* In case of reception of chained PDUs IQ samples report for a PDU with wrong
		 * CRC is handled by caller. It has to be that way to be sure the IQ report
		 * follows possible periodic advertising report.
		 */
		if (cte_ready && node_type != NODE_RX_TYPE_EXT_AUX_REPORT) {
			err = iq_report_create_put(lll, rssi_ready,
						   BT_HCI_LE_CTE_CRC_ERR_CTE_BASED_TIME);
			if (!err) {
				sched = true;
			}
		}
#endif /* CONFIG_BT_CTLR_DF_SAMPLE_CTE_FOR_PDU_WITH_BAD_CRC */

		err = 0;
	}

	if (sched) {
		ull_rx_sched();
	}

	return err;
}
901
/* Radio ISR for the first AUX_SYNC_IND reception while establishing sync.
 *
 * Applies the CTE type filter: an allowed PDU follows the regular rx path,
 * a filtered PDU with filter policy requesting termination generates a
 * NODE_RX_TYPE_SYNC with SYNC_STAT_TERM so ULL drops the sync attempt.
 */
static void isr_rx_adv_sync_estab(void *param)
{
	enum sync_status sync_ok;
	struct lll_sync *lll;
	uint8_t phy_flags_rx;
	uint8_t rssi_ready;
	uint8_t cte_ready;
	uint8_t trx_done;
	uint8_t crc_ok;
	int err;

	lll = param;

	/* Read radio status and events */
	trx_done = radio_is_done();
	if (trx_done) {
		crc_ok = radio_crc_is_valid();
		rssi_ready = radio_rssi_is_ready();
		phy_flags_rx = radio_phy_flags_rx_get();
		sync_ok = sync_filtrate_by_cte_type(lll->cte_type, lll->filter_policy);
		trx_cnt = 1U;

		if (IS_ENABLED(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)) {
			cte_ready = radio_df_cte_ready();
		} else {
			cte_ready = 0U;
		}
	} else {
		crc_ok = phy_flags_rx = rssi_ready = cte_ready = 0U;
		/* Initiated as allowed, crc_ok takes precedence during handling of PDU
		 * reception in the situation.
		 */
		sync_ok = SYNC_STAT_ALLOWED;
	}

	/* Clear radio rx status and events */
	lll_isr_rx_status_reset();

	/* No Rx */
	if (!trx_done) {
		/* TODO: Combine the early exit with above if-then-else block
		 */
#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
		LL_ASSERT(!lll->node_cte_incomplete);
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

		goto isr_rx_done;
	}

	/* Save radio ready and address capture timestamp for later use for
	 * drift compensation.
	 */
	radio_tmr_aa_save(radio_tmr_aa_get());
	radio_tmr_ready_save(radio_tmr_ready_get());

	/* Handle regular PDU reception if CTE type is acceptable */
	if (sync_ok == SYNC_STAT_ALLOWED) {
		err = isr_rx(lll, NODE_RX_TYPE_SYNC, crc_ok, phy_flags_rx,
			     cte_ready, rssi_ready, SYNC_STAT_ALLOWED);
		if (err == -EBUSY) {
			/* Chain PDU scheduled by LLL; cleanup deferred */
			return;
		}
	} else if (sync_ok == SYNC_STAT_TERM) {
		struct node_rx_pdu *node_rx;

		/* Verify if there are free RX buffers for:
		 * - reporting just received PDU
		 * - a buffer for receiving data in a connection
		 * - a buffer for receiving empty PDU
		 */
		node_rx = ull_pdu_rx_alloc_peek(3);
		if (node_rx) {
			struct node_rx_ftr *ftr;

			ull_pdu_rx_alloc();

			node_rx->hdr.type = NODE_RX_TYPE_SYNC;

			ftr = &node_rx->hdr.rx_ftr;
			ftr->param = lll;
			ftr->sync_status = SYNC_STAT_TERM;

			ull_rx_put_sched(node_rx->hdr.link, node_rx);
		}
	}

isr_rx_done:
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING) && \
	defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
	isr_rx_done_cleanup(lll, crc_ok, sync_ok != SYNC_STAT_ALLOWED);
#else
	isr_rx_done_cleanup(lll, crc_ok, false);
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING && CONFIG_BT_CTLR_CTEINLINE_SUPPORT */
}
996
/* Radio ISR for AUX_SYNC_IND reception once sync is established. */
static void isr_rx_adv_sync(void *param)
{
	struct lll_sync *lll;
	uint8_t phy_flags_rx;
	uint8_t rssi_ready;
	uint8_t cte_ready;
	uint8_t trx_done;
	uint8_t crc_ok;
	int err;

	lll = param;

	/* Read radio status and events */
	trx_done = radio_is_done();
	if (trx_done) {
		crc_ok = radio_crc_is_valid();
		rssi_ready = radio_rssi_is_ready();
		phy_flags_rx = radio_phy_flags_rx_get();
		trx_cnt = 1U;

		if (IS_ENABLED(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)) {
			cte_ready = radio_df_cte_ready();
		} else {
			cte_ready = 0U;
		}
	} else {
		crc_ok = phy_flags_rx = rssi_ready = cte_ready = 0U;
	}

	/* Clear radio rx status and events */
	lll_isr_rx_status_reset();

	/* No Rx */
	if (!trx_done) {
		/* TODO: Combine the early exit with above if-then-else block
		 */
		goto isr_rx_done;
	}

	/* Save radio ready and address capture timestamp for later use for
	 * drift compensation.
	 */
	radio_tmr_aa_save(radio_tmr_aa_get());
	radio_tmr_ready_save(radio_tmr_ready_get());

	/* When periodic advertisement is synchronized, the CTEType may change. It should not
	 * affect synchronization even when new CTE type is not allowed by sync parameters.
	 * Hence the SYNC_STAT_READY is set.
	 */
	err = isr_rx(lll, NODE_RX_TYPE_SYNC_REPORT, crc_ok, phy_flags_rx, cte_ready, rssi_ready,
		     SYNC_STAT_READY);
	if (err == -EBUSY) {
		/* Chain PDU scheduled by LLL; cleanup deferred */
		return;
	}

isr_rx_done:
	isr_rx_done_cleanup(lll, crc_ok, false);
}
1055
/* Radio ISR for reception of an AUX_CHAIN_IND PDU of the sync train.
 *
 * On failure (CRC error, no buffers, or missing aux context) a
 * NODE_RX_TYPE_EXT_AUX_RELEASE node is generated so ULL can release the aux
 * context and mark the HCI report as incomplete.
 */
static void isr_rx_aux_chain(void *param)
{
	struct lll_scan_aux *lll_aux;
	struct lll_sync *lll;
	uint8_t phy_flags_rx;
	uint8_t rssi_ready;
	uint8_t cte_ready;
	uint8_t trx_done;
	uint8_t crc_ok;
	int err;

	lll = param;
	lll_aux = lll->lll_aux;
	if (!lll_aux) {
		/* auxiliary context not assigned (yet) in ULL execution
		 * context, drop current reception and abort further chain PDU
		 * receptions, if any.
		 */
		lll_isr_status_reset();

		rssi_ready = 0U;
		cte_ready = 0U;
		crc_ok = 0U;
		err = 0;

		goto isr_rx_aux_chain_done;
	}

	/* Read radio status and events */
	trx_done = radio_is_done();
	if (trx_done) {
		crc_ok = radio_crc_is_valid();
		phy_flags_rx = radio_phy_flags_rx_get();
		rssi_ready = radio_rssi_is_ready();

		if (IS_ENABLED(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)) {
			cte_ready = radio_df_cte_ready();
		} else {
			cte_ready = 0U;
		}
	} else {
		crc_ok = phy_flags_rx = rssi_ready = cte_ready = 0U;
	}

	/* Clear radio rx status and events */
	lll_isr_rx_status_reset();

	/* No Rx */
	if (!trx_done) {
		/* TODO: Combine the early exit with above if-then-else block
		 */

		err = 0;

		goto isr_rx_aux_chain_done;
	}

	/* When periodic advertisement is synchronized, the CTEType may change. It should not
	 * affect synchronization even when new CTE type is not allowed by sync parameters.
	 * Hence the SYNC_STAT_READY is set.
	 */
	err = isr_rx(lll, NODE_RX_TYPE_EXT_AUX_REPORT, crc_ok, phy_flags_rx, cte_ready, rssi_ready,
		     SYNC_STAT_READY);
	if (err == -EBUSY) {
		/* Further chain PDU scheduled by LLL; cleanup deferred */
		return;
	}

isr_rx_aux_chain_done:
	if (!crc_ok || err) {
		struct node_rx_pdu *node_rx;

		/* Generate message to release aux context and flag the report
		 * generated thereafter by HCI as incomplete.
		 */
		node_rx = ull_pdu_rx_alloc();
		LL_ASSERT(node_rx);

		node_rx->hdr.type = NODE_RX_TYPE_EXT_AUX_RELEASE;

		node_rx->hdr.rx_ftr.param = lll;
		node_rx->hdr.rx_ftr.aux_failed = 1U;

		ull_rx_put(node_rx->hdr.link, node_rx);

		if (!crc_ok) {
#if defined(CONFIG_BT_CTLR_DF_SAMPLE_CTE_FOR_PDU_WITH_BAD_CRC)
			if (cte_ready) {
				(void)iq_report_create_put(lll, rssi_ready,
							   BT_HCI_LE_CTE_CRC_ERR_CTE_BASED_TIME);
			}
#endif /* CONFIG_BT_CTLR_DF_SAMPLE_CTE_FOR_PDU_WITH_BAD_CRC */
		} else {
#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
			/* Report insufficient resources for IQ data report and release the
			 * additional node_rx_iq_data stored in lll_sync object, to avoid
			 * buffer leakage.
			 */
			iq_report_incomplete_create_put(lll);
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
		}

		ull_rx_sched();
	}

	if (lll->is_aux_sched) {
		/* Chain was scheduled by the lll_sync context itself; run the
		 * sync event done cleanup path.
		 */
		lll->is_aux_sched = 0U;

		isr_rx_done_cleanup(lll, 1U, false);
	} else {
		lll_isr_cleanup(lll_aux);
	}
}
1167
/* Fill the event-done-extra information for the sync event and perform the
 * common LLL ISR cleanup.
 *
 * @param lll       Periodic sync LLL context.
 * @param crc_ok    1U when a PDU with valid CRC was received in this event.
 * @param sync_term Terminate-sync indication; only stored when CTE type
 *                  filtering with inline CTE support is enabled.
 */
static void isr_rx_done_cleanup(struct lll_sync *lll, uint8_t crc_ok, bool sync_term)
{
	struct event_done_extra *e;

	/* Calculate and place the drift information in done event */
	e = ull_event_done_extra_get();
	LL_ASSERT(e);

	e->type = EVENT_DONE_EXTRA_TYPE_SYNC;
	e->trx_cnt = trx_cnt;
	e->crc_valid = crc_ok;
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING) && \
	defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
	e->sync_term = sync_term;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING && CONFIG_BT_CTLR_CTEINLINE_SUPPORT */
	if (trx_cnt) {
		/* At least one PDU received: compute drift from the saved
		 * radio-ready and access-address capture timestamps.
		 */
		e->drift.preamble_to_addr_us = addr_us_get(lll->phy);
		e->drift.start_to_address_actual_us =
			radio_tmr_aa_restore() - radio_tmr_ready_restore();
		e->drift.window_widening_event_us = lll->window_widening_event_us;

		/* Reset window widening, as anchor point sync-ed */
		lll->window_widening_event_us = 0U;
		lll->window_size_event_us = 0U;

#if defined(CONFIG_BT_CTLR_SCAN_AUX_SYNC_RESERVE_MIN)
		/* Reset LLL abort count as LLL event is gracefully done and
		 * was not aborted by any other event when current event could
		 * have been using unreserved time space.
		 */
		lll->abort_count = 0U;
#endif /* CONFIG_BT_CTLR_SCAN_AUX_SYNC_RESERVE_MIN */
	}

	lll_isr_cleanup(lll);
}
1204
isr_done(void * param)1205 static void isr_done(void *param)
1206 {
1207 struct lll_sync *lll;
1208
1209 lll_isr_status_reset();
1210
1211 /* Generate incomplete data status and release aux context when
1212 * sync event is using LLL scheduling.
1213 */
1214 lll = param;
1215
1216 /* LLL scheduling used for chain PDU reception is aborted/preempted */
1217 if (lll->is_aux_sched) {
1218 struct node_rx_pdu *node_rx;
1219
1220 lll->is_aux_sched = 0U;
1221
1222 /* Generate message to release aux context and flag the report
1223 * generated thereafter by HCI as incomplete.
1224 */
1225 node_rx = ull_pdu_rx_alloc();
1226 LL_ASSERT(node_rx);
1227
1228 node_rx->hdr.type = NODE_RX_TYPE_EXT_AUX_RELEASE;
1229
1230 node_rx->hdr.rx_ftr.param = lll;
1231 node_rx->hdr.rx_ftr.aux_failed = 1U;
1232
1233 ull_rx_put_sched(node_rx->hdr.link, node_rx);
1234 }
1235
1236 isr_rx_done_cleanup(param, ((trx_cnt) ? 1U : 0U), false);
1237 }
1238
1239 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
iq_report_create(struct lll_sync * lll,uint8_t rssi_ready,uint8_t packet_status,uint8_t slot_durations,struct node_rx_iq_report * iq_report)1240 static void iq_report_create(struct lll_sync *lll, uint8_t rssi_ready, uint8_t packet_status,
1241 uint8_t slot_durations, struct node_rx_iq_report *iq_report)
1242 {
1243 struct node_rx_ftr *ftr;
1244 uint8_t cte_info;
1245 uint8_t ant;
1246
1247 cte_info = radio_df_cte_status_get();
1248 ant = radio_df_pdu_antenna_switch_pattern_get();
1249
1250 iq_report->hdr.type = NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT;
1251 iq_report->sample_count = radio_df_iq_samples_amount_get();
1252 iq_report->packet_status = packet_status;
1253 iq_report->rssi_ant_id = ant;
1254 iq_report->cte_info = *(struct pdu_cte_info *)&cte_info;
1255 iq_report->local_slot_durations = slot_durations;
1256 /* Event counter is updated to next value during event preparation, hence
1257 * it has to be subtracted to store actual event counter value.
1258 */
1259 iq_report->event_counter = lll->event_counter - 1;
1260
1261 ftr = &iq_report->hdr.rx_ftr;
1262 ftr->param = lll;
1263 ftr->rssi =
1264 ((rssi_ready) ? radio_rssi_get() : BT_HCI_LE_RSSI_NOT_AVAILABLE);
1265 }
1266
/* Populate an IQ sample report node that flags insufficient resources
 * (no IQ samples collected) for the current event.
 *
 * @param lll       Periodic sync LLL context.
 * @param iq_report Report node to fill (allocated by the caller).
 */
static void iq_report_incomplete_create(struct lll_sync *lll, struct node_rx_iq_report *iq_report)
{
	struct node_rx_ftr *ftr;

	iq_report->hdr.type = NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT;
	iq_report->sample_count = 0;
	iq_report->packet_status = BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES;
	/* Event counter is updated to next value during event preparation,
	 * hence it has to be subtracted to store actual event counter
	 * value.
	 */
	iq_report->event_counter = lll->event_counter - 1;
	/* The PDU antenna is set in configuration, hence it is always
	 * available. BT 5.3 Core Spec. does not say if this field
	 * may be invalid in case of insufficient resources.
	 */
	iq_report->rssi_ant_id = radio_df_pdu_antenna_switch_pattern_get();
	/* According to BT 5.3, Vol 4, Part E, section 7.7.65.21 below
	 * fields have invalid values in case of insufficient resources.
	 */
	iq_report->cte_info =
		(struct pdu_cte_info){.time = 0, .rfu = 0, .type = 0};
	iq_report->local_slot_durations = 0;

	ftr = &iq_report->hdr.rx_ftr;
	ftr->param = lll;

	ftr->rssi = BT_HCI_LE_RSSI_NOT_AVAILABLE;
	ftr->extra = NULL;
}
1297
iq_report_create_put(struct lll_sync * lll,uint8_t rssi_ready,uint8_t packet_status)1298 static int iq_report_create_put(struct lll_sync *lll, uint8_t rssi_ready, uint8_t packet_status)
1299 {
1300 struct node_rx_iq_report *iq_report;
1301 struct lll_df_sync_cfg *cfg;
1302 int err;
1303
1304 cfg = lll_df_sync_cfg_curr_get(&lll->df_cfg);
1305
1306 if (cfg->is_enabled) {
1307 if (!lll->is_cte_incomplete &&
1308 is_max_cte_reached(cfg->max_cte_count, cfg->cte_count)) {
1309 iq_report = ull_df_iq_report_alloc();
1310 LL_ASSERT(iq_report);
1311
1312 iq_report_create(lll, rssi_ready, packet_status,
1313 cfg->slot_durations, iq_report);
1314 err = 0;
1315 } else if (lll->is_cte_incomplete && is_max_cte_reached(cfg->max_cte_count,
1316 cfg->cte_count)) {
1317 iq_report = lll->node_cte_incomplete;
1318
1319 /* Reception of chained PDUs may be still in progress. Do not report
1320 * insufficient resources multiple times.
1321 */
1322 if (iq_report) {
1323 iq_report_incomplete_create(lll, iq_report);
1324 lll->node_cte_incomplete = NULL;
1325
1326 /* Report ready to be send to ULL */
1327 err = 0;
1328 } else {
1329 /* Incomplete CTE was already reported */
1330 err = -ENODATA;
1331 }
1332 } else {
1333 err = -ENODATA;
1334 }
1335 } else {
1336 err = -ENODATA;
1337 }
1338
1339 if (!err) {
1340 ull_rx_put(iq_report->hdr.link, iq_report);
1341
1342 cfg->cte_count += 1U;
1343 }
1344
1345 return err;
1346 }
1347
iq_report_incomplete_create_put(struct lll_sync * lll)1348 static int iq_report_incomplete_create_put(struct lll_sync *lll)
1349 {
1350 struct lll_df_sync_cfg *cfg;
1351
1352 cfg = lll_df_sync_cfg_curr_get(&lll->df_cfg);
1353
1354 if (cfg->is_enabled) {
1355 struct node_rx_iq_report *iq_report;
1356
1357 iq_report = lll->node_cte_incomplete;
1358
1359 /* Reception of chained PDUs may be still in progress. Do not report
1360 * insufficient resources multiple times.
1361 */
1362 if (iq_report) {
1363 iq_report_incomplete_create(lll, iq_report);
1364
1365 lll->node_cte_incomplete = NULL;
1366 ull_rx_put(iq_report->hdr.link, iq_report);
1367
1368 return 0;
1369 } else {
1370 /* Incomplete CTE was already reported */
1371 return -ENODATA;
1372 }
1373
1374 }
1375
1376 return -ENODATA;
1377 }
1378
iq_report_incomplete_release_put(struct lll_sync * lll)1379 static void iq_report_incomplete_release_put(struct lll_sync *lll)
1380 {
1381 if (lll->node_cte_incomplete) {
1382 struct node_rx_iq_report *iq_report = lll->node_cte_incomplete;
1383
1384 iq_report->hdr.type = NODE_RX_TYPE_IQ_SAMPLE_REPORT_LLL_RELEASE;
1385
1386 ull_rx_put(iq_report->hdr.link, iq_report);
1387 lll->node_cte_incomplete = NULL;
1388 }
1389 }
is_max_cte_reached(uint8_t max_cte_count,uint8_t cte_count)1390 static bool is_max_cte_reached(uint8_t max_cte_count, uint8_t cte_count)
1391 {
1392 return max_cte_count == BT_HCI_LE_SAMPLE_CTE_ALL || cte_count < max_cte_count;
1393 }
1394 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1395
/* Select the radio channel for the current sync event using channel
 * selection algorithm #2, applying a pending channel map update when its
 * instant has been reached.
 *
 * @param lll Periodic sync LLL context.
 *
 * @return Data channel index for this event.
 */
static uint8_t data_channel_calc(struct lll_sync *lll)
{
	uint8_t data_chan_count;
	uint8_t *data_chan_map;

	/* Process channel map update, if any */
	if (lll->chm_first != lll->chm_last) {
		uint16_t instant_latency;

		/* Events elapsed since the update instant, modulo the event
		 * counter range; values <= EVENT_INSTANT_LATENCY_MAX mean the
		 * instant has been reached or passed.
		 */
		instant_latency = (lll->event_counter + lll->skip_event - lll->chm_instant) &
				  EVENT_INSTANT_MAX;
		if (instant_latency <= EVENT_INSTANT_LATENCY_MAX) {
			/* At or past the instant, use channelMapNew */
			lll->chm_first = lll->chm_last;
		}
	}

	/* Calculate the radio channel to use */
	data_chan_map = lll->chm[lll->chm_first].data_chan_map;
	data_chan_count = lll->chm[lll->chm_first].data_chan_count;
	return lll_chan_sel_2(lll->event_counter + lll->skip_event, lll->data_chan_id,
			      data_chan_map, data_chan_count);
}
1419
/* Check whether the received PDU passes the sync CTE type filter.
 *
 * When CTE type filtering with inline CTE support is compiled in, the CTE
 * time and type are read from the radio and validated against the sync
 * parameters; otherwise every PDU is allowed and both parameters are
 * unused.
 *
 * NOTE: with the configs enabled the trailing return is unreachable; it
 * only provides the fallback when the block is compiled out.
 */
static enum sync_status sync_filtrate_by_cte_type(uint8_t cte_type_mask, uint8_t filter_policy)
{
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING) && \
	defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
	uint8_t rx_cte_time;
	uint8_t rx_cte_type;

	rx_cte_time = nrf_radio_cte_time_get(NRF_RADIO);
	rx_cte_type = nrf_radio_cte_type_get(NRF_RADIO);

	return lll_sync_cte_is_allowed(cte_type_mask, filter_policy, rx_cte_time, rx_cte_type);

#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING && CONFIG_BT_CTLR_CTEINLINE_SUPPORT */
	return SYNC_STAT_ALLOWED;
}
1435