1 /*
2  * Copyright (c) 2020 Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 #include <stdint.h>
7 
8 #include <zephyr/toolchain.h>
9 #include <zephyr/sys/util.h>
10 #include <zephyr/sys/byteorder.h>
11 
12 #include "hal/ccm.h"
13 #include "hal/radio.h"
14 #include "hal/ticker.h"
15 #include "hal/radio_df.h"
16 
17 #include "util/util.h"
18 #include "util/memq.h"
19 #include "util/dbuf.h"
20 
21 #include "pdu_df.h"
22 #include "pdu_vendor.h"
23 #include "pdu.h"
24 
25 #include "lll.h"
26 #include "lll_vendor.h"
27 #include "lll_clock.h"
28 #include "lll_chan.h"
29 #include "lll_df_types.h"
30 #include "lll_scan.h"
31 #include "lll_sync.h"
32 
33 #include "lll_internal.h"
34 #include "lll_tim_internal.h"
35 #include "lll_prof_internal.h"
36 #include "lll_scan_internal.h"
37 
38 #include "lll_df.h"
39 #include "lll_df_internal.h"
40 
41 #include "ll_feat.h"
42 
43 #include <zephyr/bluetooth/hci_types.h>
44 
45 #include <soc.h>
46 #include "hal/debug.h"
47 
48 static int init_reset(void);
49 static void prepare(void *param);
50 static int create_prepare_cb(struct lll_prepare_param *p);
51 static int prepare_cb(struct lll_prepare_param *p);
52 static int prepare_cb_common(struct lll_prepare_param *p, uint8_t chan_idx);
53 static int is_abort_cb(void *next, void *curr, lll_prepare_cb_t *resume_cb);
54 static void abort_cb(struct lll_prepare_param *prepare_param, void *param);
55 static int isr_rx(struct lll_sync *lll, uint8_t node_type, uint8_t crc_ok,
56 		  uint8_t phy_flags_rx, uint8_t cte_ready, uint8_t rssi_ready,
57 		  enum sync_status status);
58 static void isr_rx_adv_sync_estab(void *param);
59 static void isr_rx_adv_sync(void *param);
60 static void isr_rx_aux_chain(void *param);
61 static void isr_rx_done_cleanup(struct lll_sync *lll, uint8_t crc_ok, bool sync_term);
62 static void isr_done(void *param);
63 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
64 static int iq_report_create_put(struct lll_sync *lll, uint8_t rssi_ready,
65 				uint8_t packet_status);
66 static int iq_report_incomplete_create_put(struct lll_sync *lll);
67 static void iq_report_incomplete_release_put(struct lll_sync *lll);
68 static bool is_max_cte_reached(uint8_t max_cte_count, uint8_t cte_count);
69 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
70 static uint8_t data_channel_calc(struct lll_sync *lll);
71 static enum sync_status sync_filtrate_by_cte_type(uint8_t cte_type_mask, uint8_t filter_policy);
72 
73 static uint8_t trx_cnt;
74 
/**
 * @brief Initialize the LLL periodic sync module.
 *
 * @return Zero on success, else the error code from init_reset().
 */
int lll_sync_init(void)
{
	return init_reset();
}
86 
/**
 * @brief Reset the LLL periodic sync module.
 *
 * @return Zero on success, else the error code from init_reset().
 */
int lll_sync_reset(void)
{
	return init_reset();
}
98 
lll_sync_create_prepare(void * param)99 void lll_sync_create_prepare(void *param)
100 {
101 	int err;
102 
103 	prepare(param);
104 
105 	/* Invoke common pipeline handling of prepare */
106 	err = lll_prepare(is_abort_cb, abort_cb, create_prepare_cb, 0, param);
107 	LL_ASSERT(!err || err == -EINPROGRESS);
108 }
109 
lll_sync_prepare(void * param)110 void lll_sync_prepare(void *param)
111 {
112 	int err;
113 
114 	prepare(param);
115 
116 	/* Invoke common pipeline handling of prepare */
117 	err = lll_prepare(is_abort_cb, abort_cb, prepare_cb, 0, param);
118 	LL_ASSERT(!err || err == -EINPROGRESS);
119 }
120 
prepare(void * param)121 static void prepare(void *param)
122 {
123 	struct lll_prepare_param *p;
124 	struct lll_sync *lll;
125 	int err;
126 
127 	/* Request to start HF Clock */
128 	err = lll_hfclock_on();
129 	LL_ASSERT(err >= 0);
130 
131 	p = param;
132 
133 	lll = p->param;
134 
135 	lll->lazy_prepare = p->lazy;
136 
137 	/* Accumulate window widening */
138 	lll->window_widening_prepare_us += lll->window_widening_periodic_us *
139 					   (lll->lazy_prepare + 1U);
140 	if (lll->window_widening_prepare_us > lll->window_widening_max_us) {
141 		lll->window_widening_prepare_us = lll->window_widening_max_us;
142 	}
143 }
144 
/**
 * @brief Set up the radio to receive an AUX_SYNC/chain PDU for a sync train.
 *
 * Configures PHY, packet format, access address, CRC and channel from the
 * sync and scan-aux contexts, arms the chain-PDU receive ISR, and (when
 * CONFIG_BT_CTLR_DF_SCAN_CTE_RX is enabled) configures CTE IQ sampling.
 *
 * @param lll     Periodic sync LLL context (access address, CRC init, DF cfg).
 * @param lll_aux Scan aux LLL context providing PHY and channel to use.
 */
void lll_sync_aux_prepare_cb(struct lll_sync *lll,
			     struct lll_scan_aux *lll_aux)
{
	struct node_rx_pdu *node_rx;

	/* Initialize Trx count */
	trx_cnt = 0U;

	/* Start setting up Radio h/w */
	radio_reset();

	radio_phy_set(lll_aux->phy, PHY_FLAGS_S8);
	radio_pkt_configure(RADIO_PKT_CONF_LENGTH_8BIT, LL_EXT_OCTETS_RX_MAX,
			    RADIO_PKT_CONF_PHY(lll_aux->phy));

	/* Peek a free receive node; allocation itself happens on reception */
	node_rx = ull_pdu_rx_alloc_peek(1);
	LL_ASSERT(node_rx);

	radio_pkt_rx_set(node_rx->pdu);

	/* Set access address for sync */
	radio_aa_set(lll->access_addr);
	radio_crc_configure(PDU_CRC_POLYNOMIAL,
				sys_get_le24(lll->crc_init));

	lll_chan_set(lll_aux->chan);

	radio_isr_set(isr_rx_aux_chain, lll);

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	struct lll_df_sync_cfg *cfg;

	cfg = lll_df_sync_cfg_latest_get(&lll->df_cfg, NULL);

	if (cfg->is_enabled) {
		int err;

		/* Prepare additional node for reporting insufficient memory for IQ samples
		 * reports.
		 */
		err = lll_df_iq_report_no_resources_prepare(lll);
		if (!err) {
			err = lll_df_conf_cte_rx_enable(cfg->slot_durations, cfg->ant_sw_len,
							cfg->ant_ids, lll_aux->chan,
							CTE_INFO_IN_PAYLOAD, lll_aux->phy);
			if (err) {
				/* Any failure marks CTE sampling as incomplete
				 * for this event.
				 */
				lll->is_cte_incomplete = true;
			}
		} else {
			lll->is_cte_incomplete = true;
		}
		cfg->cte_count = 0;
	} else {
		/* If CTE reception is disabled, release additional node allocated to report
		 * insufficient memory for IQ samples.
		 */
		iq_report_incomplete_release_put(lll);
	}
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
	/* Single reception; disable radio after PDU (and any CTE) is done */
	radio_switch_complete_and_disable();
}
206 
207 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
/**
 * @brief Evaluate a received PDU's CTE against the sync CTE type filter.
 *
 * @param cte_type_mask Bitmask of CTE types to reject (HCI create-sync format).
 * @param filter_policy Non-zero if scanning should continue after rejection.
 * @param rx_cte_time   CTETime of received PDU; zero means no CTEInfo present.
 * @param rx_cte_type   CTEType of received PDU (valid only if rx_cte_time > 0).
 *
 * @return SYNC_STAT_ALLOWED when the PDU passes the filter,
 *         SYNC_STAT_CONT_SCAN when rejected but scanning continues,
 *         SYNC_STAT_TERM when rejected and sync is to be terminated.
 */
enum sync_status lll_sync_cte_is_allowed(uint8_t cte_type_mask, uint8_t filter_policy,
					 uint8_t rx_cte_time, uint8_t rx_cte_type)
{
	bool allowed;

	/* No filtering requested: every PDU is acceptable */
	if (cte_type_mask == BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_NO_FILTERING) {
		return SYNC_STAT_ALLOWED;
	}

	if (rx_cte_time == 0U) {
		/* If there is no CTEInfo in advertising PDU, Radio will not parse the S0 byte
		 * and CTESTATUS register will hold zeros only.
		 * Zero value in CTETime field of CTESTATUS may be used to distinguish between
		 * PDU that includes CTEInfo or not. Allowed range for CTETime is 2-20.
		 */
		allowed = ((cte_type_mask &
			    BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_ONLY_CTE) == 0);
	} else if ((cte_type_mask & BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_NO_CTE) != 0) {
		/* PDU has a CTE but PDUs with CTE are filtered out */
		allowed = false;
	} else {
		switch (rx_cte_type) {
		case BT_HCI_LE_AOA_CTE:
			allowed = ((cte_type_mask &
				    BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_NO_AOA) == 0);
			break;
		case BT_HCI_LE_AOD_CTE_1US:
			allowed = ((cte_type_mask &
				    BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_NO_AOD_1US) == 0);
			break;
		case BT_HCI_LE_AOD_CTE_2US:
			allowed = ((cte_type_mask &
				    BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_NO_AOD_2US) == 0);
			break;
		default:
			/* Unknown or forbidden CTE type */
			allowed = false;
			break;
		}
	}

	if (allowed) {
		return SYNC_STAT_ALLOWED;
	}

	return filter_policy ? SYNC_STAT_CONT_SCAN : SYNC_STAT_TERM;
}
258 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */
259 
/* Common init/reset helper; nothing to do currently, kept for API symmetry
 * with other LLL modules.
 */
static int init_reset(void)
{
	return 0;
}
264 
/* Bottom-half prepare for sync-establishment events: advance the event
 * counter with accumulated latency, pick the data channel, run the common
 * radio setup, optionally enable CTE sampling, and arm the establishment ISR.
 *
 * Returns zero always; a failing common prepare is already handled there.
 */
static int create_prepare_cb(struct lll_prepare_param *p)
{
	uint16_t event_counter;
	struct lll_sync *lll;
	uint8_t chan_idx;
	int err;

	DEBUG_RADIO_START_O(1);

	lll = p->param;

	/* Calculate the current event latency */
	lll->skip_event = lll->skip_prepare + lll->lazy_prepare;

	/* Calculate the current event counter value */
	event_counter = lll->event_counter + lll->skip_event;

	/* Reset accumulated latencies */
	lll->skip_prepare = 0U;

	chan_idx = data_channel_calc(lll);

	/* Update event counter to next value */
	lll->event_counter = (event_counter + 1U);

	err = prepare_cb_common(p, chan_idx);
	if (err) {
		DEBUG_RADIO_START_O(1);

		return 0;
	}

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	struct lll_df_sync_cfg *cfg;

	cfg = lll_df_sync_cfg_latest_get(&lll->df_cfg, NULL);
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	/* NOTE: if (false) head keeps the if/else chain valid across the
	 * preprocessor configurations below.
	 */
	if (false) {
#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	} else if (cfg->is_enabled) {
		/* In case of call in create_prepare_cb, new sync event starts hence discard
		 * previous incomplete state.
		 */
		lll->is_cte_incomplete = false;

		/* Prepare additional node for reporting insufficient IQ report nodes issue */
		err = lll_df_iq_report_no_resources_prepare(lll);
		if (!err) {
			err = lll_df_conf_cte_rx_enable(cfg->slot_durations, cfg->ant_sw_len,
							cfg->ant_ids, chan_idx,
							CTE_INFO_IN_PAYLOAD, lll->phy);
			if (err) {
				lll->is_cte_incomplete = true;
			}
		} else {
			lll->is_cte_incomplete = true;
		}

		cfg->cte_count = 0;
	} else {
		/* If CTE reception is disabled, release additional node allocated to report
		 * insufficient memory for IQ samples.
		 */
		iq_report_incomplete_release_put(lll);
#else
	} else {
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
		if (IS_ENABLED(CONFIG_BT_CTLR_DF)) {
			/* Disable CTE reception and sampling in Radio */
			radio_df_cte_inline_set_enabled(false);
		}
	}

	radio_switch_complete_and_disable();

	/* RSSI enable must be called after radio_switch_XXX function because it clears
	 * RADIO->SHORTS register, thus disables all other shortcuts.
	 */
	radio_rssi_measure();

	radio_isr_set(isr_rx_adv_sync_estab, lll);

	DEBUG_RADIO_START_O(1);

	return 0;
}
352 
/* Bottom-half prepare for steady-state sync events: same event-counter and
 * channel handling as create_prepare_cb, but arms the steady-state receive
 * ISR (isr_rx_adv_sync) instead of the establishment one.
 *
 * Returns zero always; a failing common prepare is already handled there.
 */
static int prepare_cb(struct lll_prepare_param *p)
{
	uint16_t event_counter;
	struct lll_sync *lll;
	uint8_t chan_idx;
	int err;

	DEBUG_RADIO_START_O(1);

	lll = p->param;

	/* Calculate the current event latency */
	lll->skip_event = lll->skip_prepare + lll->lazy_prepare;

	/* Calculate the current event counter value */
	event_counter = lll->event_counter + lll->skip_event;

	/* Reset accumulated latencies */
	lll->skip_prepare = 0U;

	chan_idx = data_channel_calc(lll);

	/* Update event counter to next value */
	lll->event_counter = (event_counter + 1U);

	err = prepare_cb_common(p, chan_idx);
	if (err) {
		DEBUG_RADIO_START_O(1);

		return 0;
	}

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	struct lll_df_sync_cfg *cfg;

	cfg = lll_df_sync_cfg_latest_get(&lll->df_cfg, NULL);

	if (cfg->is_enabled) {
		/* In case of call in prepare, new sync event starts hence discard previous
		 * incomplete state.
		 */
		lll->is_cte_incomplete = false;

		/* Prepare additional node for reporting insufficient IQ report nodes issue */
		err = lll_df_iq_report_no_resources_prepare(lll);
		if (!err) {
			err = lll_df_conf_cte_rx_enable(cfg->slot_durations, cfg->ant_sw_len,
							cfg->ant_ids, chan_idx,
							CTE_INFO_IN_PAYLOAD, lll->phy);
			if (err) {
				lll->is_cte_incomplete = true;
			}
		} else {
			lll->is_cte_incomplete = true;
		}
		cfg->cte_count = 0;
	} else {
		/* If CTE reception is disabled, release additional node allocated to report
		 * insufficient memory for IQ samples.
		 */
		iq_report_incomplete_release_put(lll);
	}
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	radio_switch_complete_and_disable();

	/* RSSI enable must be called after radio_switch_XXX function because it clears
	 * RADIO->SHORTS register, thus disables all other shortcuts.
	 */
	radio_rssi_measure();

	radio_isr_set(isr_rx_adv_sync, lll);

	DEBUG_RADIO_START_O(1);

	return 0;
}
430 
/* Common radio and timer setup shared by create_prepare_cb and prepare_cb.
 *
 * Applies window widening, programs the radio (PHY, packet, AA, CRC,
 * channel, rx buffer), starts the event timer with a header-complete
 * timeout widened for clock drift, and performs the preemption check.
 *
 * @param p        Prepare parameters (ticks at expire, remainder).
 * @param chan_idx Data channel index to receive on.
 *
 * @return Zero on success, -ECANCELED when the event is cancelled due to
 *         preemption overhead.
 */
static int prepare_cb_common(struct lll_prepare_param *p, uint8_t chan_idx)
{
	struct node_rx_pdu *node_rx;
	uint32_t ticks_at_event;
	uint32_t ticks_at_start;
	uint32_t remainder_us;
	struct lll_sync *lll;
	struct ull_hdr *ull;
	uint32_t remainder;
	uint32_t hcto;
	uint32_t ret;

	lll = p->param;

	/* Current window widening */
	lll->window_widening_event_us += lll->window_widening_prepare_us;
	lll->window_widening_prepare_us = 0;
	if (lll->window_widening_event_us > lll->window_widening_max_us) {
		lll->window_widening_event_us = lll->window_widening_max_us;
	}

	/* Reset chain PDU being scheduled by lll_sync context */
	lll->is_aux_sched = 0U;

	/* Initialize Trx count */
	trx_cnt = 0U;

	/* Start setting up Radio h/w */
	radio_reset();

	radio_phy_set(lll->phy, PHY_FLAGS_S8);
	radio_pkt_configure(RADIO_PKT_CONF_LENGTH_8BIT, LL_EXT_OCTETS_RX_MAX,
			    RADIO_PKT_CONF_PHY(lll->phy));
	radio_aa_set(lll->access_addr);
	radio_crc_configure(PDU_CRC_POLYNOMIAL,
					sys_get_le24(lll->crc_init));

	lll_chan_set(chan_idx);

	node_rx = ull_pdu_rx_alloc_peek(1);
	LL_ASSERT(node_rx);

	radio_pkt_rx_set(node_rx->pdu);

	/* Compute radio start time from ticker expiry plus event overhead */
	ticks_at_event = p->ticks_at_expire;
	ull = HDR_LLL2ULL(lll);
	ticks_at_event += lll_event_offset_get(ull);

	ticks_at_start = ticks_at_event;
	ticks_at_start += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

	remainder = p->remainder;
	remainder_us = radio_tmr_start(0, ticks_at_start, remainder);

	radio_tmr_aa_capture();

	/* Header complete timeout: jitter and widening applied on both sides
	 * of the anchor, plus the event window size.
	 */
	hcto = remainder_us +
	       ((EVENT_JITTER_US + EVENT_TICKER_RES_MARGIN_US + lll->window_widening_event_us)
		<< 1) +
	       lll->window_size_event_us;
	hcto += radio_rx_ready_delay_get(lll->phy, PHY_FLAGS_S8);
	hcto += addr_us_get(lll->phy);
	hcto += radio_rx_chain_delay_get(lll->phy, PHY_FLAGS_S8);
	radio_tmr_hcto_configure(hcto);

	radio_tmr_end_capture();

#if defined(HAL_RADIO_GPIO_HAVE_LNA_PIN)
	radio_gpio_lna_setup();

	radio_gpio_pa_lna_enable(remainder_us +
				 radio_rx_ready_delay_get(lll->phy,
							  PHY_FLAGS_S8) -
				 HAL_RADIO_GPIO_LNA_OFFSET);
#endif /* HAL_RADIO_GPIO_HAVE_LNA_PIN */

#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED) && \
	(EVENT_OVERHEAD_PREEMPT_US <= EVENT_OVERHEAD_PREEMPT_MIN_US)
	uint32_t overhead;

	overhead = lll_preempt_calc(ull, (TICKER_ID_SCAN_SYNC_BASE + ull_sync_lll_handle_get(lll)),
				    ticks_at_event);
	/* check if preempt to start has changed */
	if (overhead) {
		LL_ASSERT_OVERHEAD(overhead);

		radio_isr_set(isr_done, lll);
		radio_disable();

		return -ECANCELED;
	}
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */

	ret = lll_prepare_done(lll);
	LL_ASSERT(!ret);

	DEBUG_RADIO_START_O(1);

	return 0;
}
531 
/* Decide whether the current sync event should be aborted in favour of the
 * next event in the prepare pipeline.
 *
 * @param next      Next event's LLL context in the pipeline.
 * @param curr      Currently running sync event's LLL context.
 * @param resume_cb Unused; sync events never resume after preemption.
 *
 * @return Zero to keep the current event running, -ECANCELED to abort it.
 */
static int is_abort_cb(void *next, void *curr, lll_prepare_cb_t *resume_cb)
{
	/* Sync context shall not resume when being preempted, i.e. they
	 * shall not use -EAGAIN as return value.
	 */
	ARG_UNUSED(resume_cb);

	/* Different radio event overlap */
	if (next != curr) {
		struct lll_scan_aux *lll_aux;
		struct lll_scan *lll;

		lll = ull_scan_lll_is_valid_get(next);
		if (lll) {
			/* Do not abort current periodic sync event as next
			 * event is a scan event.
			 */
			return 0;
		}

		lll_aux = ull_scan_aux_lll_is_valid_get(next);
		if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_SKIP_ON_SCAN_AUX) &&
		    lll_aux) {
			/* Do not abort current periodic sync event as next
			 * event is a scan aux event.
			 */
			return 0;
		}

#if defined(CONFIG_BT_CTLR_SCAN_AUX_SYNC_RESERVE_MIN)
		/* Arbitrate between two overlapping sync events using the
		 * per-context abort counts so neither is starved.
		 */
		struct lll_sync *lll_sync_next;
		struct lll_sync *lll_sync_curr;

		lll_sync_next = ull_sync_lll_is_valid_get(next);
		if (!lll_sync_next) {
			lll_sync_curr = curr;

			/* Do not abort if near supervision timeout */
			if (lll_sync_curr->forced) {
				return 0;
			}

			/* Abort current event as next event is not a
			 * scan and not a scan aux event.
			 */
			return -ECANCELED;
		}

		lll_sync_curr = curr;
		if (lll_sync_curr->abort_count < lll_sync_next->abort_count) {
			/* Saturate the count at UINT8_MAX to avoid wrap */
			if (lll_sync_curr->abort_count < UINT8_MAX) {
				lll_sync_curr->abort_count++;
			}

			/* Abort current event as next event has higher abort
			 * count.
			 */
			return -ECANCELED;
		}

		if (lll_sync_next->abort_count < UINT8_MAX) {
			lll_sync_next->abort_count++;
		}

#else /* !CONFIG_BT_CTLR_SCAN_AUX_SYNC_RESERVE_MIN */
		/* Abort current event as next event is not a
		 * scan and not a scan aux event.
		 */
		return -ECANCELED;
#endif /* !CONFIG_BT_CTLR_SCAN_AUX_SYNC_RESERVE_MIN */
	}

	/* Do not abort if current periodic sync event overlaps next interval
	 * or next event is a scan event.
	 */
	return 0;
}
609 
/* Abort handling for a sync event.
 *
 * With prepare_param set, a queued (not yet running) prepare is cancelled:
 * the HF clock request is released, the missed latency is accumulated, and
 * an extra done event is generated so ULL can check for sync lost.
 * Without prepare_param, the actively running radio event is aborted.
 *
 * @param prepare_param Prepare being cancelled, or NULL for an active event.
 * @param param         LLL context used for the active-event abort path.
 */
static void abort_cb(struct lll_prepare_param *prepare_param, void *param)
{
	struct event_done_extra *e;
	struct lll_sync *lll;
	int err;

	/* NOTE: This is not a prepare being cancelled */
	if (!prepare_param) {
		/* Perform event abort here.
		 * After event has been cleanly aborted, clean up resources
		 * and dispatch event done.
		 */
		radio_isr_set(isr_done, param);
		radio_disable();

		return;
	}

	/* NOTE: Else clean the top half preparations of the aborted event
	 * currently in preparation pipeline.
	 */
	err = lll_hfclock_off();
	LL_ASSERT(err >= 0);

	/* Accumulate the latency as event is aborted while being in pipeline */
	lll = prepare_param->param;
	lll->skip_prepare += (lll->lazy_prepare + 1U);

	/* Extra done event, to check sync lost */
	e = ull_event_done_extra_get();
	LL_ASSERT(e);

	e->type = EVENT_DONE_EXTRA_TYPE_SYNC;
	e->trx_cnt = 0U;
	e->crc_valid = 0U;
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING) && \
	defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
	e->sync_term = 0U;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING &&
	* CONFIG_BT_CTLR_CTEINLINE_SUPPORT
	*/

	lll_done(param);
}
654 
/* Radio ISR callback that sets up reception of an auxiliary (chain) PDU.
 *
 * Invoked with the node_rx that carries the AuxPtr of the previously
 * received PDU. Computes the aux PDU start time from the AuxPtr offset,
 * applies drift-based window widening, reprograms the radio for the aux
 * PHY/channel, and starts the receive timer with a header-complete timeout.
 *
 * @param param node_rx of the PDU whose AuxPtr is being followed; its
 *              rx_ftr.param holds the sync LLL context.
 */
static void isr_aux_setup(void *param)
{
	struct pdu_adv_aux_ptr *aux_ptr;
	struct node_rx_pdu *node_rx;
	uint32_t window_widening_us;
	uint32_t window_size_us;
	struct node_rx_ftr *ftr;
	uint32_t aux_offset_us;
	uint32_t aux_start_us;
	struct lll_sync *lll;
	uint32_t start_us;
	uint8_t phy_aux;
	uint32_t hcto;

	lll_isr_status_reset();

	node_rx = param;
	ftr = &node_rx->rx_ftr;
	aux_ptr = ftr->aux_ptr;
	phy_aux = BIT(PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
	ftr->aux_phy = phy_aux;

	lll = ftr->param;

	/* Determine the window size */
	if (aux_ptr->offs_units) {
		window_size_us = OFFS_UNIT_300_US;
	} else {
		window_size_us = OFFS_UNIT_30_US;
	}

	/* Calculate the aux offset from start of the scan window */
	aux_offset_us = (uint32_t) PDU_ADV_AUX_PTR_OFFSET_GET(aux_ptr) * window_size_us;

	/* Calculate the window widening that needs to be deducted */
	if (aux_ptr->ca) {
		window_widening_us = SCA_DRIFT_50_PPM_US(aux_offset_us);
	} else {
		window_widening_us = SCA_DRIFT_500_PPM_US(aux_offset_us);
	}

	/* Setup radio for auxiliary PDU scan */
	radio_phy_set(phy_aux, PHY_FLAGS_S8);
	radio_pkt_configure(RADIO_PKT_CONF_LENGTH_8BIT, LL_EXT_OCTETS_RX_MAX,
			    RADIO_PKT_CONF_PHY(phy_aux));

	lll_chan_set(aux_ptr->chan_idx);

	radio_pkt_rx_set(node_rx->pdu);

	radio_isr_set(isr_rx_aux_chain, lll);

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	struct lll_df_sync_cfg *cfg;

	cfg = lll_df_sync_cfg_latest_get(&lll->df_cfg, NULL);

	if (cfg->is_enabled && is_max_cte_reached(cfg->max_cte_count, cfg->cte_count)) {
		int err;

		/* Prepare additional node for reporting insufficient memory for IQ samples
		 * reports.
		 */
		err = lll_df_iq_report_no_resources_prepare(lll);
		if (!err) {
			err = lll_df_conf_cte_rx_enable(cfg->slot_durations,
							cfg->ant_sw_len,
							cfg->ant_ids,
							aux_ptr->chan_idx,
							CTE_INFO_IN_PAYLOAD,
							PDU_ADV_AUX_PTR_PHY_GET(aux_ptr));
			if (err) {
				lll->is_cte_incomplete = true;
			}
		} else {
			lll->is_cte_incomplete = true;
		}
	} else if (!cfg->is_enabled) {
		/* If CTE reception is disabled, release additional node allocated to report
		 * insufficient memory for IQ samples.
		 */
		iq_report_incomplete_release_put(lll);
	}
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
	radio_switch_complete_and_disable();

	/* Setup radio rx on micro second offset. Note that radio_end_us stores
	 * PDU start time in this case.
	 */
	aux_start_us = ftr->radio_end_us + aux_offset_us;
	aux_start_us -= lll_radio_rx_ready_delay_get(phy_aux, PHY_FLAGS_S8);
	aux_start_us -= window_widening_us;
	aux_start_us -= EVENT_JITTER_US;

	start_us = radio_tmr_start_us(0, aux_start_us);
	LL_ASSERT(start_us == (aux_start_us + 1U));

	/* Setup header complete timeout */
	hcto = start_us;
	hcto += EVENT_JITTER_US;
	hcto += window_widening_us;
	hcto += lll_radio_rx_ready_delay_get(phy_aux, PHY_FLAGS_S8);
	hcto += window_size_us;
	hcto += radio_rx_chain_delay_get(phy_aux, PHY_FLAGS_S8);
	hcto += addr_us_get(phy_aux);
	radio_tmr_hcto_configure(hcto);

	/* capture end of Rx-ed PDU, extended scan to schedule auxiliary
	 * channel chaining, create connection or to create periodic sync.
	 */
	radio_tmr_end_capture();

	/* scanner always measures RSSI */
	radio_rssi_measure();

#if defined(HAL_RADIO_GPIO_HAVE_LNA_PIN)
	radio_gpio_lna_setup();

	radio_gpio_pa_lna_enable(start_us +
				 radio_rx_ready_delay_get(phy_aux,
							  PHY_FLAGS_S8) -
				 HAL_RADIO_GPIO_LNA_OFFSET);
#endif /* HAL_RADIO_GPIO_HAVE_LNA_PIN */
}
779 
780 /**
781  * @brief Common part of ISR responsible for handling PDU receive.
782  *
783  * @param lll          Pointer to LLL sync object.
784  * @param node_type    Type of a receive node to be set for handling by ULL.
785  * @param crc_ok       Informs if received PDU has correct CRC.
786  * @param phy_flags_rx Received Coded PHY coding scheme (0 - S1, 1 - S8).
787  * @param cte_ready    Informs if received PDU has CTEInfo present and IQ samples were collected.
788  * @param rssi_ready   Informs if RSSI for received PDU is ready.
789  * @param status       Informs about periodic advertisement synchronization status.
790  *
791  * @return Zero in case of there is no chained PDU or there is a chained PDUs but spaced long enough
792  *         to schedule its reception by ULL.
793  * @return -EBUSY in case there is a chained PDU scheduled by LLL due to short spacing.
794  */
static int isr_rx(struct lll_sync *lll, uint8_t node_type, uint8_t crc_ok,
		  uint8_t phy_flags_rx, uint8_t cte_ready, uint8_t rssi_ready,
		  enum sync_status status)
{
	bool sched = false;
	int err;

	/* Check CRC and generate Periodic Advertising Report */
	if (crc_ok) {
		struct node_rx_pdu *node_rx;

		/* Verify if there are free RX buffers for:
		 * - reporting just received PDU
		 * - allocating an extra node_rx for periodic report incomplete
		 * - a buffer for receiving data in a connection
		 * - a buffer for receiving empty PDU
		 *
		 * If this is a reception of chained PDU, node_type is
		 * NODE_RX_TYPE_EXT_AUX_REPORT, then there is no need to reserve
		 * again a node_rx for periodic report incomplete.
		 */
		if (node_type != NODE_RX_TYPE_EXT_AUX_REPORT) {
			/* Reset Sync context association with any Aux context
			 * as a new chain is being setup for reception here.
			 */
			lll->lll_aux = NULL;

			node_rx = ull_pdu_rx_alloc_peek(4);
		} else {
			node_rx = ull_pdu_rx_alloc_peek(3);
		}

		if (node_rx) {
			struct node_rx_ftr *ftr;
			struct pdu_adv *pdu;

			ull_pdu_rx_alloc();

			node_rx->hdr.type = node_type;

			/* Fill receive footer consumed by ULL for the report */
			ftr = &(node_rx->rx_ftr);
			ftr->param = lll;
			ftr->aux_failed = 0U;
			ftr->rssi = (rssi_ready) ? radio_rssi_get() :
						   BT_HCI_LE_RSSI_NOT_AVAILABLE;
			ftr->ticks_anchor = radio_tmr_start_get();
			/* Back out the rx chain delay so radio_end_us reflects
			 * the on-air PDU end.
			 */
			ftr->radio_end_us = radio_tmr_end_get() -
					    radio_rx_chain_delay_get(lll->phy,
								     phy_flags_rx);
			ftr->phy_flags = phy_flags_rx;
			ftr->sync_status = status;
			ftr->sync_rx_enabled = lll->is_rx_enabled;

			if (node_type != NODE_RX_TYPE_EXT_AUX_REPORT) {
				/* Extra node reserved for periodic report
				 * incomplete handling.
				 */
				ftr->extra = ull_pdu_rx_alloc();
			}

			pdu = (void *)node_rx->pdu;

			/* Attempt to schedule reception of a chained PDU, if
			 * the received PDU carries an AuxPtr.
			 */
			ftr->aux_lll_sched = lll_scan_aux_setup(pdu, lll->phy,
								phy_flags_rx,
								isr_aux_setup,
								lll);
			if (ftr->aux_lll_sched) {
				if (node_type != NODE_RX_TYPE_EXT_AUX_REPORT) {
					lll->is_aux_sched = 1U;
				}

				/* Chain PDU reception scheduled by LLL */
				err = -EBUSY;
			} else {
				err = 0;
			}

			ull_rx_put(node_rx->hdr.link, node_rx);

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
			if (cte_ready) {
				/* If there is a periodic advertising report generate  IQ data
				 * report with valid packet_status if there were free nodes for
				 * that. Or report insufficient resources for IQ data report.
				 *
				 * Returned value is not checked because it does not matter if there
				 * is a IQ report to be send towards ULL. There is always periodic
				 * sync report to be send.
				 */
				(void)iq_report_create_put(lll, rssi_ready, BT_HCI_LE_CTE_CRC_OK);
			}
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

			sched = true;
		} else {
			/* No free rx buffers; only chained receptions report
			 * the shortage to the caller.
			 */
			if (node_type == NODE_RX_TYPE_EXT_AUX_REPORT) {
				err = -ENOMEM;
			} else {
				err = 0;
			}
		}
	} else {
#if defined(CONFIG_BT_CTLR_DF_SAMPLE_CTE_FOR_PDU_WITH_BAD_CRC)
		/* In case of reception of chained PDUs IQ samples report for a PDU with wrong
		 * CRC is handled by caller. It has to be that way to be sure the IQ report
		 * follows possible periodic advertising report.
		 */
		if (cte_ready && node_type != NODE_RX_TYPE_EXT_AUX_REPORT) {
			err = iq_report_create_put(lll, rssi_ready,
						   BT_HCI_LE_CTE_CRC_ERR_CTE_BASED_TIME);
			if (!err) {
				sched = true;
			}
		}
#endif /* CONFIG_BT_CTLR_DF_SAMPLE_CTE_FOR_PDU_WITH_BAD_CRC */

		err = 0;
	}

	if (sched) {
		/* Kick ULL processing of the queued rx node(s) */
		ull_rx_sched();
	}

	return err;
}
916 
/* Radio ISR for the first reception while establishing a periodic sync.
 *
 * Applies the CTE type filter to decide whether the sync is allowed,
 * terminated, or scanning continues, then delegates PDU handling to
 * isr_rx(). On chain-PDU scheduling (-EBUSY) the event stays active and
 * cleanup is deferred.
 *
 * @param param Sync LLL context.
 */
static void isr_rx_adv_sync_estab(void *param)
{
	enum sync_status sync_ok;
	struct lll_sync *lll;
	uint8_t phy_flags_rx;
	uint8_t rssi_ready;
	uint8_t cte_ready;
	uint8_t trx_done;
	uint8_t crc_ok;
	int err;

	lll = param;

	/* Read radio status and events */
	trx_done = radio_is_done();
	if (trx_done) {
		crc_ok = radio_crc_is_valid();
		rssi_ready = radio_rssi_is_ready();
		phy_flags_rx = radio_phy_flags_rx_get();
		sync_ok = sync_filtrate_by_cte_type(lll->cte_type, lll->filter_policy);
		trx_cnt = 1U;

		if (IS_ENABLED(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)) {
			cte_ready = radio_df_cte_ready();
		} else {
			cte_ready = 0U;
		}
	} else {
		crc_ok = phy_flags_rx = rssi_ready = cte_ready = 0U;
		/* Initialized as allowed; crc_ok takes precedence during
		 * handling of PDU reception in this situation.
		 */
		sync_ok = SYNC_STAT_ALLOWED;
	}

	/* Clear radio rx status and events */
	lll_isr_rx_status_reset();

	/* No Rx */
	if (!trx_done) {
		/* TODO: Combine the early exit with above if-then-else block
		 */
#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
		LL_ASSERT(!lll->node_cte_incomplete);
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

		goto isr_rx_done;
	}

	/* Save radio ready and address capture timestamp for later use for
	 * drift compensation.
	 */
	radio_tmr_aa_save(radio_tmr_aa_get());
	radio_tmr_ready_save(radio_tmr_ready_get());

	/* Handle regular PDU reception if CTE type is acceptable */
	if (sync_ok == SYNC_STAT_ALLOWED) {
		err = isr_rx(lll, NODE_RX_TYPE_SYNC, crc_ok, phy_flags_rx,
			     cte_ready, rssi_ready, SYNC_STAT_ALLOWED);
		if (err == -EBUSY) {
			/* Chain PDU reception scheduled; defer cleanup */
			return;
		}
	} else if (sync_ok == SYNC_STAT_TERM) {
		struct node_rx_pdu *node_rx;

		/* Verify if there are free RX buffers for:
		 * - reporting just received PDU
		 * - a buffer for receiving data in a connection
		 * - a buffer for receiving empty PDU
		 */
		node_rx = ull_pdu_rx_alloc_peek(3);
		if (node_rx) {
			struct node_rx_ftr *ftr;

			ull_pdu_rx_alloc();

			node_rx->hdr.type = NODE_RX_TYPE_SYNC;

			/* Notify ULL that the sync is to be terminated */
			ftr = &node_rx->rx_ftr;
			ftr->param = lll;
			ftr->sync_status = SYNC_STAT_TERM;

			ull_rx_put_sched(node_rx->hdr.link, node_rx);
		}
	}

isr_rx_done:
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING) && \
	defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
	isr_rx_done_cleanup(lll, crc_ok, sync_ok != SYNC_STAT_ALLOWED);
#else
	isr_rx_done_cleanup(lll, crc_ok, false);
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING && CONFIG_BT_CTLR_CTEINLINE_SUPPORT */
}
1011 
/* Radio ISR for reception on an already established periodic sync.
 *
 * Collects radio status, saves timestamps for drift compensation and
 * delegates PDU handling to isr_rx(). On chain-PDU scheduling (-EBUSY)
 * the event stays active and cleanup is deferred.
 *
 * @param param Sync LLL context.
 */
static void isr_rx_adv_sync(void *param)
{
	struct lll_sync *lll;
	uint8_t phy_flags_rx;
	uint8_t rssi_ready;
	uint8_t cte_ready;
	uint8_t trx_done;
	uint8_t crc_ok;
	int err;

	lll = param;

	/* Read radio status and events */
	trx_done = radio_is_done();
	if (trx_done) {
		crc_ok = radio_crc_is_valid();
		rssi_ready = radio_rssi_is_ready();
		phy_flags_rx = radio_phy_flags_rx_get();
		trx_cnt = 1U;

		if (IS_ENABLED(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)) {
			cte_ready = radio_df_cte_ready();
		} else {
			cte_ready = 0U;
		}
	} else {
		crc_ok = phy_flags_rx = rssi_ready = cte_ready = 0U;
	}

	/* Clear radio rx status and events */
	lll_isr_rx_status_reset();

	/* No Rx */
	if (!trx_done) {
		/* TODO: Combine the early exit with above if-then-else block
		 */
		goto isr_rx_done;
	}

	/* Save radio ready and address capture timestamp for later use for
	 * drift compensation.
	 */
	radio_tmr_aa_save(radio_tmr_aa_get());
	radio_tmr_ready_save(radio_tmr_ready_get());

	/* When periodic advertisement is synchronized, the CTEType may change. It should not
	 * affect synchronization even when new CTE type is not allowed by sync parameters.
	 * Hence the SYNC_STAT_READY is set.
	 */
	err = isr_rx(lll, NODE_RX_TYPE_SYNC_REPORT, crc_ok, phy_flags_rx, cte_ready, rssi_ready,
		     SYNC_STAT_READY);
	if (err == -EBUSY) {
		/* Chain PDU reception scheduled; defer cleanup */
		return;
	}

isr_rx_done:
	isr_rx_done_cleanup(lll, crc_ok, false);
}
1070 
/**
 * @brief ISR handler for reception of an auxiliary (chain) PDU of a sync event.
 *
 * On failure (missing aux context, no reception, CRC error or isr_rx()
 * error) a NODE_RX_TYPE_EXT_AUX_RELEASE message is generated so ULL can
 * release the aux context and flag the HCI report as incomplete; pending
 * IQ report buffers are released or reported incomplete as applicable.
 *
 * @param param Pointer to the struct lll_sync context of the sync event.
 */
static void isr_rx_aux_chain(void *param)
{
	struct lll_scan_aux *lll_aux;
	struct lll_sync *lll;
	uint8_t phy_flags_rx;
	uint8_t rssi_ready;
	uint8_t cte_ready;
	uint8_t trx_done;
	uint8_t crc_ok;
	int err;

	lll = param;
	lll_aux = lll->lll_aux;
	if (!lll_aux) {
		/* auxiliary context not assigned (yet) in ULL execution
		 * context, drop current reception and abort further chain PDU
		 * receptions, if any.
		 */
		lll_isr_status_reset();

		rssi_ready = 0U;
		cte_ready = 0U;
		crc_ok =  0U;
		err = 0;

		goto isr_rx_aux_chain_done;
	}

	/* Read radio status and events */
	trx_done = radio_is_done();
	if (trx_done) {
		crc_ok = radio_crc_is_valid();
		phy_flags_rx = radio_phy_flags_rx_get();
		rssi_ready = radio_rssi_is_ready();

		if (IS_ENABLED(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)) {
			cte_ready = radio_df_cte_ready();
		} else {
			cte_ready = 0U;
		}
	} else {
		crc_ok = phy_flags_rx = rssi_ready = cte_ready = 0U;
	}

	/* Clear radio rx status and events */
	lll_isr_rx_status_reset();

	/* No Rx */
	if (!trx_done) {
		/* TODO: Combine the early exit with above if-then-else block
		 */

		err = 0;

		goto isr_rx_aux_chain_done;
	}

	/* When periodic advertisement is synchronized, the CTEType may change. It should not
	 * affect synchronization even when new CTE type is not allowed by sync parameters.
	 * Hence the SYNC_STAT_READY is set.
	 */
	err = isr_rx(lll, NODE_RX_TYPE_EXT_AUX_REPORT, crc_ok, phy_flags_rx, cte_ready, rssi_ready,
		     SYNC_STAT_READY);
	if (err == -EBUSY) {
		/* Another chain PDU reception was scheduled; defer cleanup */
		return;
	}

isr_rx_aux_chain_done:
	if (!crc_ok || err) {
		struct node_rx_pdu *node_rx;

		/* Generate message to release aux context and flag the report
		 * generated thereafter by HCI as incomplete.
		 */
		node_rx = ull_pdu_rx_alloc();
		LL_ASSERT(node_rx);

		node_rx->hdr.type = NODE_RX_TYPE_EXT_AUX_RELEASE;

		node_rx->rx_ftr.param = lll;
		node_rx->rx_ftr.aux_failed = 1U;

		ull_rx_put(node_rx->hdr.link, node_rx);

		if (!crc_ok) {
#if defined(CONFIG_BT_CTLR_DF_SAMPLE_CTE_FOR_PDU_WITH_BAD_CRC)
			/* Sampled CTE of a PDU with bad CRC may still be
			 * reported when so configured.
			 */
			if (cte_ready) {
				(void)iq_report_create_put(lll, rssi_ready,
							   BT_HCI_LE_CTE_CRC_ERR_CTE_BASED_TIME);
			}
#endif /* CONFIG_BT_CTLR_DF_SAMPLE_CTE_FOR_PDU_WITH_BAD_CRC */
		} else {
#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
			/* Report insufficient resources for IQ data report and release additional
			 * noder_rx_iq_data stored in lll_sync object, to avoid buffers leakage.
			 */
			iq_report_incomplete_create_put(lll);
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
		}

		ull_rx_sched();
	}

	if (lll->is_aux_sched) {
		/* Chain reception used LLL scheduling within the sync event;
		 * complete the sync event itself.
		 */
		lll->is_aux_sched = 0U;

		isr_rx_done_cleanup(lll, 1U, false);
	} else {
		lll_isr_cleanup(lll_aux);
	}
}
1182 
/**
 * @brief Generate the done event for a sync event and clean up LLL state.
 *
 * Fills the event-done extra with CRC status and, when a PDU was received
 * in this event (trx_cnt non-zero), the drift measurements ULL uses to
 * adjust the next anchor point; then performs the common LLL ISR cleanup.
 *
 * @param lll       Pointer to the struct lll_sync context of the sync event.
 * @param crc_ok    CRC check status of the received PDU (non-zero if valid).
 * @param sync_term True when the sync is to be terminated due to CTE type
 *                  filtering (only used with inline CTE filtering support).
 */
static void isr_rx_done_cleanup(struct lll_sync *lll, uint8_t crc_ok, bool sync_term)
{
	struct event_done_extra *e;

	/* Calculate and place the drift information in done event */
	e = ull_event_done_extra_get();
	LL_ASSERT(e);

	e->type = EVENT_DONE_EXTRA_TYPE_SYNC;
	e->trx_cnt = trx_cnt;
	e->crc_valid = crc_ok;
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING) && \
	defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
	e->sync_term = sync_term;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING && CONFIG_BT_CTLR_CTEINLINE_SUPPORT */
	if (trx_cnt) {
		/* Timestamps saved at reception time; difference yields the
		 * actual start-to-access-address air time.
		 */
		e->drift.preamble_to_addr_us = addr_us_get(lll->phy);
		e->drift.start_to_address_actual_us =
			radio_tmr_aa_restore() - radio_tmr_ready_restore();
		e->drift.window_widening_event_us = lll->window_widening_event_us;

		/* Reset window widening, as anchor point sync-ed */
		lll->window_widening_event_us = 0U;
		lll->window_size_event_us = 0U;

#if defined(CONFIG_BT_CTLR_SCAN_AUX_SYNC_RESERVE_MIN)
		/* Reset LLL abort count as LLL event is gracefully done and
		 * was not aborted by any other event when current event could
		 * have been using unreserved time space.
		 */
		lll->abort_count = 0U;
#endif /* CONFIG_BT_CTLR_SCAN_AUX_SYNC_RESERVE_MIN */
	}

	lll_isr_cleanup(lll);
}
1219 
/**
 * @brief Generic done ISR for a sync event that ended without a reception.
 *
 * If a chain PDU reception scheduled via LLL was aborted/preempted, a
 * NODE_RX_TYPE_EXT_AUX_RELEASE message is generated so that ULL releases
 * the aux context and flags the subsequent HCI report as incomplete.
 *
 * @param param Pointer to the struct lll_sync context of the sync event.
 */
static void isr_done(void *param)
{
	struct lll_sync *lll;

	lll_isr_status_reset();

	/* Generate incomplete data status and release aux context when
	 * sync event is using LLL scheduling.
	 */
	lll = param;

	/* LLL scheduling used for chain PDU reception is aborted/preempted */
	if (lll->is_aux_sched) {
		struct node_rx_pdu *node_rx;

		lll->is_aux_sched = 0U;

		/* Generate message to release aux context and flag the report
		 * generated thereafter by HCI as incomplete.
		 */
		node_rx = ull_pdu_rx_alloc();
		LL_ASSERT(node_rx);

		node_rx->hdr.type = NODE_RX_TYPE_EXT_AUX_RELEASE;

		node_rx->rx_ftr.param = lll;
		node_rx->rx_ftr.aux_failed = 1U;

		ull_rx_put_sched(node_rx->hdr.link, node_rx);
	}

	/* crc_ok is reported as 1 only when a reception occurred this event */
	isr_rx_done_cleanup(param, ((trx_cnt) ? 1U : 0U), false);
}
1253 
1254 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
/**
 * @brief Fill an IQ sample report node from the current radio DF status.
 *
 * Latches CTE status, antenna id and sample count from the radio and
 * populates the given report node for forwarding to ULL.
 *
 * @param lll            Pointer to the struct lll_sync context.
 * @param rssi_ready     Non-zero when a RSSI measurement is available.
 * @param packet_status  HCI packet status to store in the report.
 * @param slot_durations Configured CTE sampling slot durations.
 * @param iq_report      Report node to populate.
 */
static void iq_report_create(struct lll_sync *lll, uint8_t rssi_ready, uint8_t packet_status,
			     uint8_t slot_durations, struct node_rx_iq_report *iq_report)
{
	struct node_rx_ftr *ftr;
	uint8_t cte_status;
	uint8_t pdu_ant;

	/* Latch DF related radio status before filling in the report */
	cte_status = radio_df_cte_status_get();
	pdu_ant = radio_df_pdu_antenna_switch_pattern_get();

	iq_report->rx.hdr.type = NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT;
	iq_report->sample_count = radio_df_iq_samples_amount_get();
	iq_report->packet_status = packet_status;
	iq_report->rssi_ant_id = pdu_ant;
	iq_report->cte_info = *(struct pdu_cte_info *)&cte_status;
	iq_report->local_slot_durations = slot_durations;
	/* The event counter was already advanced to the next value during
	 * event preparation; subtract one to report the current event.
	 */
	iq_report->event_counter = lll->event_counter - 1;

	ftr = &iq_report->rx.rx_ftr;
	ftr->param = lll;
	if (rssi_ready) {
		ftr->rssi = radio_rssi_get();
	} else {
		ftr->rssi = BT_HCI_LE_RSSI_NOT_AVAILABLE;
	}
}
1281 
iq_report_incomplete_create(struct lll_sync * lll,struct node_rx_iq_report * iq_report)1282 static void iq_report_incomplete_create(struct lll_sync *lll, struct node_rx_iq_report *iq_report)
1283 {
1284 	struct node_rx_ftr *ftr;
1285 
1286 	iq_report->rx.hdr.type = NODE_RX_TYPE_SYNC_IQ_SAMPLE_REPORT;
1287 	iq_report->sample_count = 0;
1288 	iq_report->packet_status = BT_HCI_LE_CTE_INSUFFICIENT_RESOURCES;
1289 	/* Event counter is updated to next value during event preparation,
1290 	 * hence it has to be subtracted to store actual event counter
1291 	 * value.
1292 	 */
1293 	iq_report->event_counter = lll->event_counter - 1;
1294 	/* The PDU antenna is set in configuration, hence it is always
1295 	 * available. BT 5.3 Core Spec. does not say if this field
1296 	 * may be invalid in case of insufficient resources.
1297 	 */
1298 	iq_report->rssi_ant_id = radio_df_pdu_antenna_switch_pattern_get();
1299 	/* According to BT 5.3, Vol 4, Part E, section 7.7.65.21 below
1300 	 * fields have invalid values in case of insufficient resources.
1301 	 */
1302 	iq_report->cte_info =
1303 		(struct pdu_cte_info){.time = 0, .rfu = 0, .type = 0};
1304 	iq_report->local_slot_durations = 0;
1305 
1306 	ftr = &iq_report->rx.rx_ftr;
1307 	ftr->param = lll;
1308 
1309 	ftr->rssi = BT_HCI_LE_RSSI_NOT_AVAILABLE;
1310 	ftr->extra = NULL;
1311 }
1312 
/**
 * @brief Create an IQ sample report and hand it over to ULL.
 *
 * Depending on whether CTE sampling completed or was cut short, either a
 * regular IQ report is allocated and filled, or the pending incomplete
 * report stored in the sync context is finalized.
 *
 * @param lll           Pointer to the struct lll_sync context.
 * @param rssi_ready    Non-zero when a RSSI measurement is available.
 * @param packet_status HCI packet status to store in the report.
 *
 * @retval 0        Report was queued towards ULL.
 * @retval -ENODATA Sampling disabled, CTE limit reached, or the incomplete
 *                  report was already sent.
 */
static int iq_report_create_put(struct lll_sync *lll, uint8_t rssi_ready, uint8_t packet_status)
{
	struct node_rx_iq_report *iq_report;
	struct lll_df_sync_cfg *cfg;

	cfg = lll_df_sync_cfg_curr_get(&lll->df_cfg);

	/* Nothing to report when sampling is disabled or the per-event CTE
	 * count limit has been reached.
	 */
	if (!cfg->is_enabled ||
	    !is_max_cte_reached(cfg->max_cte_count, cfg->cte_count)) {
		return -ENODATA;
	}

	if (!lll->is_cte_incomplete) {
		iq_report = ull_df_iq_report_alloc();
		LL_ASSERT(iq_report);

		iq_report_create(lll, rssi_ready, packet_status,
				 cfg->slot_durations, iq_report);
	} else {
		iq_report = lll->node_cte_incomplete;

		/* Reception of chained PDUs may be still in progress. Do not
		 * report insufficient resources multiple times.
		 */
		if (!iq_report) {
			/* Incomplete CTE was already reported */
			return -ENODATA;
		}

		iq_report_incomplete_create(lll, iq_report);
		lll->node_cte_incomplete = NULL;
	}

	/* Report ready to be sent to ULL */
	ull_rx_put(iq_report->rx.hdr.link, iq_report);

	cfg->cte_count += 1U;

	return 0;
}
1362 
iq_report_incomplete_create_put(struct lll_sync * lll)1363 static int iq_report_incomplete_create_put(struct lll_sync *lll)
1364 {
1365 	struct lll_df_sync_cfg *cfg;
1366 
1367 	cfg = lll_df_sync_cfg_curr_get(&lll->df_cfg);
1368 
1369 	if (cfg->is_enabled) {
1370 		struct node_rx_iq_report *iq_report;
1371 
1372 		iq_report = lll->node_cte_incomplete;
1373 
1374 		/* Reception of chained PDUs may be still in progress. Do not report
1375 		 * insufficient resources multiple times.
1376 		 */
1377 		if (iq_report) {
1378 			iq_report_incomplete_create(lll, iq_report);
1379 
1380 			lll->node_cte_incomplete = NULL;
1381 			ull_rx_put(iq_report->rx.hdr.link, iq_report);
1382 
1383 			return 0;
1384 		} else {
1385 			/* Incomplete CTE was already reported */
1386 			return -ENODATA;
1387 		}
1388 
1389 	}
1390 
1391 	return -ENODATA;
1392 }
1393 
iq_report_incomplete_release_put(struct lll_sync * lll)1394 static void iq_report_incomplete_release_put(struct lll_sync *lll)
1395 {
1396 	if (lll->node_cte_incomplete) {
1397 		struct node_rx_iq_report *iq_report = lll->node_cte_incomplete;
1398 
1399 		iq_report->rx.hdr.type = NODE_RX_TYPE_IQ_SAMPLE_REPORT_LLL_RELEASE;
1400 
1401 		ull_rx_put(iq_report->rx.hdr.link, iq_report);
1402 		lll->node_cte_incomplete = NULL;
1403 	}
1404 }
is_max_cte_reached(uint8_t max_cte_count,uint8_t cte_count)1405 static bool is_max_cte_reached(uint8_t max_cte_count, uint8_t cte_count)
1406 {
1407 	return max_cte_count == BT_HCI_LE_SAMPLE_CTE_ALL || cte_count < max_cte_count;
1408 }
1409 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1410 
data_channel_calc(struct lll_sync * lll)1411 static uint8_t data_channel_calc(struct lll_sync *lll)
1412 {
1413 	uint8_t data_chan_count;
1414 	uint8_t *data_chan_map;
1415 
1416 	/* Process channel map update, if any */
1417 	if (lll->chm_first != lll->chm_last) {
1418 		uint16_t instant_latency;
1419 
1420 		instant_latency = (lll->event_counter + lll->skip_event - lll->chm_instant) &
1421 				  EVENT_INSTANT_MAX;
1422 		if (instant_latency <= EVENT_INSTANT_LATENCY_MAX) {
1423 			/* At or past the instant, use channelMapNew */
1424 			lll->chm_first = lll->chm_last;
1425 		}
1426 	}
1427 
1428 	/* Calculate the radio channel to use */
1429 	data_chan_map = lll->chm[lll->chm_first].data_chan_map;
1430 	data_chan_count = lll->chm[lll->chm_first].data_chan_count;
1431 	return lll_chan_sel_2(lll->event_counter + lll->skip_event, lll->data_chan_id,
1432 			      data_chan_map, data_chan_count);
1433 }
1434 
/**
 * @brief Filter a received PDU by its inline CTE type against sync settings.
 *
 * With CTE type filtering and inline CTE support enabled, the CTE time and
 * type are read from the radio and checked against the sync's CTE type mask
 * and filter policy. Otherwise filtering is not possible and the PDU is
 * always allowed (the return after the #endif is only reachable in that
 * build configuration).
 *
 * @param cte_type_mask Bitmask of CTE types to be rejected.
 * @param filter_policy Periodic sync establishment filter policy.
 *
 * @return Sync status: allowed, or filtering verdict from
 *         lll_sync_cte_is_allowed().
 */
static enum sync_status sync_filtrate_by_cte_type(uint8_t cte_type_mask, uint8_t filter_policy)
{
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING) && \
	defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
	uint8_t rx_cte_time;
	uint8_t rx_cte_type;

	/* NOTE(review): direct nRF register access; assumes inline CTE status
	 * is valid at this point in the ISR - nRF-specific code path.
	 */
	rx_cte_time = nrf_radio_cte_time_get(NRF_RADIO);
	rx_cte_type = nrf_radio_cte_type_get(NRF_RADIO);

	return lll_sync_cte_is_allowed(cte_type_mask, filter_policy, rx_cte_time, rx_cte_type);

#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING && CONFIG_BT_CTLR_CTEINLINE_SUPPORT */
	return SYNC_STAT_ALLOWED;
}
1450