/*
 * Copyright (c) 2020-2021 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr.h>
#include <soc.h>
#include <sys/byteorder.h>
#include <bluetooth/hci.h>

#include "util/util.h"
#include "util/mem.h"
#include "util/memq.h"
#include "util/mayfly.h"

#include "hal/cpu.h"
#include "hal/ccm.h"
#include "hal/radio.h"
#include "hal/ticker.h"

#include "ticker/ticker.h"

#include "pdu.h"

#include "lll.h"
#include "lll_clock.h"
#include "lll/lll_vendor.h"
#include "lll_chan.h"
#include "lll_scan.h"
#include "lll/lll_df_types.h"
#include "lll_sync.h"
#include "lll_sync_iso.h"

#include "ull_scan_types.h"
#include "ull_sync_types.h"

#include "ull_internal.h"
#include "ull_scan_internal.h"
#include "ull_sync_internal.h"
#include "ull_df_types.h"
#include "ull_df_internal.h"

#include "ll.h"

#define BT_DBG_ENABLED IS_ENABLED(CONFIG_BT_DEBUG_HCI_DRIVER)
#define LOG_MODULE_NAME bt_ctlr_ull_sync
#include "common/log.h"
#include <soc.h>
#include "hal/debug.h"

static int init_reset(void);
static inline struct ll_sync_set *sync_acquire(void);
static void sync_ticker_cleanup(struct ll_sync_set *sync, ticker_op_func stop_of_cb);
static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param);
static void ticker_op_cb(uint32_t status, void *param);
static void ticker_update_sync_op_cb(uint32_t status, void *param);
static void ticker_stop_op_cb(uint32_t status, void *param);
static void sync_lost(void *param);
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING) && \
	!defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
static struct pdu_cte_info *pdu_cte_info_get(struct pdu_adv *pdu);
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING && !CONFIG_BT_CTLR_CTEINLINE_SUPPORT */

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
static void ticker_update_op_status_give(uint32_t status, void *param);
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

static struct ll_sync_set ll_sync_pool[CONFIG_BT_PER_ADV_SYNC_MAX];
static void *sync_free;

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
/* Semaphore to wakeup thread on ticker API callback */
static struct k_sem sem_ticker_cb;
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

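/* NOTE: A single statically allocated mayfly, with its memq link, hands the
 * sync event prepare over from the ULL_HIGH ticker callback to the LLL
 * context. Its `fp` member is switched between lll_sync_create_prepare
 * (until the first AUX_SYNC_IND is received) and lll_sync_prepare thereafter.
 */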
static memq_link_t link_lll_prepare;
static struct mayfly mfy_lll_prepare = { 0, 0, &link_lll_prepare, NULL, lll_sync_prepare };

uint8_t ll_sync_create(uint8_t options, uint8_t sid, uint8_t adv_addr_type,
			    uint8_t *adv_addr, uint16_t skip,
			    uint16_t sync_timeout, uint8_t sync_cte_type)
{
	struct ll_scan_set *scan_coded;
	memq_link_t *link_sync_estab;
	memq_link_t *link_sync_lost;
	struct node_rx_hdr *node_rx;
	struct lll_sync *lll_sync;
	struct ll_scan_set *scan;
	struct ll_sync_set *sync;

	scan = ull_scan_set_get(SCAN_HANDLE_1M);
	if (!scan || scan->per_scan.sync) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
		if (!scan_coded || scan_coded->per_scan.sync) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}

	link_sync_estab = ll_rx_link_alloc();
	if (!link_sync_estab) {
		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	link_sync_lost = ll_rx_link_alloc();
	if (!link_sync_lost) {
		ll_rx_link_release(link_sync_estab);

		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	node_rx = ll_rx_alloc();
	if (!node_rx) {
		ll_rx_link_release(link_sync_lost);
		ll_rx_link_release(link_sync_estab);

		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	sync = sync_acquire();
	if (!sync) {
		ll_rx_release(node_rx);
		ll_rx_link_release(link_sync_lost);
		ll_rx_link_release(link_sync_estab);

		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

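	/* Options bit assignments below follow the HCI LE Periodic
	 * Advertising Create Sync command: bit 0 selects use of the Periodic
	 * Advertiser List (the filter policy), bit 1 requests reporting to be
	 * initially disabled.
	 */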
	scan->per_scan.state = LL_SYNC_STATE_IDLE;
	scan->per_scan.filter_policy = options & BIT(0);
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded->per_scan.state = LL_SYNC_STATE_IDLE;
		scan_coded->per_scan.filter_policy =
			scan->per_scan.filter_policy;
	}

	if (!scan->per_scan.filter_policy) {
		scan->per_scan.sid = sid;
		scan->per_scan.adv_addr_type = adv_addr_type;
		memcpy(scan->per_scan.adv_addr, adv_addr, BDADDR_SIZE);

		if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
			scan_coded->per_scan.sid = scan->per_scan.sid;
			scan_coded->per_scan.adv_addr_type =
				scan->per_scan.adv_addr_type;
			memcpy(scan_coded->per_scan.adv_addr,
			       scan->per_scan.adv_addr, BDADDR_SIZE);
		}
	}

	sync->skip = skip;
	sync->timeout = sync_timeout;

	/* Initialize sync context */
	node_rx->link = link_sync_estab;
	sync->node_rx_sync_estab = node_rx;
	sync->timeout_reload = 0U;
	sync->timeout_expire = 0U;

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	/* Reset Broadcast Isochronous Group Sync Establishment */
	sync->iso.sync_iso = NULL;
#endif /* CONFIG_BT_CTLR_SYNC_ISO */

	/* Initialize sync LLL context */
	lll_sync = &sync->lll;
	lll_sync->skip_prepare = 0U;
	lll_sync->skip_event = 0U;
	lll_sync->window_widening_prepare_us = 0U;
	lll_sync->window_widening_event_us = 0U;
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
	lll_sync->cte_type = sync_cte_type;
	lll_sync->filter_policy = scan->per_scan.filter_policy;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

	/* Reporting initially enabled/disabled: Options bit 1 set means
	 * "reporting initially disabled", so the receive enable flag is its
	 * negation.
	 */
	lll_sync->is_rx_enabled = !(options & BIT(1));

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	ull_df_sync_cfg_init(&lll_sync->df_cfg);
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	/* sync_lost node_rx */
	sync->node_rx_lost.hdr.link = link_sync_lost;

	/* Initialise ULL and LLL headers */
	ull_hdr_init(&sync->ull);
	lll_hdr_init(lll_sync, sync);

	/* Enable scanner to create sync */
	scan->per_scan.sync = sync;
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded->per_scan.sync = sync;
	}

	return 0;
}

uint8_t ll_sync_create_cancel(void **rx)
{
	struct ll_scan_set *scan_coded;
	memq_link_t *link_sync_estab;
	memq_link_t *link_sync_lost;
	struct node_rx_pdu *node_rx;
	struct ll_scan_set *scan;
	struct ll_sync_set *sync;
	struct node_rx_sync *se;

	scan = ull_scan_set_get(SCAN_HANDLE_1M);
	if (!scan || !scan->per_scan.sync) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
		if (!scan_coded || !scan_coded->per_scan.sync) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}

	/* Check for the race condition wherein sync is established while the
	 * sync context is being set to NULL.
	 *
	 * Setting `scan->per_scan.sync` to NULL represents cancellation
	 * requested in the thread context. Checking `sync->timeout_reload`
	 * confirms whether synchronization was established before
	 * `scan->per_scan.sync` was set to NULL.
	 */
	sync = scan->per_scan.sync;
	scan->per_scan.sync = NULL;
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded->per_scan.sync = NULL;
	}
	cpu_dmb();
	if (!sync || sync->timeout_reload) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	node_rx = (void *)sync->node_rx_sync_estab;
	link_sync_estab = node_rx->hdr.link;
	link_sync_lost = sync->node_rx_lost.hdr.link;

	ll_rx_link_release(link_sync_lost);
	ll_rx_link_release(link_sync_estab);
	ll_rx_release(node_rx);

	node_rx = (void *)&sync->node_rx_lost;
	node_rx->hdr.type = NODE_RX_TYPE_SYNC;
	node_rx->hdr.handle = 0xffff;

	/* NOTE: struct node_rx_lost has uint8_t member following the
	 *       struct node_rx_hdr to store the reason.
	 */
	se = (void *)node_rx->pdu;
	se->status = BT_HCI_ERR_OP_CANCELLED_BY_HOST;

	/* NOTE: Since NODE_RX_TYPE_SYNC is only generated from ULL context,
	 *       pass ULL context as parameter.
	 */
	node_rx->hdr.rx_ftr.param = sync;

	*rx = node_rx;

	return 0;
}

uint8_t ll_sync_terminate(uint16_t handle)
{
	memq_link_t *link_sync_lost;
	struct ll_sync_set *sync;
	int err;

	sync = ull_sync_is_enabled_get(handle);
	if (!sync) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	err = ull_ticker_stop_with_mark(TICKER_ID_SCAN_SYNC_BASE + handle,
					sync, &sync->lll);
	LL_ASSERT(err == 0 || err == -EALREADY);
	if (err) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	link_sync_lost = sync->node_rx_lost.hdr.link;
	ll_rx_link_release(link_sync_lost);

	ull_sync_release(sync);

	return 0;
}

uint8_t ll_sync_recv_enable(uint16_t handle, uint8_t enable)
{
	/* TODO: */
	return BT_HCI_ERR_CMD_DISALLOWED;
}

int ull_sync_init(void)
{
	int err;

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

int ull_sync_reset(void)
{
	uint16_t handle;
	void *rx;
	int err;

	(void)ll_sync_create_cancel(&rx);

	for (handle = 0U; handle < CONFIG_BT_PER_ADV_SYNC_MAX; handle++) {
		(void)ll_sync_terminate(handle);
	}

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

struct ll_sync_set *ull_sync_set_get(uint16_t handle)
{
	if (handle >= CONFIG_BT_PER_ADV_SYNC_MAX) {
		return NULL;
	}

	return &ll_sync_pool[handle];
}

struct ll_sync_set *ull_sync_is_enabled_get(uint16_t handle)
{
	struct ll_sync_set *sync;

	sync = ull_sync_set_get(handle);
	if (!sync || !sync->timeout_reload) {
		return NULL;
	}

	return sync;
}

struct ll_sync_set *ull_sync_is_valid_get(struct ll_sync_set *sync)
{
	if (((uint8_t *)sync < (uint8_t *)ll_sync_pool) ||
	    ((uint8_t *)sync > ((uint8_t *)ll_sync_pool +
	     (sizeof(struct ll_sync_set) * (CONFIG_BT_PER_ADV_SYNC_MAX - 1))))) {
		return NULL;
	}

	return sync;
}

uint16_t ull_sync_handle_get(struct ll_sync_set *sync)
{
	return mem_index_get(sync, ll_sync_pool, sizeof(struct ll_sync_set));
}

uint16_t ull_sync_lll_handle_get(struct lll_sync *lll)
{
	return ull_sync_handle_get(HDR_LLL2ULL(lll));
}

void ull_sync_release(struct ll_sync_set *sync)
{
	mem_release(sync, &sync_free);
}

void ull_sync_setup(struct ll_scan_set *scan, struct ll_scan_aux_set *aux,
		    struct node_rx_hdr *node_rx, struct pdu_adv_sync_info *si)
{
	uint32_t ticks_slot_overhead;
	uint32_t ticks_slot_offset;
	struct ll_sync_set *sync;
	struct node_rx_sync *se;
	struct node_rx_ftr *ftr;
	uint32_t sync_offset_us;
	uint32_t ready_delay_us;
	struct node_rx_pdu *rx;
	uint8_t *data_chan_map;
	struct lll_sync *lll;
	uint16_t sync_handle;
	uint32_t interval_us;
	struct pdu_adv *pdu;
	uint16_t interval;
	uint8_t chm_last;
	uint32_t ret;
	uint8_t sca;

	/* Populate the LLL context */
	sync = scan->per_scan.sync;
	lll = &sync->lll;

	/* Copy channel map from sca_chm field in sync_info structure, and
	 * clear the SCA bits.
	 */
	chm_last = lll->chm_first;
	lll->chm_last = chm_last;
	data_chan_map = lll->chm[chm_last].data_chan_map;
	(void)memcpy(data_chan_map, si->sca_chm,
		     sizeof(lll->chm[chm_last].data_chan_map));
	data_chan_map[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &=
		~PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK;
	lll->chm[chm_last].data_chan_count =
		util_ones_count_get(data_chan_map,
				    sizeof(lll->chm[chm_last].data_chan_map));
	if (lll->chm[chm_last].data_chan_count < 2) {
		/* Ignore sync setup, invalid available channel count */
		return;
	}

	memcpy(lll->access_addr, &si->aa, sizeof(lll->access_addr));
	lll->data_chan_id = lll_chan_id(lll->access_addr);
	memcpy(lll->crc_init, si->crc_init, sizeof(lll->crc_init));
	lll->event_counter = si->evt_cntr;
	lll->phy = aux->lll.phy;

	/* Extract the SCA value from the sca_chm field of the sync_info
	 * structure.
	 */
	sca = (si->sca_chm[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &
	       PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK) >>
	      PDU_SYNC_INFO_SCA_CHM_SCA_BIT_POS;

	interval = sys_le16_to_cpu(si->interval);
	interval_us = interval * CONN_INT_UNIT_US;

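	/* NOTE: sync->timeout is in 10 ms units; it is converted to
	 * microseconds before RADIO_SYNC_EVENTS derives the number of
	 * periodic advertising events that make up the supervision timeout.
	 */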
	sync->timeout_reload = RADIO_SYNC_EVENTS((sync->timeout * 10U * 1000U),
						 interval_us);

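	/* Worst-case drift accumulates with the combined clock accuracy of
	 * both sides over one interval, rounded up:
	 * widening_us = ceil((ppm_local + ppm_peer) * interval_us / 1000000).
	 */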
	lll->window_widening_periodic_us =
		(((lll_clock_ppm_local_get() + lll_clock_ppm_get(sca)) *
		  interval_us) + (1000000 - 1)) / 1000000U;
	lll->window_widening_max_us = (interval_us >> 1) - EVENT_IFS_US;
	if (si->offs_units) {
		lll->window_size_event_us = OFFS_UNIT_300_US;
	} else {
		lll->window_size_event_us = OFFS_UNIT_30_US;
	}

	/* Reset the sync context allocated to scan contexts */
	scan->per_scan.sync = NULL;
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		struct ll_scan_set *scan_1m;

		scan_1m = ull_scan_set_get(SCAN_HANDLE_1M);
		if (scan == scan_1m) {
			struct ll_scan_set *scan_coded;

			scan_coded = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
			scan_coded->per_scan.sync = NULL;
		} else {
			scan_1m->per_scan.sync = NULL;
		}
	}

	sync_handle = ull_sync_handle_get(sync);

	/* Prepare and dispatch sync notification */
	rx = (void *)sync->node_rx_sync_estab;
	rx->hdr.type = NODE_RX_TYPE_SYNC;
	rx->hdr.handle = sync_handle;
	rx->hdr.rx_ftr.param = scan;
	se = (void *)rx->pdu;
	se->interval = interval;
	se->phy = lll->phy;
	se->sca = sca;

	/* Calculate offset and schedule sync radio events */
	ftr = &node_rx->rx_ftr;
	pdu = (void *)((struct node_rx_pdu *)node_rx)->pdu;

	ready_delay_us = lll_radio_rx_ready_delay_get(lll->phy, 1);

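	/* The SyncInfo offset is a count of 30 us or 300 us units (per
	 * offs_units) from the start of the PDU that carried it. Starting
	 * from the PDU end timestamp, add the offset, subtract the PDU
	 * airtime to reference the PDU start, then pull in by the ready
	 * delay, jitter and ticker resolution margins so the receiver is
	 * listening ahead of the first sync anchor point.
	 */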
	sync_offset_us = ftr->radio_end_us;
	sync_offset_us += (uint32_t)si->offs * lll->window_size_event_us;
	/* offs_adjust may be 1 only if sync setup by LL_PERIODIC_SYNC_IND */
	sync_offset_us += (si->offs_adjust ? OFFS_ADJUST_US : 0U);
	sync_offset_us -= PDU_AC_US(pdu->len, lll->phy, ftr->phy_flags);
	sync_offset_us -= EVENT_TICKER_RES_MARGIN_US;
	sync_offset_us -= EVENT_JITTER_US;
	sync_offset_us -= ready_delay_us;

	interval_us -= lll->window_widening_periodic_us;

	/* TODO: active_to_start feature port */
	sync->ull.ticks_active_to_start = 0U;
	sync->ull.ticks_prepare_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	sync->ull.ticks_preempt_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
	sync->ull.ticks_slot = HAL_TICKER_US_TO_TICKS(
			EVENT_OVERHEAD_START_US + ready_delay_us +
			PDU_AC_MAX_US(PDU_AC_EXT_PAYLOAD_SIZE_MAX, lll->phy) +
			EVENT_OVERHEAD_END_US);

	ticks_slot_offset = MAX(sync->ull.ticks_active_to_start,
				sync->ull.ticks_prepare_to_start);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}
	ticks_slot_offset += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

	mfy_lll_prepare.fp = lll_sync_create_prepare;

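	/* Start the periodic sync ticker: first expiry lands sync_offset_us
	 * after the scan PDU anchor, with the slot offset subtracted from the
	 * anchor so that the prepare overhead elapses before the radio event;
	 * it then repeats every (window widened) periodic interval.
	 */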
	ret = ticker_start(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			   (TICKER_ID_SCAN_SYNC_BASE + sync_handle),
			   ftr->ticks_anchor - ticks_slot_offset,
			   HAL_TICKER_US_TO_TICKS(sync_offset_us),
			   HAL_TICKER_US_TO_TICKS(interval_us),
			   HAL_TICKER_REMAINDER(interval_us),
			   TICKER_NULL_LAZY,
			   (sync->ull.ticks_slot + ticks_slot_overhead),
			   ticker_cb, sync, ticker_op_cb, (void *)__LINE__);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}

void ull_sync_established_report(memq_link_t *link, struct node_rx_hdr *rx)
{
	struct node_rx_pdu *rx_establ;
	struct ll_sync_set *ull_sync;
	struct node_rx_ftr *ftr;
	struct node_rx_sync *se;
	struct lll_sync *lll;

	ftr = &rx->rx_ftr;

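	/* Two notifications may result from this PDU: a one-time sync
	 * established report towards the host, and the AUX_SYNC_IND payload
	 * forwarded as a periodic advertising report. With CTE type filtering
	 * enabled, either may be suppressed based on the CTE validation
	 * status.
	 */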
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
	enum sync_status sync_status;

#if defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
	sync_status = ftr->sync_status;
#else
	struct pdu_cte_info *rx_cte_info;

	lll = ftr->param;

	rx_cte_info = pdu_cte_info_get((struct pdu_adv *)((struct node_rx_pdu *)rx)->pdu);
	if (rx_cte_info != NULL) {
		sync_status = lll_sync_cte_is_allowed(lll->cte_type, lll->filter_policy,
						      rx_cte_info->time, rx_cte_info->type);
	} else {
		sync_status = lll_sync_cte_is_allowed(lll->cte_type, lll->filter_policy, 0,
						      BT_HCI_LE_NO_CTE);
	}
#endif /* CONFIG_BT_CTLR_CTEINLINE_SUPPORT */

	/* Send the periodic advertising sync established report when the sync
	 * has the correct CTE type, or when the CTE type is incorrect and the
	 * filter policy doesn't allow scanning to continue.
	 */
	if (sync_status != SYNC_STAT_READY_OR_CONT_SCAN) {
#else
	if (1) {
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

		/* Set the sync handle corresponding to the LLL context passed in the node rx
		 * footer field.
		 */
		lll = ftr->param;
		ull_sync = HDR_LLL2ULL(lll);

		/* Prepare and dispatch sync notification */
		rx_establ = (void *)ull_sync->node_rx_sync_estab;
		rx_establ->hdr.type = NODE_RX_TYPE_SYNC;
		se = (void *)rx_establ->pdu;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
		se->status = (ftr->sync_status == SYNC_STAT_TERM) ?
					   BT_HCI_ERR_UNSUPP_REMOTE_FEATURE :
					   BT_HCI_ERR_SUCCESS;

#if !defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
		/* Notify done event handler to terminate sync scan if required. */
		ull_sync->sync_term = sync_status == SYNC_STAT_TERM;
#endif /* !CONFIG_BT_CTLR_CTEINLINE_SUPPORT */
#else
		se->status = BT_HCI_ERR_SUCCESS;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */
		ll_rx_put(rx_establ->hdr.link, rx_establ);
		ll_rx_sched();
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
	/* Handle the periodic advertising PDU and send the periodic
	 * advertising scan report when the sync was found or was established
	 * in the past. The report is not sent if scanning is terminated due
	 * to a wrong CTE type.
	 */
	if (sync_status != SYNC_STAT_TERM) {
#else
	if (1) {
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */
		/* Switch the sync event prepare function to the one
		 * responsible for receiving regular PDUs.
		 */
		mfy_lll_prepare.fp = lll_sync_prepare;

		/* Change node type to appropriately handle periodic advertising PDU report */
		rx->type = NODE_RX_TYPE_SYNC_REPORT;
		ull_scan_aux_setup(link, rx);
	}
}

void ull_sync_done(struct node_rx_event_done *done)
{
	uint32_t ticks_drift_minus;
	uint32_t ticks_drift_plus;
	struct ll_sync_set *sync;
	uint16_t elapsed_event;
	struct lll_sync *lll;
	uint16_t skip_event;
	uint16_t lazy;
	uint8_t force;

	/* Get reference to ULL context */
	sync = CONTAINER_OF(done->param, struct ll_sync_set, ull);
	lll = &sync->lll;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
#if defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
	if (done->extra.sync_term) {
#else
	if (sync->sync_term) {
#endif /* CONFIG_BT_CTLR_CTEINLINE_SUPPORT */
		/* Stop periodic advertising scan ticker */
		sync_ticker_cleanup(sync, NULL);
	} else
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */
	{
		/* Events elapsed used in timeout checks below */
		skip_event = lll->skip_event;
		elapsed_event = skip_event + 1;

		/* Sync drift compensation and new skip calculation */
		ticks_drift_plus = 0U;
		ticks_drift_minus = 0U;
		if (done->extra.trx_cnt) {
			/* Calculate drift in ticks unit */
			ull_drift_ticks_get(done, &ticks_drift_plus, &ticks_drift_minus);

			/* Enforce skip */
			lll->skip_event = sync->skip;
		}

		/* Reset supervision countdown */
		if (done->extra.crc_valid) {
			sync->timeout_expire = 0U;
		}
		/* If the anchor point is not synced, start the timeout
		 * countdown and break the skip, if any.
		 */
		else if (!sync->timeout_expire) {
			sync->timeout_expire = sync->timeout_reload;
		}

		/* check timeout */
		force = 0U;
		if (sync->timeout_expire) {
			if (sync->timeout_expire > elapsed_event) {
				sync->timeout_expire -= elapsed_event;

				/* break skip */
				lll->skip_event = 0U;

				if (skip_event) {
					force = 1U;
				}
			} else {
				sync_ticker_cleanup(sync, ticker_stop_op_cb);

				return;
			}
		}

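		/* NOTE: ticker_update() treats a lazy argument of 0 as "no
		 * change", hence the new skip count is passed using the
		 * +1 encoding below.
		 */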
		/* Check if skip needs update */
		lazy = 0U;
		if ((force) || (skip_event != lll->skip_event)) {
			lazy = lll->skip_event + 1U;
		}

		/* Update Sync ticker instance */
		if (ticks_drift_plus || ticks_drift_minus || lazy || force) {
			uint16_t sync_handle = ull_sync_handle_get(sync);
			uint32_t ticker_status;

			/* The call to ticker_update can fail under a race
			 * condition wherein the periodic sync role is being
			 * stopped but, at the same time, is preempted by a
			 * periodic sync event that gets into the close state.
			 * Accept failure when the periodic sync role is being
			 * stopped.
			 */
			ticker_status =
				ticker_update(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
					      (TICKER_ID_SCAN_SYNC_BASE + sync_handle),
					      ticks_drift_plus, ticks_drift_minus, 0, 0, lazy,
					      force, ticker_update_sync_op_cb, sync);
			LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
				  (ticker_status == TICKER_STATUS_BUSY) ||
				  ((void *)sync == ull_disable_mark_get()));
		}
	}
}

void ull_sync_chm_update(uint8_t sync_handle, uint8_t *acad, uint8_t acad_len)
{
	struct pdu_adv_sync_chm_upd_ind *chm_upd_ind;
	struct ll_sync_set *sync;
	struct lll_sync *lll;
	uint8_t chm_last;
	uint16_t ad_len;

	/* Get reference to LLL context */
	sync = ull_sync_set_get(sync_handle);
	LL_ASSERT(sync);
	lll = &sync->lll;

	/* Ignore if already in progress */
	if (lll->chm_last != lll->chm_first) {
		return;
	}

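	/* ACAD is a sequence of AD structures: a length octet (covering the
	 * AD type and data), the AD type octet, then the data.
	 */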
	/* Find the Channel Map Update Indication */
	do {
		/* Pick the length and find the Channel Map Update Indication */
		ad_len = acad[0];
		if (ad_len && (acad[1] == BT_DATA_CHANNEL_MAP_UPDATE_IND)) {
			break;
		}

		/* Add length field size */
		ad_len += 1U;
		if (ad_len < acad_len) {
			acad_len -= ad_len;
		} else {
			return;
		}

		/* Move to next AD data */
		acad += ad_len;
	} while (acad_len);

	/* Validate the size of the Channel Map Update Indication */
	if (ad_len != (sizeof(*chm_upd_ind) + 1U)) {
		return;
	}

	/* Pick the parameters into the procedure context */
	chm_last = lll->chm_last + 1U;
	if (chm_last == DOUBLE_BUFFER_SIZE) {
		chm_last = 0U;
	}

	chm_upd_ind = (void *)&acad[2];
	(void)memcpy(lll->chm[chm_last].data_chan_map, chm_upd_ind->chm,
		     sizeof(lll->chm[chm_last].data_chan_map));
	lll->chm[chm_last].data_chan_count =
		util_ones_count_get(lll->chm[chm_last].data_chan_map,
				    sizeof(lll->chm[chm_last].data_chan_map));
	if (lll->chm[chm_last].data_chan_count < 2) {
		/* Ignore channel map, invalid available channel count */
		return;
	}

	lll->chm_instant = sys_le16_to_cpu(chm_upd_ind->instant);

	/* Set Channel Map Update Procedure in progress */
	lll->chm_last = chm_last;
}

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
/* @brief Function updates the periodic sync slot duration.
 *
 * @param[in] sync              Pointer to sync instance
 * @param[in] slot_plus_us      Number of microseconds to add to the ticker slot
 * @param[in] slot_minus_us     Number of microseconds to subtract from the ticker slot
 *
 * @retval 0            Successful ticker slot update.
 * @retval -ENOENT      Ticker node related with provided sync is already stopped.
 * @retval -ENOMEM      Couldn't enqueue update ticker job.
 * @retval -EFAULT      Something else went wrong.
 */
int ull_sync_slot_update(struct ll_sync_set *sync, uint32_t slot_plus_us,
			 uint32_t slot_minus_us)
{
	uint32_t ret;
	uint32_t ret_cb;

	ret_cb = TICKER_STATUS_BUSY;
	ret = ticker_update(TICKER_INSTANCE_ID_CTLR,
			    TICKER_USER_ID_THREAD,
			    (TICKER_ID_SCAN_SYNC_BASE +
			    ull_sync_handle_get(sync)),
			    0, 0,
			    slot_plus_us,
			    slot_minus_us,
			    0, 0,
			    ticker_update_op_status_give,
			    (void *)&ret_cb);
	if (ret == TICKER_STATUS_BUSY || ret == TICKER_STATUS_SUCCESS) {
		/* Wait for the callback, or clear the semaphore if the
		 * callback was already executed.
		 */
		k_sem_take(&sem_ticker_cb, K_FOREVER);

		if (ret_cb == TICKER_STATUS_FAILURE) {
			return -EFAULT; /* Something went wrong */
		} else {
			return 0;
		}
	} else {
		if (ret_cb != TICKER_STATUS_BUSY) {
			/* Ticker callback was executed and job enqueue was successful.
			 * Call k_sem_take to clear ticker callback semaphore.
			 */
			k_sem_take(&sem_ticker_cb, K_FOREVER);
		}
		/* Ticker was already stopped or the job was not enqueued. */
		return (ret_cb == TICKER_STATUS_FAILURE) ? -ENOENT : -ENOMEM;
	}
}
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

static int init_reset(void)
{
	/* Initialize sync pool. */
	mem_init(ll_sync_pool, sizeof(struct ll_sync_set),
		 sizeof(ll_sync_pool) / sizeof(struct ll_sync_set),
		 &sync_free);

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	k_sem_init(&sem_ticker_cb, 0, 1);
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	return 0;
}

static inline struct ll_sync_set *sync_acquire(void)
{
	return mem_acquire(&sync_free);
}

static void sync_ticker_cleanup(struct ll_sync_set *sync, ticker_op_func stop_of_cb)
{
	uint16_t sync_handle = ull_sync_handle_get(sync);
	uint32_t ret;

	/* Stop Periodic Sync Ticker */
	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			  TICKER_ID_SCAN_SYNC_BASE + sync_handle, stop_of_cb, (void *)sync);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}

static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param)
{
	static struct lll_prepare_param p;
	struct ll_sync_set *sync = param;
	struct lll_sync *lll;
	uint32_t ret;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_O(1);

	lll = &sync->lll;

	/* Increment prepare reference count */
	ref = ull_ref_inc(&sync->ull);
	LL_ASSERT(ref);

	/* Append timing parameters */
	p.ticks_at_expire = ticks_at_expire;
	p.remainder = remainder;
	p.lazy = lazy;
	p.force = force;
	p.param = lll;
	mfy_lll_prepare.param = &p;

	/* Kick LLL prepare */
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, 0, &mfy_lll_prepare);
	LL_ASSERT(!ret);

	DEBUG_RADIO_PREPARE_O(1);
}

static void ticker_op_cb(uint32_t status, void *param)
{
	ARG_UNUSED(param);
	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
}

static void ticker_update_sync_op_cb(uint32_t status, void *param)
{
	LL_ASSERT(status == TICKER_STATUS_SUCCESS ||
		  param == ull_disable_mark_get());
}

static void ticker_stop_op_cb(uint32_t status, void *param)
{
	uint32_t retval;
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, sync_lost};

	LL_ASSERT(status == TICKER_STATUS_SUCCESS);

	mfy.param = param;

	retval = mayfly_enqueue(TICKER_USER_ID_ULL_LOW, TICKER_USER_ID_ULL_HIGH,
				0, &mfy);
	LL_ASSERT(!retval);
}

static void sync_lost(void *param)
{
	struct ll_sync_set *sync = param;
	struct node_rx_pdu *rx;

	/* Generate Periodic advertising sync lost */
	rx = (void *)&sync->node_rx_lost;
	rx->hdr.handle = ull_sync_handle_get(sync);
	rx->hdr.type = NODE_RX_TYPE_SYNC_LOST;
	rx->hdr.rx_ftr.param = sync;

	/* Enqueue the sync lost towards ULL context */
	ll_rx_put(rx->hdr.link, rx);
	ll_rx_sched();
}

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
static void ticker_update_op_status_give(uint32_t status, void *param)
{
	*((uint32_t volatile *)param) = status;

	k_sem_give(&sem_ticker_cb);
}
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING) && \
	!defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
static struct pdu_cte_info *pdu_cte_info_get(struct pdu_adv *pdu)
{
	struct pdu_adv_com_ext_adv *com_hdr;
	struct pdu_adv_ext_hdr *hdr;
	uint8_t *dptr;

	com_hdr = &pdu->adv_ext_ind;
	hdr = &com_hdr->ext_hdr;

	if (!com_hdr->ext_hdr_len || !hdr->cte_info) {
		return NULL;
	}

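	/* Optional fields follow the flags octet of the common extended
	 * advertising header in a fixed order (AdvA, TargetA, CTEInfo, ...).
	 * AUX_SYNC_IND and its AUX_CHAIN_IND PDUs must not carry AdvA or
	 * TargetA, so when present the CTEInfo field starts right after the
	 * flags.
	 */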
	/* Skip flags in extended advertising header */
	dptr = hdr->data;

	/* Make sure there are no fields that are not allowed for AUX_SYNC_IND and AUX_CHAIN_IND */
	LL_ASSERT(!hdr->adv_addr);
	LL_ASSERT(!hdr->tgt_addr);

	return (struct pdu_cte_info *)dptr;
}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING && !CONFIG_BT_CTLR_CTEINLINE_SUPPORT */