1 /*
2  * Copyright (c) 2020-2021 Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <stdlib.h>
8 #include <zephyr/kernel.h>
9 #include <soc.h>
10 #include <zephyr/sys/byteorder.h>
11 #include <zephyr/bluetooth/hci_types.h>
12 
13 #include "util/util.h"
14 #include "util/mem.h"
15 #include "util/memq.h"
16 #include "util/mayfly.h"
17 #include "util/dbuf.h"
18 
19 #include "hal/cpu.h"
20 #include "hal/ccm.h"
21 #include "hal/radio.h"
22 #include "hal/ticker.h"
23 
24 #include "ticker/ticker.h"
25 
26 #include "pdu_df.h"
27 #include "lll/pdu_vendor.h"
28 #include "pdu.h"
29 
30 #include "lll.h"
31 #include "lll/lll_adv_types.h"
32 #include "lll_adv.h"
33 #include "lll/lll_adv_pdu.h"
34 #include "lll_clock.h"
35 #include "lll/lll_vendor.h"
36 #include "lll_chan.h"
37 #include "lll_scan.h"
38 #include "lll/lll_df_types.h"
39 #include "lll_conn.h"
40 #include "lll_conn_iso.h"
41 #include "lll_sync.h"
42 #include "lll_sync_iso.h"
43 
44 #include "isoal.h"
45 
46 #include "ull_tx_queue.h"
47 
48 #include "ull_filter.h"
49 #include "ull_iso_types.h"
50 #include "ull_scan_types.h"
51 #include "ull_sync_types.h"
52 #include "ull_conn_types.h"
53 #include "ull_adv_types.h"
54 #include "ull_conn_iso_types.h"
55 
56 #include "ull_internal.h"
57 #include "ull_adv_internal.h"
58 #include "ull_scan_internal.h"
59 #include "ull_sync_internal.h"
60 #include "ull_conn_internal.h"
61 #include "ull_conn_iso_internal.h"
62 #include "ull_df_types.h"
63 #include "ull_df_internal.h"
64 
65 #include "ull_llcp.h"
66 #include "ll.h"
67 
68 #include "hal/debug.h"
69 
70 /* Check that timeout_reload member is at safe offset when ll_sync_set is
71  * allocated using mem interface. timeout_reload being non-zero is used to
72  * indicate that a sync is established. And is used to check for sync being
73  * terminated under race conditions between HCI Tx and Rx thread when
74  * Periodic Advertising Reports are generated.
75  */
76 MEM_FREE_MEMBER_ACCESS_BUILD_ASSERT(struct ll_sync_set, timeout_reload);
77 
78 static struct ll_sync_set *ull_sync_create(uint8_t sid, uint16_t timeout, uint16_t skip,
79 					   uint8_t cte_type, uint8_t rx_enable, uint8_t nodups);
80 static int init_reset(void);
81 static inline struct ll_sync_set *sync_acquire(void);
82 static void sync_ticker_cleanup(struct ll_sync_set *sync, ticker_op_func stop_op_cb);
83 static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
84 		      uint32_t remainder, uint16_t lazy, uint8_t force,
85 		      void *param);
86 static void ticker_start_op_cb(uint32_t status, void *param);
87 static void ticker_update_op_cb(uint32_t status, void *param);
88 static void ticker_stop_sync_expire_op_cb(uint32_t status, void *param);
89 static void sync_expire(void *param);
90 static void ticker_stop_sync_lost_op_cb(uint32_t status, void *param);
91 static void sync_lost(void *param);
92 #if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)
93 static bool peer_sid_sync_exists(uint8_t const peer_id_addr_type,
94 				 uint8_t const *const peer_id_addr,
95 				 uint8_t sid);
96 #endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC */
97 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING) && \
98 	!defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
99 static struct pdu_cte_info *pdu_cte_info_get(struct pdu_adv *pdu);
100 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING && !CONFIG_BT_CTLR_CTEINLINE_SUPPORT */
101 
102 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
103 static void ticker_update_op_status_give(uint32_t status, void *param);
104 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
105 
106 static struct ll_sync_set ll_sync_pool[CONFIG_BT_PER_ADV_SYNC_MAX];
107 static void *sync_free;
108 
109 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
110 /* Semaphore to wakeup thread on ticker API callback */
111 static struct k_sem sem_ticker_cb;
112 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
113 
/* @brief Link Layer interface function corresponding to HCI LE Periodic
 *        Advertising Create Sync command.
 *
 * @param[in] options       Bit 0: use Periodic Advertiser List.
 *                          Bit 1: reporting initially disabled.
 *                          Bit 2: duplicate filtering initially enabled.
 * @param[in] sid           Advertising SID to synchronize to.
 * @param[in] adv_addr_type Advertiser address type (ignored when list used).
 * @param[in] adv_addr      Advertiser address (ignored when list used).
 * @param[in] skip          Number of periodic advertising events that may be skipped.
 * @param[in] sync_timeout  Synchronization timeout, in 10 ms units.
 * @param[in] sync_cte_type CTE type based filtering of periodic advertising.
 *
 * @return HCI error code, 0 on success.
 */
uint8_t ll_sync_create(uint8_t options, uint8_t sid, uint8_t adv_addr_type,
			    uint8_t *adv_addr, uint16_t skip,
			    uint16_t sync_timeout, uint8_t sync_cte_type)
{
	struct ll_scan_set *scan_coded;
	struct ll_scan_set *scan;
	struct ll_sync_set *sync;
	uint8_t rx_enable;
	uint8_t nodups;

	/* Disallowed when scanner context is unavailable or a sync create is
	 * already pending on it.
	 */
	scan = ull_scan_set_get(SCAN_HANDLE_1M);
	if (!scan || scan->periodic.sync) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
		if (!scan_coded || scan_coded->periodic.sync) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}

#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)
	/* Do not sync twice to the same peer and same SID */
	if (((options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_USE_LIST) == 0U) &&
	    peer_sid_sync_exists(adv_addr_type, adv_addr, sid)) {
		return BT_HCI_ERR_CONN_ALREADY_EXISTS;
	}
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC */

	rx_enable = !(options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_REPORTS_DISABLED);
	nodups = (options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_FILTER_DUPLICATE) ? 1U : 0U;

	sync = ull_sync_create(sid, sync_timeout, skip, sync_cte_type, rx_enable, nodups);
	if (!sync) {
		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	scan->periodic.cancelled = 0U;
	scan->periodic.state = LL_SYNC_STATE_IDLE;
	scan->periodic.param = NULL;
	scan->periodic.filter_policy =
		options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_USE_LIST;
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded->periodic.cancelled = 0U;
		scan_coded->periodic.state = LL_SYNC_STATE_IDLE;
		scan_coded->periodic.param = NULL;
		scan_coded->periodic.filter_policy =
			scan->periodic.filter_policy;
	}

	/* Remember the peer address when periodic advertiser list is not
	 * used.
	 * NOTE: Peer address will be filled/overwritten with correct identity
	 * address on sync setup when privacy is enabled.
	 * NOTE: This is equivalent to checking !scan->periodic.filter_policy,
	 *       which is assigned from the same option bit above; keep a
	 *       single copy of the peer address here.
	 */
	if ((options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_USE_LIST) == 0U) {
		sync->peer_id_addr_type = adv_addr_type;
		(void)memcpy(sync->peer_id_addr, adv_addr,
			     sizeof(sync->peer_id_addr));
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
	/* Set filter policy in lll_sync */
	sync->lll.filter_policy = scan->periodic.filter_policy;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

	/* Enable scanner to create sync */
	scan->periodic.sync = sync;

#if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
	scan->lll.is_sync = 1U;
#endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded->periodic.sync = sync;

#if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
		scan_coded->lll.is_sync = 1U;
#endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
	}

	return 0;
}
202 
203 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
/* @brief Setup a periodic advertising sync using the SyncInfo received via
 *        Periodic Advertising Sync Transfer (LL_PERIODIC_SYNC_IND).
 *
 * Populates the LLL sync context (channel map, access address, CRC init,
 * event counter, window widening), prepares the PAST received node_rx event
 * for later dispatch, compensates the first sync anchor for the connection
 * event offset and accumulated clock drift, and starts the sync ticker.
 *
 * @param[in] conn                  Connection on which the PDU was received.
 * @param[in] service_data          Service_Data value carried in the PDU.
 * @param[in] sync                  Sync context to be set up.
 * @param[in] si                    SyncInfo field from the received PDU.
 * @param[in] conn_evt_offset       Signed offset, in connection events,
 *                                  applied to the sync anchor point.
 * @param[in] last_pa_event_counter PA event counter reference used to compute
 *                                  the event delta for drift calculation.
 * @param[in] sync_conn_event_count Connection event count at which the sender
 *                                  sampled the sync info; used for drift on
 *                                  the ACL side.
 * @param[in] sender_sca            Sleep clock accuracy of the sender.
 */
void ull_sync_setup_from_sync_transfer(struct ll_conn *conn, uint16_t service_data,
				       struct ll_sync_set *sync, struct pdu_adv_sync_info *si,
				       int16_t conn_evt_offset, uint16_t last_pa_event_counter,
				       uint16_t sync_conn_event_count, uint8_t sender_sca)
{
	struct node_rx_past_received *se_past;
	uint32_t ticks_slot_overhead;
	uint32_t ticks_slot_offset;
	uint32_t conn_interval_us;
	uint32_t sync_offset_us;
	uint32_t ready_delay_us;
	struct node_rx_pdu *rx;
	uint8_t *data_chan_map;
	struct lll_sync *lll;
	uint32_t interval_us;
	uint32_t slot_us;
	uint32_t ticks_anchor;
	uint8_t chm_last;
	uint32_t ret;
	uint16_t interval;
	uint16_t sync_handle;
	uint8_t sca;

	lll = &sync->lll;

	/* Copy channel map from sca_chm field in sync_info structure, and
	 * clear the SCA bits.
	 */
	chm_last = lll->chm_first;
	lll->chm_last = chm_last;
	data_chan_map = lll->chm[chm_last].data_chan_map;
	(void)memcpy(data_chan_map, si->sca_chm,
		     sizeof(lll->chm[chm_last].data_chan_map));
	data_chan_map[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &=
		~PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK;
	lll->chm[chm_last].data_chan_count =
		util_ones_count_get(data_chan_map,
				    sizeof(lll->chm[chm_last].data_chan_map));
	if (lll->chm[chm_last].data_chan_count < CHM_USED_COUNT_MIN) {
		/* Ignore sync setup, invalid available channel count */
		return;
	}

	/* Take over access address, CRC init and event counter from the
	 * received SyncInfo.
	 */
	memcpy(lll->access_addr, si->aa, sizeof(lll->access_addr));
	lll->data_chan_id = lll_chan_id(lll->access_addr);
	memcpy(lll->crc_init, si->crc_init, sizeof(lll->crc_init));
	lll->event_counter = sys_le16_to_cpu(si->evt_cntr);

	interval = sys_le16_to_cpu(si->interval);
	interval_us = interval * PERIODIC_INT_UNIT_US;

	/* Convert timeout from 10 ms units to periodic event (interval) units */
	if (sync->timeout != 0  && interval_us != 0) {
		sync->timeout_reload = RADIO_SYNC_EVENTS((sync->timeout * 10U *
						  USEC_PER_MSEC), interval_us);
	}

	/* Adjust Skip value so that there is minimum of 6 events that can be
	 * listened to before Sync_Timeout occurs.
	 * The adjustment of the skip value is controller implementation
	 * specific and not specified by the Bluetooth Core Specification v5.3.
	 * The Controller `may` use the Skip value, and the implementation here
	 * covers a case where Skip value could lead to less events being
	 * listened to until Sync_Timeout. Listening to more consecutive events
	 * before Sync_Timeout increases probability of retaining the Periodic
	 * Synchronization.
	 */
	if (sync->timeout_reload > CONN_ESTAB_COUNTDOWN) {
		uint16_t skip_max = sync->timeout_reload - CONN_ESTAB_COUNTDOWN;

		if (sync->skip > skip_max) {
			sync->skip = skip_max;
		}
	}

	sync->sync_expire = CONN_ESTAB_COUNTDOWN;

	/* Extract the SCA value from the sca_chm field of the sync_info
	 * structure.
	 */
	sca = (si->sca_chm[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &
	       PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK) >>
	      PDU_SYNC_INFO_SCA_CHM_SCA_BIT_POS;

	lll->sca = sca;

	/* Window widening grows per event with the combined local and remote
	 * clock accuracy; capped below at half the interval less the IFS.
	 */
	lll->window_widening_periodic_us =
		DIV_ROUND_UP(((lll_clock_ppm_local_get() +
				   lll_clock_ppm_get(sca)) *
				  interval_us), USEC_PER_SEC);
	lll->window_widening_max_us = (interval_us >> 1) - EVENT_IFS_US;
	if (PDU_ADV_SYNC_INFO_OFFS_UNITS_GET(si)) {
		lll->window_size_event_us = OFFS_UNIT_300_US;
	} else {
		lll->window_size_event_us = OFFS_UNIT_30_US;
	}

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	lll->node_cte_incomplete = NULL;
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	/* Prepare Periodic Advertising Sync Transfer Received event (dispatched later) */
	sync_handle = ull_sync_handle_get(sync);
	rx = (void *)sync->node_rx_sync_estab;
	rx->hdr.type = NODE_RX_TYPE_SYNC_TRANSFER_RECEIVED;
	rx->hdr.handle = sync_handle;
	rx->rx_ftr.param = sync;

	/* Create node_rx and assign values */
	se_past = (void *)rx->pdu;
	se_past->rx_sync.status = BT_HCI_ERR_SUCCESS;
	se_past->rx_sync.interval = interval;
	se_past->rx_sync.phy = sync->lll.phy;
	se_past->rx_sync.sca = sca;
	se_past->conn_handle = ll_conn_handle_get(conn);
	se_past->service_data = service_data;

	conn_interval_us = conn->lll.interval * CONN_INT_UNIT_US;

	/* Calculate offset and schedule sync radio events */
	ready_delay_us = lll_radio_rx_ready_delay_get(lll->phy, PHY_FLAGS_S8);

	sync_offset_us = PDU_ADV_SYNC_INFO_OFFSET_GET(si) * lll->window_size_event_us;
	/* offs_adjust may be 1 only if sync setup by LL_PERIODIC_SYNC_IND */
	sync_offset_us += (PDU_ADV_SYNC_INFO_OFFS_ADJUST_GET(si) ? OFFS_ADJUST_US : 0U);
	sync_offset_us -= EVENT_TICKER_RES_MARGIN_US;
	sync_offset_us -= EVENT_JITTER_US;
	sync_offset_us -= ready_delay_us;

	/* Apply the connection event offset; when the resulting offset would
	 * be negative, push the anchor forward by whole sync intervals and
	 * advance the event counter accordingly.
	 */
	if (conn_evt_offset) {
		int64_t conn_offset_us = (int64_t)conn_evt_offset * conn_interval_us;

		if ((int64_t)sync_offset_us + conn_offset_us < 0) {
			uint32_t total_offset_us = llabs((int64_t)sync_offset_us + conn_offset_us);
			uint32_t sync_intervals = DIV_ROUND_UP(total_offset_us, interval_us);

			lll->event_counter += sync_intervals;
			sync_offset_us = (sync_intervals * interval_us) - total_offset_us;
		} else {
			sync_offset_us += conn_offset_us;
		}
	}

	/* Calculate initial window widening - see Core Spec vol 6, part B, 5.1.13.1 */
	{
		uint16_t event_delta;
		uint32_t drift_us;
		uint64_t da;
		uint64_t db;
		uint64_t d;

		const uint32_t local_sca_ppm = lll_clock_ppm_local_get();

		event_delta = lll->event_counter - last_pa_event_counter;

		/* da: drift accumulated over PA events since the reference */
		da = (uint64_t)(local_sca_ppm + lll_clock_ppm_get(sca)) * interval_us;
		da = DIV_ROUND_UP(da * (uint64_t)event_delta, USEC_PER_SEC);

		/* db: drift accumulated over connection events since sampling */
		db = (uint64_t)(local_sca_ppm + lll_clock_ppm_get(sender_sca)) * conn_interval_us;
		db = DIV_ROUND_UP(db * (uint64_t)(ull_conn_event_counter(conn) -
						  sync_conn_event_count), USEC_PER_SEC);

		d = DIV_ROUND_UP((da + db) * (USEC_PER_SEC + local_sca_ppm +
					      lll_clock_ppm_get(sca) +
					      lll_clock_ppm_get(sender_sca)), USEC_PER_SEC);

		/* Limit drift compensation to the maximum window widening */
		drift_us = MIN((uint32_t)d, lll->window_widening_max_us);

		/* Apply total drift to initial window size */
		lll->window_size_event_us += drift_us;

		/* Adjust offset if less than the drift compensation */
		while (sync_offset_us < drift_us) {
			sync_offset_us += interval_us;
			lll->event_counter++;
		}

		sync_offset_us -= drift_us;
	}

	interval_us -= lll->window_widening_periodic_us;

	/* Calculate event time reservation */
	slot_us = PDU_AC_MAX_US(PDU_AC_EXT_PAYLOAD_RX_SIZE, lll->phy);
	slot_us += ready_delay_us;

	/* Add implementation defined radio event overheads */
	if (IS_ENABLED(CONFIG_BT_CTLR_EVENT_OVERHEAD_RESERVE_MAX)) {
		slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
	}

	sync->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);

	ticks_slot_offset = HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}
	ticks_slot_offset += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

	sync->lll_sync_prepare = lll_sync_create_prepare;

	/* Anchor the sync ticker on the connection event's expiry time */
	ticks_anchor = conn->llcp.prep.ticks_at_expire;

#if defined(CONFIG_BT_PERIPHERAL)
	if (conn->lll.role == BT_HCI_ROLE_PERIPHERAL) {
		/* Compensate for window widening */
		ticks_anchor += HAL_TICKER_US_TO_TICKS(conn->lll.periph.window_widening_event_us);
	}
#endif /* CONFIG_BT_PERIPHERAL */

	ret = ticker_start(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			   (TICKER_ID_SCAN_SYNC_BASE + sync_handle),
			   ticks_anchor,
			   HAL_TICKER_US_TO_TICKS(sync_offset_us),
			   HAL_TICKER_US_TO_TICKS(interval_us),
			   HAL_TICKER_REMAINDER(interval_us),
			   TICKER_NULL_LAZY,
			   (sync->ull.ticks_slot + ticks_slot_overhead),
			   ticker_cb, sync,
			   ticker_start_op_cb, (void *)__LINE__);
	LL_ASSERT_ERR((ret == TICKER_STATUS_SUCCESS) ||
		      (ret == TICKER_STATUS_BUSY));
}
430 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
431 
432 
/* @brief Link Layer interface function corresponding to HCI LE Periodic
 *        Advertising Create Sync Cancel command.
 *
 * Cancels a pending sync create, releases the established-event node and
 * links, and hands back a NODE_RX_TYPE_SYNC node with status
 * BT_HCI_ERR_OP_CANCELLED_BY_HOST for the Host.
 *
 * @param[out] rx Returned node_rx carrying the cancelled sync established
 *                event (handle LLL_HANDLE_INVALID).
 *
 * @return HCI error code, 0 on success.
 */
uint8_t ll_sync_create_cancel(void **rx)
{
	struct ll_scan_set *scan_coded;
	memq_link_t *link_sync_estab;
	memq_link_t *link_sync_lost;
	struct node_rx_pdu *node_rx;
	struct ll_scan_set *scan;
	struct ll_sync_set *sync;
	struct node_rx_sync *se;

	scan = ull_scan_set_get(SCAN_HANDLE_1M);
	if (!scan || !scan->periodic.sync) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
		if (!scan_coded || !scan_coded->periodic.sync) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}

	/* Check for race condition where in sync is established when sync
	 * create cancel is invoked.
	 *
	 * Setting `scan->periodic.cancelled` to represent cancellation
	 * requested in the thread context. Checking `scan->periodic.sync` for
	 * NULL confirms if synchronization was established before
	 * `scan->periodic.cancelled` was set to 1U.
	 */
	scan->periodic.cancelled = 1U;
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded->periodic.cancelled = 1U;
	}
	/* Memory barrier: cancelled flag must be visible before re-reading
	 * the sync pointer below.
	 */
	cpu_dmb();
	sync = scan->periodic.sync;
	if (!sync) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* node_rx_sync_estab is assigned when Host calls create sync and cleared when sync is
	 * established. timeout_reload is set when sync is found and setup. It is non-zero until
	 * sync is terminated. Together they give information about current sync state:
	 * - node_rx_sync_estab == NULL && timeout_reload != 0 => sync is established
	 * - node_rx_sync_estab == NULL && timeout_reload == 0 => sync is terminated
	 * - node_rx_sync_estab != NULL && timeout_reload == 0 => sync is created
	 * - node_rx_sync_estab != NULL && timeout_reload != 0 => sync is waiting to be established
	 */
	if (!sync->node_rx_sync_estab) {
		/* There is no sync to be cancelled */
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Request stop; barrier ensures the flag is observed before any
	 * subsequent ULL scheduling of this sync.
	 */
	sync->is_stop = 1U;
	cpu_dmb();

	if (sync->timeout_reload != 0U) {
		uint16_t sync_handle = ull_sync_handle_get(sync);

		LL_ASSERT_DBG(sync_handle <= UINT8_MAX);

		/* Sync is not established yet, so stop sync ticker */
		const int err =
			ull_ticker_stop_with_mark((TICKER_ID_SCAN_SYNC_BASE +
						   (uint8_t)sync_handle),
						  sync, &sync->lll);
		if (err != 0 && err != -EALREADY) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	} /* else: sync was created but not yet setup, there is no sync ticker yet. */

	/* It is safe to remove association with scanner as cancelled flag is
	 * set, sync is_stop flag was set and sync has not been established.
	 */
	ull_sync_setup_reset(sync);

	/* Mark the sync context as sync create cancelled */
	if (IS_ENABLED(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)) {
		sync->timeout = 0U;
	}

	/* Release the links and the sync established node back to their pools */
	node_rx = sync->node_rx_sync_estab;
	link_sync_estab = node_rx->hdr.link;
	link_sync_lost = sync->node_rx_lost.rx.hdr.link;

	ll_rx_link_release(link_sync_lost);
	ll_rx_link_release(link_sync_estab);
	ll_rx_release(node_rx);

	/* Clear the node after release to mark the sync establish as being completed.
	 * In this case the completion reason is sync cancelled by Host.
	 */
	sync->node_rx_sync_estab = NULL;

	node_rx = (void *)&sync->node_rx_lost;
	node_rx->hdr.type = NODE_RX_TYPE_SYNC;
	node_rx->hdr.handle = LLL_HANDLE_INVALID;

	/* NOTE: struct node_rx_lost has uint8_t member following the
	 *       struct node_rx_hdr to store the reason.
	 */
	se = (void *)node_rx->pdu;
	se->status = BT_HCI_ERR_OP_CANCELLED_BY_HOST;

	/* NOTE: Since NODE_RX_TYPE_SYNC is only generated from ULL context,
	 *       pass ULL sync context as parameter.
	 */
	node_rx->rx_ftr.param = sync;

	*rx = node_rx;

	return 0;
}
546 
/* @brief Link Layer interface function corresponding to HCI LE Periodic
 *        Advertising Terminate Sync command.
 *
 * Stops the sync ticker and any ongoing auxiliary PDU reception, releases
 * the rx nodes/links held by the sync, and returns the sync context to the
 * free pool.
 *
 * @param[in] handle Sync_Handle identifying the periodic advertising train.
 *
 * @return HCI error code, 0 on success.
 */
uint8_t ll_sync_terminate(uint16_t handle)
{
	struct lll_scan_aux *lll_aux;
	memq_link_t *link_sync_lost;
	struct ll_sync_set *sync;
	int err;

	sync = ull_sync_is_enabled_get(handle);
	if (!sync) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	/* Request terminate, no new ULL scheduling to be setup */
	sync->is_stop = 1U;
	cpu_dmb();

	/* Stop periodic sync ticker timeouts */
	err = ull_ticker_stop_with_mark(TICKER_ID_SCAN_SYNC_BASE + handle,
					sync, &sync->lll);
	LL_ASSERT_INFO2(err == 0 || err == -EALREADY, handle, err);
	if (err) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Check and stop any auxiliary PDU receptions */
	lll_aux = sync->lll.lll_aux;
	if (lll_aux) {
#if defined(CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS)
		err = ull_scan_aux_stop(&sync->lll);
#else /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
		struct ll_scan_aux_set *aux;

		aux = HDR_LLL2ULL(lll_aux);
		err = ull_scan_aux_stop(aux);
#endif /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
		if (err && (err != -EALREADY)) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}

#if !defined(CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS)
		LL_ASSERT_DBG(!aux->parent);
#endif /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
	}

#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
	/* Clean up node_rx_sync_estab if still present */
	if (sync->node_rx_sync_estab) {
		memq_link_t *link_sync_estab;
		struct node_rx_pdu *node_rx;

		node_rx = (void *)sync->node_rx_sync_estab;
		link_sync_estab = node_rx->hdr.link;

		ll_rx_link_release(link_sync_estab);
		ll_rx_release(node_rx);

		sync->node_rx_sync_estab = NULL;
	}
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */

	link_sync_lost = sync->node_rx_lost.rx.hdr.link;
	ll_rx_link_release(link_sync_lost);

	/* Mark sync context not sync established */
	sync->timeout_reload = 0U;

	ull_sync_release(sync);

	return 0;
}
617 
618 /* @brief Link Layer interface function corresponding to HCI LE Set Periodic
619  *        Advertising Receive Enable command.
620  *
621  * @param[in] handle Sync_Handle identifying the periodic advertising
622  *                   train. Range: 0x0000 to 0x0EFF.
623  * @param[in] enable Bit number 0 - Reporting Enabled.
624  *                   Bit number 1 - Duplicate filtering enabled.
625  *                   All other bits - Reserved for future use.
626  *
627  * @return HCI error codes as documented in Bluetooth Core Specification v5.3.
628  */
uint8_t ll_sync_recv_enable(uint16_t handle, uint8_t enable)
{
	struct ll_sync_set *sync = ull_sync_is_enabled_get(handle);

	if (sync == NULL) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	/* Bit 0: enable or disable generation of periodic advertising reports */
	sync->rx_enable =
		((enable & BT_HCI_LE_SET_PER_ADV_RECV_ENABLE_ENABLE) != 0U);

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
	/* Bit 1: enable or disable duplicate filtering of reports */
	sync->nodups =
		((enable & BT_HCI_LE_SET_PER_ADV_RECV_ENABLE_FILTER_DUPLICATE) != 0U);
#endif

	return 0;
}
649 
650 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER)
651 /* @brief Link Layer interface function corresponding to HCI LE Set Periodic
652  *        Advertising Sync Transfer command.
653  *
654  * @param[in] conn_handle Connection_Handle identifying the connected device
655  *                        Range: 0x0000 to 0x0EFF.
656  * @param[in] service_data Service_Data value provided by the Host for use by the
657  *                         Host of the peer device.
658  * @param[in] sync_handle Sync_Handle identifying the periodic advertising
659  *                        train. Range: 0x0000 to 0x0EFF.
660  *
661  * @return HCI error codes as documented in Bluetooth Core Specification v5.4.
662  */
ll_sync_transfer(uint16_t conn_handle,uint16_t service_data,uint16_t sync_handle)663 uint8_t ll_sync_transfer(uint16_t conn_handle, uint16_t service_data, uint16_t sync_handle)
664 {
665 	struct ll_sync_set *sync;
666 	struct ll_conn *conn;
667 
668 	conn = ll_connected_get(conn_handle);
669 	if (!conn) {
670 		return BT_HCI_ERR_UNKNOWN_CONN_ID;
671 	}
672 
673 	/* Verify that sync_handle is valid */
674 	sync = ull_sync_is_enabled_get(sync_handle);
675 	if (!sync) {
676 		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
677 	}
678 
679 	/* Call llcp to start LLCP_PERIODIC_SYNC_IND */
680 	return ull_cp_periodic_sync(conn, sync, NULL, service_data);
681 }
682 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER */
683 
684 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
685 /* @brief Link Layer interface function corresponding to HCI LE Set Periodic
686  *        Advertising Sync Transfer Parameters command.
687  *
688  * @param[in] conn_handle Connection_Handle identifying the connected device
689  *                        Range: 0x0000 to 0x0EFF.
690  * @param[in] mode Mode specifies the action to be taken when a periodic advertising
691  *                 synchronization is received.
 * @param[in] skip Skip specifying the number of consecutive periodic advertising
 *                 packets that the receiver may skip after successfully receiving a
 *                 periodic advertising packet. Range: 0x0000 to 0x01F3.
695  * @param[in] timeout Sync_timeout specifying the maximum permitted time between
696  *                    successful receives. Range: 0x000A to 0x4000.
697  * @param[in] cte_type CTE_Type specifying whether to only synchronize to periodic
698  *                     advertising with certain types of Constant Tone Extension.
699  *
700  * @return HCI error codes as documented in Bluetooth Core Specification v5.4.
701  */
uint8_t ll_past_param(uint16_t conn_handle, uint8_t mode, uint16_t skip, uint16_t timeout,
		      uint8_t cte_type)
{
	struct ll_conn *conn = ll_connected_get(conn_handle);

	if (conn == NULL) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	/* Duplicate-filtering mode requires ADI support in the Controller */
	if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT) &&
	    (mode == BT_HCI_LE_PAST_MODE_SYNC_FILTER_DUPLICATES)) {
		return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
	}

	/* Store the PAST parameters on this connection instance */
	conn->past.mode = mode;
	conn->past.skip = skip;
	conn->past.timeout = timeout;
	conn->past.cte_type = cte_type;

	return 0;
}
725 
726 /* @brief Link Layer interface function corresponding to HCI LE Set Default Periodic
727  *        Advertising Sync Transfer Parameters command.
728  *
729  * @param[in] mode Mode specifies the action to be taken when a periodic advertising
730  *                   synchronization is received.
 * @param[in] skip Skip specifying the number of consecutive periodic advertising
 *                   packets that the receiver may skip after successfully receiving a
 *                   periodic advertising packet. Range: 0x0000 to 0x01F3.
734  * @param[in] timeout Sync_timeout specifying the maximum permitted time between
735  *                    successful receives. Range: 0x000A to 0x4000.
736  * @param[in] cte_type CTE_Type specifying whether to only synchronize to periodic
737  *                   advertising with certain types of Constant Tone Extension.
738  *
739  * @return HCI error codes as documented in Bluetooth Core Specification v5.4.
740  */
uint8_t ll_default_past_param(uint8_t mode, uint16_t skip, uint16_t timeout, uint8_t cte_type)
{
	/* Duplicate-filtering mode requires ADI support in the Controller */
	if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT) &&
	    (mode == BT_HCI_LE_PAST_MODE_SYNC_FILTER_DUPLICATES)) {
		return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
	}

	/* Record the defaults applied to future connections */
	ull_conn_default_past_param_set(mode, skip, timeout, cte_type);

	return 0;
}
753 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
754 
/* Initialize the periodic sync module; returns 0 on success or a negative
 * error from the common reset path.
 */
int ull_sync_init(void)
{
	return init_reset();
}
766 
ull_sync_reset(void)767 int ull_sync_reset(void)
768 {
769 	uint16_t handle;
770 	void *rx;
771 	int err;
772 
773 	(void)ll_sync_create_cancel(&rx);
774 
775 	for (handle = 0U; handle < CONFIG_BT_PER_ADV_SYNC_MAX; handle++) {
776 		(void)ll_sync_terminate(handle);
777 	}
778 
779 	err = init_reset();
780 	if (err) {
781 		return err;
782 	}
783 
784 	return 0;
785 }
786 
/* Return the sync context for a handle, or NULL when out of pool range. */
struct ll_sync_set *ull_sync_set_get(uint16_t handle)
{
	return (handle < CONFIG_BT_PER_ADV_SYNC_MAX) ? &ll_sync_pool[handle] : NULL;
}
795 
/* Return the sync context for a handle only when sync is established
 * (non-zero timeout_reload), otherwise NULL.
 */
struct ll_sync_set *ull_sync_is_enabled_get(uint16_t handle)
{
	struct ll_sync_set *sync = ull_sync_set_get(handle);

	if ((sync == NULL) || (sync->timeout_reload == 0U)) {
		return NULL;
	}

	return sync;
}
807 
ull_sync_is_valid_get(struct ll_sync_set * sync)808 struct ll_sync_set *ull_sync_is_valid_get(struct ll_sync_set *sync)
809 {
810 	if (((uint8_t *)sync < (uint8_t *)ll_sync_pool) ||
811 	    ((uint8_t *)sync > ((uint8_t *)ll_sync_pool +
812 	     (sizeof(struct ll_sync_set) * (CONFIG_BT_PER_ADV_SYNC_MAX - 1))))) {
813 		return NULL;
814 	}
815 
816 	return sync;
817 }
818 
ull_sync_lll_is_valid_get(struct lll_sync * lll)819 struct lll_sync *ull_sync_lll_is_valid_get(struct lll_sync *lll)
820 {
821 	struct ll_sync_set *sync;
822 
823 	sync = HDR_LLL2ULL(lll);
824 	sync = ull_sync_is_valid_get(sync);
825 	if (sync) {
826 		return &sync->lll;
827 	}
828 
829 	return NULL;
830 }
831 
/* Return the index of the sync context within the pool; used as the
 * Sync_Handle towards the Host.
 */
uint16_t ull_sync_handle_get(struct ll_sync_set *sync)
{
	return mem_index_get(sync, ll_sync_pool, sizeof(struct ll_sync_set));
}
836 
/* Return the Sync_Handle for an LLL sync context via its enclosing ULL
 * context.
 */
uint16_t ull_sync_lll_handle_get(struct lll_sync *lll)
{
	return ull_sync_handle_get(HDR_LLL2ULL(lll));
}
841 
/* @brief Release a sync context back to the free pool.
 *
 * Releases any incomplete IQ report node held by the LLL context, marks the
 * context as create-cancelled where peer-duplicate checking is enabled, and
 * returns the memory to the pool.
 *
 * @param[in] sync Sync context to release; must not be used afterwards.
 */
void ull_sync_release(struct ll_sync_set *sync)
{
#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	struct lll_sync *lll = &sync->lll;

	if (lll->node_cte_incomplete) {
		const uint8_t release_cnt = 1U;
		struct node_rx_pdu *node_rx;
		memq_link_t *link;

		node_rx = &lll->node_cte_incomplete->rx;
		link = node_rx->hdr.link;

		/* Return the link and IQ report memory, and replenish the
		 * report allocation quota.
		 */
		ll_rx_link_release(link);
		ull_iq_report_link_inc_quota(release_cnt);
		ull_df_iq_report_mem_release(node_rx);
		ull_df_rx_iq_report_alloc(release_cnt);

		lll->node_cte_incomplete = NULL;
	}
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	/* Mark the sync context as sync create cancelled */
	if (IS_ENABLED(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)) {
		sync->timeout = 0U;
	}

#if !defined(CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS)
	/* reset accumulated data len */
	sync->data_len = 0U;
#endif /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */

	mem_release(sync, &sync_free);
}
876 
/* @brief Check whether a received advertiser address matches the sync being
 *        set up.
 *
 * When the Periodic Advertiser List filter policy is used, the address is
 * matched against the list (and, with privacy, against the resolving list);
 * on a match the peer identity address is recorded in the sync context for
 * the later SID check. Otherwise the address is compared against the
 * explicitly supplied peer address, including the resolved identity address
 * when privacy is enabled.
 *
 * @param[in,out] sync          Sync context; peer address fields may be
 *                              updated on a list/resolving-list match.
 * @param[in]     filter_policy Non-zero when the Periodic Advertiser List is used.
 * @param[in]     addr_type     Received advertiser address type.
 * @param[in,out] addr          Received advertiser address; overwritten with
 *                              the identity address on a resolving-list hit
 *                              in the explicit-address path.
 * @param[in]     rl_idx        Resolving list index of the received address.
 *
 * @return true when the address matches, false otherwise.
 */
bool ull_sync_setup_addr_check(struct ll_sync_set *sync, uint8_t filter_policy,
			       uint8_t addr_type, uint8_t *addr, uint8_t rl_idx)
{
	/* Check if Periodic Advertiser list to be used */
	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST) &&
	    filter_policy) {
		/* Check in Periodic Advertiser List */
		if (ull_filter_ull_pal_addr_match(addr_type, addr)) {
			/* Remember the address, to check with
			 * SID in Sync Info
			 */
			sync->peer_id_addr_type = addr_type;
			(void)memcpy(sync->peer_id_addr, addr,
				     BDADDR_SIZE);

			/* Address matched */
			return true;

		/* Check in Resolving List */
		} else if (IS_ENABLED(CONFIG_BT_CTLR_PRIVACY) &&
			   ull_filter_ull_pal_listed(rl_idx, &addr_type,
						     sync->peer_id_addr)) {
			/* Remember the address, to check with the
			 * SID in Sync Info
			 */
			sync->peer_id_addr_type = addr_type;

			/* Mark it as identity address from RPA */
			sync->peer_addr_resolved = 1U;

			/* Address matched */
			return true;
		}

	/* Check with explicitly supplied address */
	} else if ((addr_type == sync->peer_id_addr_type) &&
		   !memcmp(addr, sync->peer_id_addr, BDADDR_SIZE)) {
		/* Address matched */
		return true;

	/* Check identity address with explicitly supplied address */
	} else if (IS_ENABLED(CONFIG_BT_CTLR_PRIVACY) &&
		   (rl_idx < ll_rl_size_get())) {
		ll_rl_id_addr_get(rl_idx, &addr_type, addr);
		if ((addr_type == sync->peer_id_addr_type) &&
		    !memcmp(addr, sync->peer_id_addr, BDADDR_SIZE)) {
			/* Mark it as identity address from RPA */
			sync->peer_addr_resolved = 1U;

			/* Identity address matched */
			return true;
		}
	}

	return false;
}
933 
ull_sync_setup_sid_match(struct ll_sync_set * sync,struct ll_scan_set * scan,uint8_t sid)934 bool ull_sync_setup_sid_match(struct ll_sync_set *sync, struct ll_scan_set *scan, uint8_t sid)
935 {
936 	return (scan->periodic.state == LL_SYNC_STATE_ADDR_MATCH) &&
937 		((IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST) &&
938 		  scan->periodic.filter_policy &&
939 		  ull_filter_ull_pal_match(sync->peer_id_addr_type,
940 					   sync->peer_id_addr, sid)) ||
941 		 (!scan->periodic.filter_policy &&
942 		  (sid == sync->sid)));
943 }
944 
/* Populate the sync LLL context from the received Sync Info field and
 * start the periodic sync ticker that schedules AUX_SYNC_IND reception.
 *
 * @param scan    Scan context under which the sync is being created
 * @param phy     PHY on which the AUX_SYNC_IND will be received
 * @param node_rx Rx node carrying the PDU that contained the Sync Info
 * @param si      Sync Info field extracted from that PDU
 */
void ull_sync_setup(struct ll_scan_set *scan, uint8_t phy,
		    struct node_rx_pdu *node_rx, struct pdu_adv_sync_info *si)
{
	uint32_t ticks_slot_overhead;
	uint32_t ticks_slot_offset;
	struct ll_sync_set *sync;
	struct node_rx_sync *se;
	struct node_rx_ftr *ftr;
	uint32_t sync_offset_us;
	uint32_t ready_delay_us;
	struct node_rx_pdu *rx;
	uint8_t *data_chan_map;
	struct lll_sync *lll;
	uint16_t sync_handle;
	uint32_t interval_us;
	uint32_t overhead_us;
	struct pdu_adv *pdu;
	uint16_t interval;
	uint32_t slot_us;
	uint8_t chm_last;
	uint32_t ret;
	uint8_t sca;

	/* Populate the LLL context */
	sync = scan->periodic.sync;
	lll = &sync->lll;

	/* Copy channel map from sca_chm field in sync_info structure, and
	 * clear the SCA bits.
	 */
	chm_last = lll->chm_first;
	lll->chm_last = chm_last;
	data_chan_map = lll->chm[chm_last].data_chan_map;
	(void)memcpy(data_chan_map, si->sca_chm,
		     sizeof(lll->chm[chm_last].data_chan_map));
	data_chan_map[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &=
		~PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK;
	lll->chm[chm_last].data_chan_count =
		util_ones_count_get(data_chan_map,
				    sizeof(lll->chm[chm_last].data_chan_map));
	if (lll->chm[chm_last].data_chan_count < CHM_USED_COUNT_MIN) {
		/* Ignore sync setup, invalid available channel count */
		return;
	}

	/* Access address, CRC init and starting event counter come straight
	 * from the Sync Info field.
	 */
	memcpy(lll->access_addr, si->aa, sizeof(lll->access_addr));
	lll->data_chan_id = lll_chan_id(lll->access_addr);
	memcpy(lll->crc_init, si->crc_init, sizeof(lll->crc_init));
	lll->event_counter = sys_le16_to_cpu(si->evt_cntr);
	lll->phy = phy;
	lll->forced = 0U;

	interval = sys_le16_to_cpu(si->interval);
	interval_us = interval * PERIODIC_INT_UNIT_US;

#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER)
	/* Save Periodic Advertisement Interval */
	sync->interval = interval;
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER */

	/* Convert from 10ms units to interval units */
	sync->timeout_reload = RADIO_SYNC_EVENTS((sync->timeout * 10U *
						  USEC_PER_MSEC), interval_us);

	/* Adjust Skip value so that there is minimum of 6 events that can be
	 * listened to before Sync_Timeout occurs.
	 * The adjustment of the skip value is controller implementation
	 * specific and not specified by the Bluetooth Core Specification v5.3.
	 * The Controller `may` use the Skip value, and the implementation here
	 * covers a case where Skip value could lead to less events being
	 * listened to until Sync_Timeout. Listening to more consecutive events
	 * before Sync_Timeout increases probability of retaining the Periodic
	 * Synchronization.
	 */
	if (sync->timeout_reload > CONN_ESTAB_COUNTDOWN) {
		uint16_t skip_max = sync->timeout_reload - CONN_ESTAB_COUNTDOWN;

		if (sync->skip > skip_max) {
			sync->skip = skip_max;
		}
	} else {
		sync->skip = 0U;
	}

	sync->sync_expire = CONN_ESTAB_COUNTDOWN;

	/* Extract the SCA value from the sca_chm field of the sync_info
	 * structure.
	 */
	sca = (si->sca_chm[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &
	       PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK) >>
	      PDU_SYNC_INFO_SCA_CHM_SCA_BIT_POS;

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	lll->sca = sca;
#endif /* CONFIG_BT_CTLR_SYNC_ISO */

	/* Window widening grows per event with the combined local and peer
	 * sleep clock accuracy, capped below at half the interval.
	 */
	lll->window_widening_periodic_us =
		DIV_ROUND_UP(((lll_clock_ppm_local_get() +
				   lll_clock_ppm_get(sca)) *
				  interval_us), USEC_PER_SEC);
	lll->window_widening_max_us = (interval_us >> 1) - EVENT_IFS_US;
	if (PDU_ADV_SYNC_INFO_OFFS_UNITS_GET(si)) {
		lll->window_size_event_us = OFFS_UNIT_300_US;
	} else {
		lll->window_size_event_us = OFFS_UNIT_30_US;
	}

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	lll->node_cte_incomplete = NULL;
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	/* Set the state to sync create, on both scan contexts when coded PHY
	 * scanning is supported so that neither restarts a create.
	 */
	scan->periodic.state = LL_SYNC_STATE_CREATED;
	scan->periodic.param = NULL;
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		struct ll_scan_set *scan_1m;

		scan_1m = ull_scan_set_get(SCAN_HANDLE_1M);
		if (scan == scan_1m) {
			struct ll_scan_set *scan_coded;

			scan_coded = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
			scan_coded->periodic.state = LL_SYNC_STATE_CREATED;
			scan_coded->periodic.param = NULL;
		} else {
			scan_1m->periodic.state = LL_SYNC_STATE_CREATED;
			scan_1m->periodic.param = NULL;
		}
	}

	sync_handle = ull_sync_handle_get(sync);

	/* Prepare sync notification, dispatch only on successful AUX_SYNC_IND
	 * reception.
	 */
	rx = (void *)sync->node_rx_sync_estab;
	rx->hdr.type = NODE_RX_TYPE_SYNC;
	rx->hdr.handle = sync_handle;
	rx->rx_ftr.param = sync;
	se = (void *)rx->pdu;
	se->interval = interval;
	se->phy = lll->phy;
	se->sca = sca;

	/* Calculate offset and schedule sync radio events */
	ftr = &node_rx->rx_ftr;
	pdu = (void *)((struct node_rx_pdu *)node_rx)->pdu;

	ready_delay_us = lll_radio_rx_ready_delay_get(lll->phy, PHY_FLAGS_S8);

	/* First AUX_SYNC_IND anchor: Sync Info offset counted from the end of
	 * the received PDU, minus the PDU airtime, scheduling margins and the
	 * radio ready delay.
	 */
	sync_offset_us = ftr->radio_end_us;
	sync_offset_us += PDU_ADV_SYNC_INFO_OFFSET_GET(si) *
			  lll->window_size_event_us;
	/* offs_adjust may be 1 only if sync setup by LL_PERIODIC_SYNC_IND */
	sync_offset_us += (PDU_ADV_SYNC_INFO_OFFS_ADJUST_GET(si) ? OFFS_ADJUST_US : 0U);
	sync_offset_us -= PDU_AC_US(pdu->len, lll->phy, ftr->phy_flags);
	sync_offset_us -= EVENT_TICKER_RES_MARGIN_US;
	sync_offset_us -= EVENT_JITTER_US;
	sync_offset_us -= ready_delay_us;

	/* Minimum prepare tick offset + minimum preempt tick offset are the
	 * overheads before ULL scheduling can setup radio for reception
	 */
	overhead_us = HAL_TICKER_TICKS_TO_US(HAL_TICKER_CNTR_CMP_OFFSET_MIN << 1);

	/* CPU execution overhead to setup the radio for reception */
	overhead_us += EVENT_OVERHEAD_END_US + EVENT_OVERHEAD_START_US;

	/* If not sufficient CPU processing time, skip to receiving next
	 * event.
	 */
	if ((sync_offset_us - ftr->radio_end_us) < overhead_us) {
		sync_offset_us += interval_us;
		lll->event_counter++;
	}

	interval_us -= lll->window_widening_periodic_us;

	/* Calculate event time reservation */
	slot_us = PDU_AC_MAX_US(PDU_AC_EXT_PAYLOAD_RX_SIZE, lll->phy);
	slot_us += ready_delay_us;

	/* Add implementation defined radio event overheads */
	if (IS_ENABLED(CONFIG_BT_CTLR_EVENT_OVERHEAD_RESERVE_MAX)) {
		slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
	}

	sync->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);

	ticks_slot_offset = HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}
	ticks_slot_offset += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

	/* Use the create-prepare until the first successful reception, see
	 * ull_sync_established_report which switches to lll_sync_prepare.
	 */
	sync->lll_sync_prepare = lll_sync_create_prepare;

	ret = ticker_start(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			   (TICKER_ID_SCAN_SYNC_BASE + sync_handle),
			   ftr->ticks_anchor - ticks_slot_offset,
			   HAL_TICKER_US_TO_TICKS(sync_offset_us),
			   HAL_TICKER_US_TO_TICKS(interval_us),
			   HAL_TICKER_REMAINDER(interval_us),
			   TICKER_NULL_LAZY,
			   (sync->ull.ticks_slot + ticks_slot_overhead),
			   ticker_cb, sync,
			   ticker_start_op_cb, (void *)__LINE__);
	LL_ASSERT_ERR((ret == TICKER_STATUS_SUCCESS) ||
		      (ret == TICKER_STATUS_BUSY));
}
1158 
ull_sync_setup_reset(struct ll_sync_set * sync)1159 void ull_sync_setup_reset(struct ll_sync_set *sync)
1160 {
1161 	struct ll_scan_set *scan;
1162 
1163 	/* Remove the sync context from being associated with scan contexts */
1164 	scan = ull_scan_set_get(SCAN_HANDLE_1M);
1165 
1166 	scan->periodic.sync = NULL;
1167 
1168 #if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
1169 	scan->lll.is_sync = 0U;
1170 #endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
1171 
1172 	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
1173 		scan = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
1174 
1175 		scan->periodic.sync = NULL;
1176 
1177 #if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
1178 		scan->lll.is_sync = 0U;
1179 #endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
1180 	}
1181 }
1182 
/* Handle the first AUX_SYNC_IND received after sync create: dispatch the
 * sync established notification to the Host and, when allowed by the CTE
 * type filter, hand the PDU over as a periodic advertising report.
 */
void ull_sync_established_report(memq_link_t *link, struct node_rx_pdu *rx)
{
	struct node_rx_pdu *rx_establ;
	struct ll_sync_set *sync;
	struct node_rx_ftr *ftr;
	struct node_rx_sync *se;
	struct lll_sync *lll;

	ftr = &rx->rx_ftr;
	lll = ftr->param;
	sync = HDR_LLL2ULL(lll);

	/* Do nothing if sync is cancelled or lost. */
	if (unlikely(sync->is_stop || !sync->timeout_reload)) {
		return;
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
	enum sync_status sync_status;

#if defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
	/* LLL already evaluated the received CTE against the filter policy */
	sync_status = ftr->sync_status;
#else
	struct pdu_cte_info *rx_cte_info;

	/* No CTE inline support: evaluate the received CTE (or its absence)
	 * against the configured CTE type filter here in ULL.
	 */
	rx_cte_info = pdu_cte_info_get((struct pdu_adv *)rx->pdu);
	if (rx_cte_info != NULL) {
		sync_status = lll_sync_cte_is_allowed(lll->cte_type, lll->filter_policy,
						      rx_cte_info->time, rx_cte_info->type);
	} else {
		sync_status = lll_sync_cte_is_allowed(lll->cte_type, lll->filter_policy, 0,
						      BT_HCI_LE_NO_CTE);
	}

	/* If there is no CTEInline support, notify done event handler to terminate periodic
	 * advertising sync in case the CTE is not allowed.
	 * If the periodic filtering list is not used then terminate synchronization and notify
	 * host. If the periodic filtering list is used then stop synchronization with this
	 * particular periodic advertiser but continue to search for another one.
	 */
	sync->is_term = ((sync_status == SYNC_STAT_TERM) || (sync_status == SYNC_STAT_CONT_SCAN));
#endif /* CONFIG_BT_CTLR_CTEINLINE_SUPPORT */

	/* Send periodic advertisement sync established report when sync has correct CTE type
	 * or the CTE type is incorrect and filter policy doesn't allow to continue scanning.
	 */
	if (sync_status == SYNC_STAT_ALLOWED || sync_status == SYNC_STAT_TERM) {
#else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

	if (1) {
#endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

		/* Prepare and dispatch sync notification */
		rx_establ = (void *)sync->node_rx_sync_estab;
		rx_establ->hdr.handle = ull_sync_handle_get(sync);
		se = (void *)rx_establ->pdu;
		/* Clear the node to mark the sync establish as being completed.
		 * In this case the completion reason is sync being established.
		 */
		sync->node_rx_sync_estab = NULL;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
		se->status = (ftr->sync_status == SYNC_STAT_TERM) ?
					   BT_HCI_ERR_UNSUPP_REMOTE_FEATURE :
					   BT_HCI_ERR_SUCCESS;
#else
		se->status = BT_HCI_ERR_SUCCESS;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

		/* NOTE: footer param has already been populated during sync
		 * setup.
		 */

		ll_rx_put_sched(rx_establ->hdr.link, rx_establ);
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
	/* Handle periodic advertising PDU and send periodic advertising scan report when
	 * the sync was found or was established in the past. The report is not sent if
	 * scanning is terminated due to wrong CTE type.
	 */
	if (sync_status == SYNC_STAT_ALLOWED || sync_status == SYNC_STAT_READY) {
#else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

	if (1) {
#endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

		/* Switch sync event prepare function to one responsible for regular PDUs receive */
		sync->lll_sync_prepare = lll_sync_prepare;

		/* Change node type to appropriately handle periodic
		 * advertising PDU report.
		 */
		rx->hdr.type = NODE_RX_TYPE_SYNC_REPORT;
		ull_scan_aux_setup(link, rx);
	} else {
		/* Report suppressed: recycle the rx node instead */
		rx->hdr.type = NODE_RX_TYPE_RELEASE;
		ll_rx_put_sched(link, rx);
	}
}
1283 
/* Periodic sync radio event done handler: apply drift compensation, run
 * the sync-establishment and supervision timeout countdowns, and update
 * the sync ticker (or stop it on expiry/loss).
 */
void ull_sync_done(struct node_rx_event_done *done)
{
	struct ll_sync_set *sync;

	/* Get reference to ULL context */
	sync = CONTAINER_OF(done->param, struct ll_sync_set, ull);

	/* Do nothing if local terminate requested or sync lost */
	if (unlikely(sync->is_stop || !sync->timeout_reload)) {
		return;
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
#if defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
	if (done->extra.sync_term) {
#else
	if (sync->is_term) {
#endif /* CONFIG_BT_CTLR_CTEINLINE_SUPPORT */
		/* In case periodic advertising list filtering is not used, the
		 * synchronization must be terminated and the host notified.
		 * In case the list filtering is used, synchronization with
		 * this particular periodic advertiser is stopped but the
		 * search continues for another one from the list.
		 *
		 * Stop periodic advertising sync ticker and clear variables informing the
		 * sync is pending. That is a step to completely terminate the synchronization.
		 * In case of search for another periodic advertiser it allows to setup new ticker
		 * for that.
		 */
		sync_ticker_cleanup(sync, NULL);
	} else
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */
	{
		uint32_t ticks_drift_minus;
		uint32_t ticks_drift_plus;
		uint16_t elapsed_event;
		struct lll_sync *lll;
		uint16_t skip_event;
		uint8_t force_lll;
		uint16_t lazy;
		uint8_t force;

		lll = &sync->lll;

		/* Events elapsed used in timeout checks below */
		skip_event = lll->skip_event;

		/* Sync drift compensation and new skip calculation; trx_cnt
		 * is non-zero only when the event actually received a PDU.
		 */
		ticks_drift_plus = 0U;
		ticks_drift_minus = 0U;
		if (done->extra.trx_cnt) {
			/* Calculate drift in ticks unit */
			ull_drift_ticks_get(done, &ticks_drift_plus, &ticks_drift_minus);

			/* Enforce skip */
			lll->skip_event = sync->skip;

			/* Reset failed to establish sync countdown */
			sync->sync_expire = 0U;
		}

		elapsed_event = lll->lazy_prepare + 1U;

		/* Reset supervision countdown */
		if (done->extra.crc_valid) {
			sync->timeout_expire = 0U;
		}

		/* check sync failed to establish */
		else if (sync->sync_expire) {
			if (sync->sync_expire > elapsed_event) {
				sync->sync_expire -= elapsed_event;
			} else {
				/* Establishment window exhausted; stop ticker
				 * and report failure to establish.
				 */
				sync_ticker_cleanup(sync, ticker_stop_sync_expire_op_cb);

				return;
			}
		}

		/* If anchor point not sync-ed, start timeout countdown, and break skip if any */
		else if (!sync->timeout_expire) {
			sync->timeout_expire = sync->timeout_reload;
		}

		/* check timeout */
		force = 0U;
		force_lll = 0U;
		if (sync->timeout_expire) {
			if (sync->timeout_expire > elapsed_event) {
				sync->timeout_expire -= elapsed_event;

				/* break skip */
				lll->skip_event = 0U;

				/* Force LLL listening on the last few events
				 * before Sync_Timeout would fire.
				 */
				if (sync->timeout_expire <= 6U) {
					force_lll = 1U;

					force = 1U;
				} else if (skip_event) {
					force = 1U;
				}
			} else {
				/* Supervision timeout; stop ticker and report
				 * sync lost.
				 */
				sync_ticker_cleanup(sync, ticker_stop_sync_lost_op_cb);

				return;
			}
		}

		lll->forced = force_lll;

		/* Check if skip needs update */
		lazy = 0U;
		if ((force) || (skip_event != lll->skip_event)) {
			lazy = lll->skip_event + 1U;
		}

		/* Update Sync ticker instance */
		if (ticks_drift_plus || ticks_drift_minus || lazy || force) {
			uint16_t sync_handle = ull_sync_handle_get(sync);
			uint32_t ticker_status;

			/* Call to ticker_update can fail under the race
			 * condition where in the periodic sync role is being
			 * stopped but at the same time it is preempted by
			 * periodic sync event that gets into close state.
			 * Accept failure when periodic sync role is being
			 * stopped.
			 */
			ticker_status =
				ticker_update(TICKER_INSTANCE_ID_CTLR,
					      TICKER_USER_ID_ULL_HIGH,
					      (TICKER_ID_SCAN_SYNC_BASE +
					       sync_handle),
					      ticks_drift_plus,
					      ticks_drift_minus, 0, 0,
					      lazy, force,
					      ticker_update_op_cb, sync);
			LL_ASSERT_ERR((ticker_status == TICKER_STATUS_SUCCESS) ||
				      (ticker_status == TICKER_STATUS_BUSY) ||
				      ((void *)sync == ull_disable_mark_get()));
		}
	}
}
1426 
/* Parse the ACAD of an AUX_SYNC_IND for a Channel Map Update Indication
 * and, when present and valid, stage the new channel map in the spare
 * double-buffer entry to be applied at the indicated instant.
 *
 * @param sync_handle Handle of the sync context the ACAD was received on
 * @param acad        Pointer to the first AD structure in the ACAD
 * @param acad_len    Total length of the ACAD, in octets
 */
void ull_sync_chm_update(uint8_t sync_handle, uint8_t *acad, uint8_t acad_len)
{
	struct pdu_adv_sync_chm_upd_ind *chm_upd_ind;
	struct ll_sync_set *sync;
	struct lll_sync *lll;
	uint8_t chm_last;
	uint16_t ad_len;

	/* Get reference to LLL context */
	sync = ull_sync_set_get(sync_handle);
	LL_ASSERT_DBG(sync);
	lll = &sync->lll;

	/* Ignore if already in progress */
	if (lll->chm_last != lll->chm_first) {
		return;
	}

	/* Find the Channel Map Update Indication */
	do {
		/* Pick the length and find the Channel Map Update Indication */
		ad_len = acad[PDU_ADV_DATA_HEADER_LEN_OFFSET];
		if (ad_len &&
		    (acad[PDU_ADV_DATA_HEADER_TYPE_OFFSET] ==
		     PDU_ADV_DATA_TYPE_CHANNEL_MAP_UPDATE_IND)) {
			break;
		}

		/* Add length field size */
		ad_len += 1U;
		if (ad_len < acad_len) {
			acad_len -= ad_len;
		} else {
			/* ACAD exhausted without finding the indication */
			return;
		}

		/* Move to next AD data */
		acad += ad_len;
	} while (acad_len);

	/* Validate the size of the Channel Map Update Indication */
	if (ad_len != (sizeof(*chm_upd_ind) + 1U)) {
		return;
	}

	/* Pick the parameters into the procedure context; write into the
	 * spare double-buffer entry, not the one in use by LLL.
	 */
	chm_last = lll->chm_last + 1U;
	if (chm_last == DOUBLE_BUFFER_SIZE) {
		chm_last = 0U;
	}

	chm_upd_ind = (void *)&acad[PDU_ADV_DATA_HEADER_DATA_OFFSET];
	(void)memcpy(lll->chm[chm_last].data_chan_map, chm_upd_ind->chm,
		     sizeof(lll->chm[chm_last].data_chan_map));
	lll->chm[chm_last].data_chan_count =
		util_ones_count_get(lll->chm[chm_last].data_chan_map,
				    sizeof(lll->chm[chm_last].data_chan_map));
	if (lll->chm[chm_last].data_chan_count < CHM_USED_COUNT_MIN) {
		/* Ignore channel map, invalid available channel count */
		return;
	}

	lll->chm_instant = sys_le16_to_cpu(chm_upd_ind->instant);

	/* Set Channel Map Update Procedure in progress; LLL applies the new
	 * map once the event counter reaches chm_instant.
	 */
	lll->chm_last = chm_last;
}
1494 
1495 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
/* @brief Function updates periodic sync slot duration.
 *
 * @param[in] sync              Pointer to sync instance
 * @param[in] slot_plus_us      Number of microseconds to add to the ticker slot
 * @param[in] slot_minus_us     Number of microseconds to subtract from the ticker slot
 *
 * @retval 0            Successful ticker slot update.
 * @retval -ENOENT      Ticker node related with provided sync is already stopped.
 * @retval -ENOMEM      Couldn't enqueue update ticker job.
 * @retval -EFAULT      Something else went wrong.
 */
int ull_sync_slot_update(struct ll_sync_set *sync, uint32_t slot_plus_us,
			 uint32_t slot_minus_us)
{
	/* volatile: written from the ticker operation callback context */
	uint32_t volatile ret_cb;
	uint32_t ret;

	ret_cb = TICKER_STATUS_BUSY;
	ret = ticker_update(TICKER_INSTANCE_ID_CTLR,
			    TICKER_USER_ID_THREAD,
			    (TICKER_ID_SCAN_SYNC_BASE +
			    ull_sync_handle_get(sync)),
			    0, 0,
			    HAL_TICKER_US_TO_TICKS(slot_plus_us),
			    HAL_TICKER_US_TO_TICKS(slot_minus_us),
			    0, 0,
			    ticker_update_op_status_give,
			    (void *)&ret_cb);
	if (ret == TICKER_STATUS_BUSY || ret == TICKER_STATUS_SUCCESS) {
		/* Wait for callback or clear semaphore if callback was already
		 * executed.
		 */
		k_sem_take(&sem_ticker_cb, K_FOREVER);

		if (ret_cb == TICKER_STATUS_FAILURE) {
			return -EFAULT; /* Something went wrong */
		} else {
			return 0;
		}
	} else {
		if (ret_cb != TICKER_STATUS_BUSY) {
			/* Ticker callback was executed and job enqueue was successful.
			 * Call k_sem_take to clear ticker callback semaphore.
			 */
			k_sem_take(&sem_ticker_cb, K_FOREVER);
		}
		/* Ticker was already stopped or job was not enqueued. */
		return (ret_cb == TICKER_STATUS_FAILURE) ? -ENOENT : -ENOMEM;
	}
}
1546 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1547 
1548 static int init_reset(void)
1549 {
1550 	/* Initialize sync pool. */
1551 	mem_init(ll_sync_pool, sizeof(struct ll_sync_set),
1552 		 sizeof(ll_sync_pool) / sizeof(struct ll_sync_set),
1553 		 &sync_free);
1554 
1555 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
1556 	k_sem_init(&sem_ticker_cb, 0, 1);
1557 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1558 
1559 	return 0;
1560 }
1561 
1562 static inline struct ll_sync_set *sync_acquire(void)
1563 {
1564 	return mem_acquire(&sync_free);
1565 }
1566 
/* Allocate and initialize a sync context for a Periodic Advertising sync
 * create procedure.
 *
 * Acquires two rx links (sync established, sync lost), one rx node and a
 * sync context; on any allocation failure everything acquired so far is
 * released and NULL is returned.
 *
 * @param sid       Advertising SID to synchronize to
 * @param timeout   Sync_Timeout, in 10 ms units (non-zero marks create in
 *                  progress)
 * @param skip      Number of events that may be skipped after reception
 * @param cte_type  CTE type filter (used with CTE type filtering support)
 * @param rx_enable Initial reporting enabled/disabled state
 * @param nodups    Duplicate filtering (used with ADI support)
 *
 * @return Initialized sync context, or NULL on resource exhaustion.
 */
static struct ll_sync_set *ull_sync_create(uint8_t sid, uint16_t timeout, uint16_t skip,
					   uint8_t cte_type, uint8_t rx_enable, uint8_t nodups)
{
	memq_link_t *link_sync_estab;
	memq_link_t *link_sync_lost;
	struct node_rx_pdu *node_rx;
	struct lll_sync *lll;
	struct ll_sync_set *sync;

	link_sync_estab = ll_rx_link_alloc();
	if (!link_sync_estab) {
		return NULL;
	}

	link_sync_lost = ll_rx_link_alloc();
	if (!link_sync_lost) {
		ll_rx_link_release(link_sync_estab);

		return NULL;
	}

	node_rx = ll_rx_alloc();
	if (!node_rx) {
		ll_rx_link_release(link_sync_lost);
		ll_rx_link_release(link_sync_estab);

		return NULL;
	}

	sync = sync_acquire();
	if (!sync) {
		ll_rx_release(node_rx);
		ll_rx_link_release(link_sync_lost);
		ll_rx_link_release(link_sync_estab);

		return NULL;
	}

	sync->peer_addr_resolved = 0U;

	/* Initialize sync context */
	node_rx->hdr.link = link_sync_estab;
	sync->node_rx_lost.rx.hdr.link = link_sync_lost;

	/* Make sure that the node_rx_sync_establ hasn't got anything assigned. It is used to
	 * mark when sync establishment is in progress.
	 */
	LL_ASSERT_DBG(!sync->node_rx_sync_estab);
	sync->node_rx_sync_estab = node_rx;

	/* Reporting initially enabled/disabled */
	sync->rx_enable = rx_enable;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
	sync->nodups = nodups;
#endif
	sync->skip = skip;
	sync->is_stop = 0U;

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	sync->enc = 0U;
#endif /* CONFIG_BT_CTLR_SYNC_ISO */

	/* NOTE: Use timeout not zero to represent sync context used for sync
	 * create.
	 */
	sync->timeout = timeout;

	/* NOTE: Use timeout_reload not zero to represent sync established. */
	sync->timeout_reload = 0U;
	sync->timeout_expire = 0U;

	/* Remember the SID */
	sync->sid = sid;

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	/* Reset Broadcast Isochronous Group Sync Establishment */
	sync->iso.sync_iso = NULL;
#endif /* CONFIG_BT_CTLR_SYNC_ISO */

	/* Initialize sync LLL context */
	lll = &sync->lll;
	lll->lll_aux = NULL;
	lll->is_rx_enabled = sync->rx_enable;
	lll->skip_prepare = 0U;
	lll->skip_event = 0U;
	lll->window_widening_prepare_us = 0U;
	lll->window_widening_event_us = 0U;
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
	lll->cte_type = cte_type;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	ull_df_sync_cfg_init(&lll->df_cfg);
	LL_ASSERT_DBG(!lll->node_cte_incomplete);
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	/* Initialise ULL and LLL headers */
	ull_hdr_init(&sync->ull);
	lll_hdr_init(lll, sync);

	return sync;
}
1670 
1671 static void sync_ticker_cleanup(struct ll_sync_set *sync, ticker_op_func stop_op_cb)
1672 {
1673 	uint16_t sync_handle = ull_sync_handle_get(sync);
1674 	uint32_t ret;
1675 
1676 	/* Stop Periodic Sync Ticker */
1677 	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
1678 			  TICKER_ID_SCAN_SYNC_BASE + sync_handle, stop_op_cb, (void *)sync);
1679 	LL_ASSERT_ERR((ret == TICKER_STATUS_SUCCESS) ||
1680 		      (ret == TICKER_STATUS_BUSY));
1681 
1682 	/* Mark sync context not sync established */
1683 	sync->timeout_reload = 0U;
1684 }
1685 
/* Periodic sync ticker expiry callback: enqueue a mayfly towards LLL with
 * the prepare parameters for the upcoming sync radio event.
 *
 * NOTE: mfy_lll_prepare and p are static; a single prepare may be in
 * flight per expiry, which the mayfly framework guarantees.
 */
static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param)
{
	static memq_link_t link_lll_prepare;
	static struct mayfly mfy_lll_prepare = {
		0, 0, &link_lll_prepare, NULL, NULL};
	static struct lll_prepare_param p;
	struct ll_sync_set *sync = param;
	struct lll_sync *lll;
	uint32_t ret;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_O(1);

	lll = &sync->lll;

	/* Commit receive enable changed value */
	lll->is_rx_enabled = sync->rx_enable;

	/* Increment prepare reference count */
	ref = ull_ref_inc(&sync->ull);
	LL_ASSERT_DBG(ref);

	/* Append timing parameters */
	p.ticks_at_expire = ticks_at_expire;
	p.remainder = remainder;
	p.lazy = lazy;
	p.force = force;
	p.param = lll;
	mfy_lll_prepare.param = &p;
	/* lll_sync_create_prepare until established, lll_sync_prepare after */
	mfy_lll_prepare.fp = sync->lll_sync_prepare;

	/* Kick LLL prepare */
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, 0,
			     &mfy_lll_prepare);
	LL_ASSERT_ERR(!ret);

	DEBUG_RADIO_PREPARE_O(1);
}
1726 
1727 static void ticker_start_op_cb(uint32_t status, void *param)
1728 {
1729 	ARG_UNUSED(param);
1730 	LL_ASSERT_ERR(status == TICKER_STATUS_SUCCESS);
1731 }
1732 
1733 static void ticker_update_op_cb(uint32_t status, void *param)
1734 {
1735 	LL_ASSERT_ERR((status == TICKER_STATUS_SUCCESS) ||
1736 		      (param == ull_disable_mark_get()));
1737 }
1738 
1739 static void ticker_stop_sync_expire_op_cb(uint32_t status, void *param)
1740 {
1741 	uint32_t retval;
1742 	static memq_link_t link;
1743 	static struct mayfly mfy = {0, 0, &link, NULL, sync_expire};
1744 
1745 	LL_ASSERT_ERR(status == TICKER_STATUS_SUCCESS);
1746 
1747 	mfy.param = param;
1748 
1749 	retval = mayfly_enqueue(TICKER_USER_ID_ULL_LOW, TICKER_USER_ID_ULL_HIGH,
1750 				0, &mfy);
1751 	LL_ASSERT_ERR(!retval);
1752 }
1753 
1754 static void sync_expire(void *param)
1755 {
1756 	struct ll_sync_set *sync = param;
1757 	struct node_rx_sync *se;
1758 	struct node_rx_pdu *rx;
1759 
1760 	/* Generate Periodic advertising sync failed to establish */
1761 	rx = (void *)sync->node_rx_sync_estab;
1762 	rx->hdr.handle = LLL_HANDLE_INVALID;
1763 
1764 	/* Clear the node to mark the sync establish as being completed.
1765 	 * In this case the completion reason is sync expire.
1766 	 */
1767 	sync->node_rx_sync_estab = NULL;
1768 
1769 	/* NOTE: struct node_rx_sync_estab has uint8_t member following the
1770 	 *       struct node_rx_hdr to store the reason.
1771 	 */
1772 	se = (void *)rx->pdu;
1773 	se->status = BT_HCI_ERR_CONN_FAIL_TO_ESTAB;
1774 
1775 	/* NOTE: footer param has already been populated during sync setup */
1776 
1777 	/* Enqueue the sync failed to established towards ULL context */
1778 	ll_rx_put_sched(rx->hdr.link, rx);
1779 }
1780 
1781 static void ticker_stop_sync_lost_op_cb(uint32_t status, void *param)
1782 {
1783 	uint32_t retval;
1784 	static memq_link_t link;
1785 	static struct mayfly mfy = {0, 0, &link, NULL, sync_lost};
1786 
1787 	/* When in race between terminate requested in thread context and
1788 	 * sync lost scenario, do not generate the sync lost node rx from here
1789 	 */
1790 	if (status != TICKER_STATUS_SUCCESS) {
1791 		LL_ASSERT_DBG(param == ull_disable_mark_get());
1792 
1793 		return;
1794 	}
1795 
1796 	mfy.param = param;
1797 
1798 	retval = mayfly_enqueue(TICKER_USER_ID_ULL_LOW, TICKER_USER_ID_ULL_HIGH,
1799 				0, &mfy);
1800 	LL_ASSERT_ERR(!retval);
1801 }
1802 
/* Mayfly handler, in ULL high priority context, generating the
 * Periodic Advertising sync lost node rx for @p param (a
 * struct ll_sync_set).
 *
 * If the sync was never established (the LLL prepare callback has not
 * been switched to lll_sync_prepare yet), a "failed to establish"
 * report is generated instead by delegating to sync_expire().
 */
static void sync_lost(void *param)
{
	struct ll_sync_set *sync;
	struct node_rx_pdu *rx;

	/* sync established was not generated yet, no free node rx */
	sync = param;
	if (sync->lll_sync_prepare != lll_sync_prepare) {
		sync_expire(param);

		return;
	}

	/* Generate Periodic advertising sync lost */
	rx = (void *)&sync->node_rx_lost;
	rx->hdr.handle = ull_sync_handle_get(sync);
	rx->hdr.type = NODE_RX_TYPE_SYNC_LOST;
	rx->rx_ftr.param = sync;

	/* Enqueue the sync lost towards ULL context */
	ll_rx_put_sched(rx->hdr.link, rx);

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	if (sync->iso.sync_iso) {
		/* ISO create BIG flag in the periodic advertising context is still set */
		struct ll_sync_iso_set *sync_iso;

		sync_iso = sync->iso.sync_iso;

		rx = (void *)&sync_iso->node_rx_lost;
		rx->hdr.handle = sync_iso->big_handle;
		rx->hdr.type = NODE_RX_TYPE_SYNC_ISO;
		rx->rx_ftr.param = sync_iso;
		/* First PDU byte carries the failure reason for the pending
		 * BIG sync establishment.
		 */
		*((uint8_t *)rx->pdu) = BT_HCI_ERR_CONN_FAIL_TO_ESTAB;

		/* Enqueue the sync iso lost towards ULL context */
		ll_rx_put_sched(rx->hdr.link, rx);
	}
#endif /* CONFIG_BT_CTLR_SYNC_ISO */
}
1843 
1844 #if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)
1845 static struct ll_sync_set *sync_is_create_get(uint16_t handle)
1846 {
1847 	struct ll_sync_set *sync;
1848 
1849 	sync = ull_sync_set_get(handle);
1850 	if (!sync || !sync->timeout) {
1851 		return NULL;
1852 	}
1853 
1854 	return sync;
1855 }
1856 
1857 static bool peer_sid_sync_exists(uint8_t const peer_id_addr_type,
1858 				 uint8_t const *const peer_id_addr,
1859 				 uint8_t sid)
1860 {
1861 	uint16_t handle;
1862 
1863 	for (handle = 0U; handle < CONFIG_BT_PER_ADV_SYNC_MAX; handle++) {
1864 		struct ll_sync_set *sync = sync_is_create_get(handle);
1865 
1866 		if (sync &&
1867 		    (sync->peer_id_addr_type == peer_id_addr_type) &&
1868 		    !memcmp(sync->peer_id_addr, peer_id_addr, BDADDR_SIZE) &&
1869 		    (sync->sid == sid)) {
1870 			return true;
1871 		}
1872 	}
1873 
1874 	return false;
1875 }
1876 #endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC */
1877 
1878 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
1879 static void ticker_update_op_status_give(uint32_t status, void *param)
1880 {
1881 	*((uint32_t volatile *)param) = status;
1882 
1883 	k_sem_give(&sem_ticker_cb);
1884 }
1885 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1886 
1887 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING) && \
1888 	!defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
1889 static struct pdu_cte_info *pdu_cte_info_get(struct pdu_adv *pdu)
1890 {
1891 	struct pdu_adv_com_ext_adv *com_hdr;
1892 	struct pdu_adv_ext_hdr *hdr;
1893 
1894 	com_hdr = &pdu->adv_ext_ind;
1895 	hdr = &com_hdr->ext_hdr;
1896 
1897 	if (!com_hdr->ext_hdr_len || (com_hdr->ext_hdr_len != 0 && !hdr->cte_info)) {
1898 		return NULL;
1899 	}
1900 
1901 	/* Make sure there are no fields that are not allowed for AUX_SYNC_IND and AUX_CHAIN_IND */
1902 	LL_ASSERT_DBG(!hdr->adv_addr);
1903 	LL_ASSERT_DBG(!hdr->tgt_addr);
1904 
1905 	return (struct pdu_cte_info *)hdr->data;
1906 }
1907 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING && !CONFIG_BT_CTLR_CTEINLINE_SUPPORT */
1908 
1909 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
/* Handle a Periodic Advertising Sync Transfer (LL_PERIODIC_SYNC_IND)
 * received over @p conn: allocate a sync context and set it up to
 * synchronize to the transferred periodic advertising train.
 *
 * Silently ignored when the Host configured PAST mode as "no sync",
 * or when a sync to the same peer identity address and SID already
 * exists (with CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC), or when no sync
 * context could be allocated.
 *
 * NOTE(review): conn_event_count and sync_conn_event_count appear to be
 * in units of connection events of @p conn — confirm against the LLCP
 * caller that populates them.
 */
void ull_sync_transfer_received(struct ll_conn *conn, uint16_t service_data,
				struct pdu_adv_sync_info *si, uint16_t conn_event_count,
				uint16_t last_pa_event_counter, uint8_t sid,
				uint8_t addr_type, uint8_t sca, uint8_t phy,
				uint8_t *adv_addr, uint16_t sync_conn_event_count,
				uint8_t addr_resolved)
{
	struct ll_sync_set *sync;
	uint16_t conn_evt_current;
	uint8_t rx_enable;
	uint8_t nodups;

	if (conn->past.mode == BT_HCI_LE_PAST_MODE_NO_SYNC) {
		/* Ignore LL_PERIODIC_SYNC_IND - see Bluetooth Core Specification v5.4
		 * Vol 4, Part E, Section 7.8.91
		 */
		return;
	}

#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)
	/* Do not sync twice to the same peer and same SID */
	if (peer_sid_sync_exists(addr_type, adv_addr, sid)) {
		return;
	}
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC */

	/* Map the Host configured PAST mode onto report generation flags */
	nodups = (conn->past.mode == BT_HCI_LE_PAST_MODE_SYNC_FILTER_DUPLICATES) ? 1U : 0U;
	rx_enable = (conn->past.mode == BT_HCI_LE_PAST_MODE_NO_REPORTS) ? 0U : 1U;

	/* Allocate a sync context using the PAST parameters stored on the
	 * connection; bail out quietly when none is available.
	 */
	sync = ull_sync_create(sid, conn->past.timeout, conn->past.skip, conn->past.cte_type,
			       rx_enable, nodups);
	if (!sync) {
		return;
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
	/* Reset filter policy in lll_sync */
	sync->lll.filter_policy = 0U;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

	/* Record the peer the sync targets, as received in the indication */
	sync->peer_id_addr_type = addr_type;
	sync->peer_addr_resolved = addr_resolved;
	memcpy(sync->peer_id_addr, adv_addr, BDADDR_SIZE);
	sync->lll.phy = phy;

	conn_evt_current = ull_conn_event_counter(conn);

	/* LLCP should have ensured this holds */
	LL_ASSERT_DBG(sync_conn_event_count != conn_evt_current);

	/* Hand over to setup with the offset (in connection events) from
	 * now to the event the sync info timing references.
	 */
	ull_sync_setup_from_sync_transfer(conn, service_data, sync, si,
					  conn_event_count - conn_evt_current,
					  last_pa_event_counter, sync_conn_event_count,
					  sca);
}
1965 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
1966