/*
 * Copyright (c) 2020-2021 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdlib.h>
#include <zephyr/kernel.h>
#include <soc.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/bluetooth/hci_types.h>

#include "util/util.h"
#include "util/mem.h"
#include "util/memq.h"
#include "util/mayfly.h"
#include "util/dbuf.h"

#include "hal/cpu.h"
#include "hal/ccm.h"
#include "hal/radio.h"
#include "hal/ticker.h"

#include "ticker/ticker.h"

#include "pdu_df.h"
#include "lll/pdu_vendor.h"
#include "pdu.h"

#include "lll.h"
#include "lll/lll_adv_types.h"
#include "lll_adv.h"
#include "lll/lll_adv_pdu.h"
#include "lll_clock.h"
#include "lll/lll_vendor.h"
#include "lll_chan.h"
#include "lll_scan.h"
#include "lll/lll_df_types.h"
#include "lll_conn.h"
#include "lll_conn_iso.h"
#include "lll_sync.h"
#include "lll_sync_iso.h"

#include "isoal.h"

#include "ull_tx_queue.h"

#include "ull_filter.h"
#include "ull_iso_types.h"
#include "ull_scan_types.h"
#include "ull_sync_types.h"
#include "ull_conn_types.h"
#include "ull_adv_types.h"
#include "ull_conn_iso_types.h"

#include "ull_internal.h"
#include "ull_adv_internal.h"
#include "ull_scan_internal.h"
#include "ull_sync_internal.h"
#include "ull_conn_internal.h"
#include "ull_conn_iso_internal.h"
#include "ull_df_types.h"
#include "ull_df_internal.h"

#include "ull_llcp.h"
#include "ll.h"

#include "hal/debug.h"

/* Check that the timeout_reload member is at a safe offset when ll_sync_set
 * is allocated using the mem interface. timeout_reload being non-zero is
 * used to indicate that a sync is established, and to check for sync being
 * terminated under race conditions between the HCI Tx and Rx threads when
 * Periodic Advertising Reports are generated.
 */
MEM_FREE_MEMBER_ACCESS_BUILD_ASSERT(struct ll_sync_set, timeout_reload);

static struct ll_sync_set *ull_sync_create(uint8_t sid, uint16_t timeout, uint16_t skip,
					   uint8_t cte_type, uint8_t rx_enable, uint8_t nodups);
static int init_reset(void);
static inline struct ll_sync_set *sync_acquire(void);
static void sync_ticker_cleanup(struct ll_sync_set *sync, ticker_op_func stop_op_cb);
static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param);
static void ticker_start_op_cb(uint32_t status, void *param);
static void ticker_update_op_cb(uint32_t status, void *param);
static void ticker_stop_sync_expire_op_cb(uint32_t status, void *param);
static void sync_expire(void *param);
static void ticker_stop_sync_lost_op_cb(uint32_t status, void *param);
static void sync_lost(void *param);
#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)
static bool peer_sid_sync_exists(uint8_t const peer_id_addr_type,
				 uint8_t const *const peer_id_addr,
				 uint8_t sid);
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC */
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING) && \
	!defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
static struct pdu_cte_info *pdu_cte_info_get(struct pdu_adv *pdu);
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING && !CONFIG_BT_CTLR_CTEINLINE_SUPPORT */

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
static void ticker_update_op_status_give(uint32_t status, void *param);
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

static struct ll_sync_set ll_sync_pool[CONFIG_BT_PER_ADV_SYNC_MAX];
static void *sync_free;

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
/* Semaphore to wake up the thread on ticker API callback */
static struct k_sem sem_ticker_cb;
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

uint8_t ll_sync_create(uint8_t options, uint8_t sid, uint8_t adv_addr_type,
		       uint8_t *adv_addr, uint16_t skip,
		       uint16_t sync_timeout, uint8_t sync_cte_type)
{
	struct ll_scan_set *scan_coded;
	struct ll_scan_set *scan;
	struct ll_sync_set *sync;
	uint8_t rx_enable;
	uint8_t nodups;

	scan = ull_scan_set_get(SCAN_HANDLE_1M);
	if (!scan || scan->periodic.sync) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
		if (!scan_coded || scan_coded->periodic.sync) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}

#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)
	/* Do not sync twice to the same peer and same SID */
	if (((options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_USE_LIST) == 0U) &&
	    peer_sid_sync_exists(adv_addr_type, adv_addr, sid)) {
		return BT_HCI_ERR_CONN_ALREADY_EXISTS;
	}
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC */

	rx_enable = !(options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_REPORTS_DISABLED);
	nodups = (options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_FILTER_DUPLICATE) ? 1U : 0U;

	sync = ull_sync_create(sid, sync_timeout, skip, sync_cte_type, rx_enable, nodups);
	if (!sync) {
		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	scan->periodic.cancelled = 0U;
	scan->periodic.state = LL_SYNC_STATE_IDLE;
	scan->periodic.param = NULL;
	scan->periodic.filter_policy =
		options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_USE_LIST;
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded->periodic.cancelled = 0U;
		scan_coded->periodic.state = LL_SYNC_STATE_IDLE;
		scan_coded->periodic.param = NULL;
		scan_coded->periodic.filter_policy =
			scan->periodic.filter_policy;
	}

	/* Remember the peer address when the periodic advertiser list is not
	 * used.
	 * NOTE: Peer address will be filled/overwritten with the correct
	 * identity address on sync setup when privacy is enabled.
	 */
	if (!scan->periodic.filter_policy) {
		sync->peer_id_addr_type = adv_addr_type;
		(void)memcpy(sync->peer_id_addr, adv_addr,
			     sizeof(sync->peer_id_addr));
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
	/* Set filter policy in lll_sync */
	sync->lll.filter_policy = scan->periodic.filter_policy;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

	/* Enable scanner to create sync */
	scan->periodic.sync = sync;

#if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
	scan->lll.is_sync = 1U;
#endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded->periodic.sync = sync;

#if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
		scan_coded->lll.is_sync = 1U;
#endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
	}

	return 0;
}
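
/* Illustrative usage sketch (commentary, not part of the build): how an
 * HCI LE Periodic Advertising Create Sync command could map onto
 * ll_sync_create(). The parameter values below are hypothetical.
 *
 *   uint8_t peer[BDADDR_SIZE] = { 0xC0, 0x01, 0x02, 0x03, 0x04, 0x05 };
 *
 *   // Options 0: no advertiser list, reports enabled, no dup filtering
 *   uint8_t err = ll_sync_create(0U, 5U, 1U, peer,
 *                                0U,    // Skip: listen to every event
 *                                400U,  // Sync_Timeout: 400 * 10 ms = 4 s
 *                                0U);   // No CTE type restriction
 *   // err == 0 here only reserves the sync context; establishment
 *   // happens later when scanning receives a matching sync info.
 */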

#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
void ull_sync_setup_from_sync_transfer(struct ll_conn *conn, uint16_t service_data,
				       struct ll_sync_set *sync, struct pdu_adv_sync_info *si,
				       int16_t conn_evt_offset, uint16_t last_pa_event_counter,
				       uint16_t sync_conn_event_count, uint8_t sender_sca)
{
	struct node_rx_past_received *se_past;
	uint32_t ticks_slot_overhead;
	uint32_t ticks_slot_offset;
	uint32_t conn_interval_us;
	uint32_t sync_offset_us;
	uint32_t ready_delay_us;
	struct node_rx_pdu *rx;
	uint8_t *data_chan_map;
	struct lll_sync *lll;
	uint32_t interval_us;
	uint32_t slot_us;
	uint32_t ticks_anchor;
	uint8_t chm_last;
	uint32_t ret;
	uint16_t interval;
	uint16_t sync_handle;
	uint8_t sca;

	lll = &sync->lll;

	/* Copy the channel map from the sca_chm field in the sync_info
	 * structure, and clear the SCA bits.
	 */
	chm_last = lll->chm_first;
	lll->chm_last = chm_last;
	data_chan_map = lll->chm[chm_last].data_chan_map;
	(void)memcpy(data_chan_map, si->sca_chm,
		     sizeof(lll->chm[chm_last].data_chan_map));
	data_chan_map[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &=
		~PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK;
	lll->chm[chm_last].data_chan_count =
		util_ones_count_get(data_chan_map,
				    sizeof(lll->chm[chm_last].data_chan_map));
	if (lll->chm[chm_last].data_chan_count < CHM_USED_COUNT_MIN) {
		/* Ignore sync setup, invalid available channel count */
		return;
	}

	memcpy(lll->access_addr, si->aa, sizeof(lll->access_addr));
	lll->data_chan_id = lll_chan_id(lll->access_addr);
	memcpy(lll->crc_init, si->crc_init, sizeof(lll->crc_init));
	lll->event_counter = sys_le16_to_cpu(si->evt_cntr);

	interval = sys_le16_to_cpu(si->interval);
	interval_us = interval * PERIODIC_INT_UNIT_US;

	/* Convert from 10 ms units to interval units */
	if (sync->timeout != 0 && interval_us != 0) {
		sync->timeout_reload = RADIO_SYNC_EVENTS((sync->timeout * 10U *
						  USEC_PER_MSEC), interval_us);
	}
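
	/* Worked example (illustrative, hypothetical numbers): with
	 * sync->timeout = 400 (400 * 10 ms = 4 s) and si->interval = 80
	 * (80 * 1.25 ms = 100 ms), timeout_reload evaluates to
	 * RADIO_SYNC_EVENTS(4000000, 100000) = 40 events before the
	 * synchronization would be declared lost.
	 */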

	/* Adjust the Skip value so that there is a minimum of 6 events that
	 * can be listened to before Sync_Timeout occurs.
	 * The adjustment of the skip value is controller implementation
	 * specific and not specified by the Bluetooth Core Specification v5.3.
	 * The Controller `may` use the Skip value, and the implementation here
	 * covers a case where the Skip value could lead to fewer events being
	 * listened to until Sync_Timeout. Listening to more consecutive events
	 * before Sync_Timeout increases the probability of retaining the
	 * Periodic Synchronization.
	 */
	if (sync->timeout_reload > CONN_ESTAB_COUNTDOWN) {
		uint16_t skip_max = sync->timeout_reload - CONN_ESTAB_COUNTDOWN;

		if (sync->skip > skip_max) {
			sync->skip = skip_max;
		}
	}
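
	/* Worked example (illustrative): continuing the numbers above with
	 * timeout_reload = 40, and assuming CONN_ESTAB_COUNTDOWN is the 6
	 * events named in the comment, skip_max = 34; a Host-requested Skip
	 * of 100 would be clamped to 34 so that at least six receive
	 * opportunities remain before Sync_Timeout.
	 */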

	sync->sync_expire = CONN_ESTAB_COUNTDOWN;

	/* Extract the SCA value from the sca_chm field of the sync_info
	 * structure.
	 */
	sca = (si->sca_chm[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &
	       PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK) >>
	      PDU_SYNC_INFO_SCA_CHM_SCA_BIT_POS;

	lll->sca = sca;

	lll->window_widening_periodic_us =
		DIV_ROUND_UP(((lll_clock_ppm_local_get() +
				   lll_clock_ppm_get(sca)) *
				  interval_us), USEC_PER_SEC);
	lll->window_widening_max_us = (interval_us >> 1) - EVENT_IFS_US;
	if (PDU_ADV_SYNC_INFO_OFFS_UNITS_GET(si)) {
		lll->window_size_event_us = OFFS_UNIT_300_US;
	} else {
		lll->window_size_event_us = OFFS_UNIT_30_US;
	}
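
	/* Worked example (illustrative): with a 50 ppm local clock, a peer
	 * SCA of 50 ppm and interval_us = 100000, the per-event widening is
	 * DIV_ROUND_UP((50 + 50) * 100000, 1000000) = 10 us, i.e. the receive
	 * window grows by 10 us for every elapsed periodic interval.
	 */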

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	lll->node_cte_incomplete = NULL;
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	/* Prepare Periodic Advertising Sync Transfer Received event (dispatched later) */
	sync_handle = ull_sync_handle_get(sync);
	rx = (void *)sync->node_rx_sync_estab;
	rx->hdr.type = NODE_RX_TYPE_SYNC_TRANSFER_RECEIVED;
	rx->hdr.handle = sync_handle;
	rx->rx_ftr.param = sync;

	/* Create node_rx and assign values */
	se_past = (void *)rx->pdu;
	se_past->rx_sync.status = BT_HCI_ERR_SUCCESS;
	se_past->rx_sync.interval = interval;
	se_past->rx_sync.phy = sync->lll.phy;
	se_past->rx_sync.sca = sca;
	se_past->conn_handle = ll_conn_handle_get(conn);
	se_past->service_data = service_data;

	conn_interval_us = conn->lll.interval * CONN_INT_UNIT_US;

	/* Calculate offset and schedule sync radio events */
	ready_delay_us = lll_radio_rx_ready_delay_get(lll->phy, PHY_FLAGS_S8);

	sync_offset_us = PDU_ADV_SYNC_INFO_OFFSET_GET(si) * lll->window_size_event_us;
	/* offs_adjust may be 1 only if sync setup by LL_PERIODIC_SYNC_IND */
	sync_offset_us += (PDU_ADV_SYNC_INFO_OFFS_ADJUST_GET(si) ? OFFS_ADJUST_US : 0U);
	sync_offset_us -= EVENT_TICKER_RES_MARGIN_US;
	sync_offset_us -= EVENT_JITTER_US;
	sync_offset_us -= ready_delay_us;

	if (conn_evt_offset) {
		int64_t conn_offset_us = (int64_t)conn_evt_offset * conn_interval_us;

		if ((int64_t)sync_offset_us + conn_offset_us < 0) {
			uint32_t total_offset_us = abs((int64_t)sync_offset_us + conn_offset_us);
			uint32_t sync_intervals = DIV_ROUND_UP(total_offset_us, interval_us);

			lll->event_counter += sync_intervals;
			sync_offset_us = (sync_intervals * interval_us) - total_offset_us;
		} else {
			sync_offset_us += conn_offset_us;
		}
	}

	/* Calculate initial window widening - see Core Spec vol 6, part B, 5.1.13.1 */
	{
		uint16_t event_delta;
		uint32_t drift_us;
		uint64_t da;
		uint64_t db;
		uint64_t d;

		const uint32_t local_sca_ppm = lll_clock_ppm_local_get();

		event_delta = lll->event_counter - last_pa_event_counter;

		da = (uint64_t)(local_sca_ppm + lll_clock_ppm_get(sca)) * interval_us;
		da = DIV_ROUND_UP(da * (uint64_t)event_delta, USEC_PER_SEC);

		db = (uint64_t)(local_sca_ppm + lll_clock_ppm_get(sender_sca)) * conn_interval_us;
		db = DIV_ROUND_UP(db * (uint64_t)(ull_conn_event_counter(conn) -
						  sync_conn_event_count), USEC_PER_SEC);

		d = DIV_ROUND_UP((da + db) * (USEC_PER_SEC + local_sca_ppm +
					      lll_clock_ppm_get(sca) +
					      lll_clock_ppm_get(sender_sca)), USEC_PER_SEC);

		/* Limit drift compensation to the maximum window widening */
		drift_us = MIN((uint32_t)d, lll->window_widening_max_us);

		/* Apply total drift to initial window size */
		lll->window_size_event_us += drift_us;

		/* Adjust offset if less than the drift compensation */
		while (sync_offset_us < drift_us) {
			sync_offset_us += interval_us;
			lll->event_counter++;
		}

		sync_offset_us -= drift_us;
	}
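
	/* Worked example (illustrative, hypothetical numbers): with 50 ppm
	 * clocks on all sides, interval_us = 100000, conn_interval_us =
	 * 50000, event_delta = 10 and two connection events elapsed since
	 * sync_conn_event_count:
	 *   da = ceil(100 * 100000 * 10 / 1e6) = 100 us
	 *   db = ceil(100 * 50000 * 2 / 1e6)   = 10 us
	 *   d  = ceil((100 + 10) * (1e6 + 150) / 1e6) = 111 us
	 * so the initial receive window is widened by about 111 us, capped
	 * at window_widening_max_us.
	 */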

	interval_us -= lll->window_widening_periodic_us;

	/* Calculate event time reservation */
	slot_us = PDU_AC_MAX_US(PDU_AC_EXT_PAYLOAD_RX_SIZE, lll->phy);
	slot_us += ready_delay_us;

	/* Add implementation defined radio event overheads */
	if (IS_ENABLED(CONFIG_BT_CTLR_EVENT_OVERHEAD_RESERVE_MAX)) {
		slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
	}

	/* TODO: active_to_start feature port */
	sync->ull.ticks_active_to_start = 0U;
	sync->ull.ticks_prepare_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	sync->ull.ticks_preempt_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
	sync->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);

	ticks_slot_offset = MAX(sync->ull.ticks_active_to_start,
				sync->ull.ticks_prepare_to_start);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}
	ticks_slot_offset += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

	sync->lll_sync_prepare = lll_sync_create_prepare;

	ticks_anchor = conn->llcp.prep.ticks_at_expire;

#if defined(CONFIG_BT_PERIPHERAL)
	if (conn->lll.role == BT_HCI_ROLE_PERIPHERAL) {
		/* Compensate for window widening */
		ticks_anchor += HAL_TICKER_US_TO_TICKS(conn->lll.periph.window_widening_event_us);
	}
#endif /* CONFIG_BT_PERIPHERAL */

	ret = ticker_start(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			   (TICKER_ID_SCAN_SYNC_BASE + sync_handle),
			   ticks_anchor,
			   HAL_TICKER_US_TO_TICKS(sync_offset_us),
			   HAL_TICKER_US_TO_TICKS(interval_us),
			   HAL_TICKER_REMAINDER(interval_us),
			   TICKER_NULL_LAZY,
			   (sync->ull.ticks_slot + ticks_slot_overhead),
			   ticker_cb, sync,
			   ticker_start_op_cb, (void *)__LINE__);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */

uint8_t ll_sync_create_cancel(void **rx)
{
	struct ll_scan_set *scan_coded;
	memq_link_t *link_sync_estab;
	memq_link_t *link_sync_lost;
	struct node_rx_pdu *node_rx;
	struct ll_scan_set *scan;
	struct ll_sync_set *sync;
	struct node_rx_sync *se;

	scan = ull_scan_set_get(SCAN_HANDLE_1M);
	if (!scan || !scan->periodic.sync) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
		if (!scan_coded || !scan_coded->periodic.sync) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}

	/* Check for the race condition where sync is established when sync
	 * create cancel is invoked.
	 *
	 * Setting `scan->periodic.cancelled` represents cancellation
	 * requested in the thread context. Checking `scan->periodic.sync` for
	 * NULL confirms if synchronization was established before
	 * `scan->periodic.cancelled` was set to 1U.
	 */
	scan->periodic.cancelled = 1U;
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded->periodic.cancelled = 1U;
	}
	cpu_dmb();
	sync = scan->periodic.sync;
	if (!sync) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* node_rx_sync_estab is assigned when the Host calls create sync and
	 * cleared when sync is established. timeout_reload is set when sync is
	 * found and set up. It is non-zero until sync is terminated. Together
	 * they give information about the current sync state:
	 * - node_rx_sync_estab == NULL && timeout_reload != 0 => sync is established
	 * - node_rx_sync_estab == NULL && timeout_reload == 0 => sync is terminated
	 * - node_rx_sync_estab != NULL && timeout_reload == 0 => sync is created
	 * - node_rx_sync_estab != NULL && timeout_reload != 0 => sync is waiting to be established
	 */
	if (!sync->node_rx_sync_estab) {
		/* There is no sync to be cancelled */
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	sync->is_stop = 1U;
	cpu_dmb();

	if (sync->timeout_reload != 0U) {
		uint16_t sync_handle = ull_sync_handle_get(sync);

		LL_ASSERT(sync_handle <= UINT8_MAX);

		/* Sync is not established yet, so stop the sync ticker */
		const int err =
			ull_ticker_stop_with_mark((TICKER_ID_SCAN_SYNC_BASE +
						   (uint8_t)sync_handle),
						  sync, &sync->lll);
		if (err != 0 && err != -EALREADY) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	} /* else: sync was created but not yet set up, there is no sync ticker yet. */

	/* It is safe to remove the association with the scanner as the
	 * cancelled flag is set, the sync is_stop flag was set and sync has
	 * not been established.
	 */
	ull_sync_setup_reset(sync);

	/* Mark the sync context as sync create cancelled */
	if (IS_ENABLED(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)) {
		sync->timeout = 0U;
	}

	node_rx = sync->node_rx_sync_estab;
	link_sync_estab = node_rx->hdr.link;
	link_sync_lost = sync->node_rx_lost.rx.hdr.link;

	ll_rx_link_release(link_sync_lost);
	ll_rx_link_release(link_sync_estab);
	ll_rx_release(node_rx);

	/* Clear the node after release to mark the sync establishment as
	 * being completed. In this case the completion reason is sync
	 * cancelled by the Host.
	 */
	sync->node_rx_sync_estab = NULL;

	node_rx = (void *)&sync->node_rx_lost;
	node_rx->hdr.type = NODE_RX_TYPE_SYNC;
	node_rx->hdr.handle = LLL_HANDLE_INVALID;

	/* NOTE: struct node_rx_lost has a uint8_t member following the
	 *       struct node_rx_hdr to store the reason.
	 */
	se = (void *)node_rx->pdu;
	se->status = BT_HCI_ERR_OP_CANCELLED_BY_HOST;

	/* NOTE: Since NODE_RX_TYPE_SYNC is only generated from the ULL
	 *       context, pass the ULL sync context as the parameter.
	 */
	node_rx->rx_ftr.param = sync;

	*rx = node_rx;

	return 0;
}

uint8_t ll_sync_terminate(uint16_t handle)
{
	struct lll_scan_aux *lll_aux;
	memq_link_t *link_sync_lost;
	struct ll_sync_set *sync;
	int err;

	sync = ull_sync_is_enabled_get(handle);
	if (!sync) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	/* Request terminate, no new ULL scheduling to be set up */
	sync->is_stop = 1U;
	cpu_dmb();

	/* Stop periodic sync ticker timeouts */
	err = ull_ticker_stop_with_mark(TICKER_ID_SCAN_SYNC_BASE + handle,
					sync, &sync->lll);
	LL_ASSERT_INFO2(err == 0 || err == -EALREADY, handle, err);
	if (err) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Check and stop any auxiliary PDU receptions */
	lll_aux = sync->lll.lll_aux;
	if (lll_aux) {
#if defined(CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS)
		err = ull_scan_aux_stop(&sync->lll);
#else /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
		struct ll_scan_aux_set *aux;

		aux = HDR_LLL2ULL(lll_aux);
		err = ull_scan_aux_stop(aux);
#endif /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
		if (err && (err != -EALREADY)) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}

#if !defined(CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS)
		LL_ASSERT(!aux->parent);
#endif /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
	}

#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
	/* Clean up node_rx_sync_estab if still present */
	if (sync->node_rx_sync_estab) {
		memq_link_t *link_sync_estab;
		struct node_rx_pdu *node_rx;

		node_rx = (void *)sync->node_rx_sync_estab;
		link_sync_estab = node_rx->hdr.link;

		ll_rx_link_release(link_sync_estab);
		ll_rx_release(node_rx);

		sync->node_rx_sync_estab = NULL;
	}
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */

	link_sync_lost = sync->node_rx_lost.rx.hdr.link;
	ll_rx_link_release(link_sync_lost);

	/* Mark sync context as not sync established */
	sync->timeout_reload = 0U;

	ull_sync_release(sync);

	return 0;
}

/* @brief Link Layer interface function corresponding to HCI LE Set Periodic
 *        Advertising Receive Enable command.
 *
 * @param[in] handle Sync_Handle identifying the periodic advertising
 *                   train. Range: 0x0000 to 0x0EFF.
 * @param[in] enable Bit number 0 - Reporting enabled.
 *                   Bit number 1 - Duplicate filtering enabled.
 *                   All other bits - Reserved for future use.
 *
 * @return HCI error codes as documented in Bluetooth Core Specification v5.3.
 */
uint8_t ll_sync_recv_enable(uint16_t handle, uint8_t enable)
{
	struct ll_sync_set *sync;

	sync = ull_sync_is_enabled_get(handle);
	if (!sync) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	/* Reporting enabled/disabled */
	sync->rx_enable = (enable & BT_HCI_LE_SET_PER_ADV_RECV_ENABLE_ENABLE) ?
			  1U : 0U;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
	sync->nodups = (enable & BT_HCI_LE_SET_PER_ADV_RECV_ENABLE_FILTER_DUPLICATE) ?
		       1U : 0U;
#endif

	return 0;
}

#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER)
/* @brief Link Layer interface function corresponding to HCI LE Periodic
 *        Advertising Sync Transfer command.
 *
 * @param[in] conn_handle Connection_Handle identifying the connected device.
 *                        Range: 0x0000 to 0x0EFF.
 * @param[in] service_data Service_Data value provided by the Host for use by the
 *                         Host of the peer device.
 * @param[in] sync_handle Sync_Handle identifying the periodic advertising
 *                        train. Range: 0x0000 to 0x0EFF.
 *
 * @return HCI error codes as documented in Bluetooth Core Specification v5.4.
 */
uint8_t ll_sync_transfer(uint16_t conn_handle, uint16_t service_data, uint16_t sync_handle)
{
	struct ll_sync_set *sync;
	struct ll_conn *conn;

	conn = ll_connected_get(conn_handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	/* Verify that sync_handle is valid */
	sync = ull_sync_is_enabled_get(sync_handle);
	if (!sync) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	/* Call llcp to start LLCP_PERIODIC_SYNC_IND */
	return ull_cp_periodic_sync(conn, sync, NULL, service_data);
}
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER */

#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
/* @brief Link Layer interface function corresponding to HCI LE Set Periodic
 *        Advertising Sync Transfer Parameters command.
 *
 * @param[in] conn_handle Connection_Handle identifying the connected device.
 *                        Range: 0x0000 to 0x0EFF.
 * @param[in] mode Mode specifying the action to be taken when a periodic
 *                 advertising synchronization is received.
 * @param[in] skip Skip specifying the number of consecutive periodic advertising
 *                 packets that the receiver may skip after successfully receiving
 *                 a periodic advertising packet. Range: 0x0000 to 0x01F3.
 * @param[in] timeout Sync_Timeout specifying the maximum permitted time between
 *                    successful receives. Range: 0x000A to 0x4000.
 * @param[in] cte_type CTE_Type specifying whether to only synchronize to periodic
 *                     advertising with certain types of Constant Tone Extension.
 *
 * @return HCI error codes as documented in Bluetooth Core Specification v5.4.
 */
uint8_t ll_past_param(uint16_t conn_handle, uint8_t mode, uint16_t skip, uint16_t timeout,
		      uint8_t cte_type)
{
	struct ll_conn *conn;

	conn = ll_connected_get(conn_handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	if (mode == BT_HCI_LE_PAST_MODE_SYNC_FILTER_DUPLICATES &&
	    !IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)) {
		return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
	}

	/* Set PAST Param for connection instance */
	conn->past.mode     = mode;
	conn->past.skip     = skip;
	conn->past.timeout  = timeout;
	conn->past.cte_type = cte_type;

	return 0;
}

/* @brief Link Layer interface function corresponding to HCI LE Set Default Periodic
 *        Advertising Sync Transfer Parameters command.
 *
 * @param[in] mode Mode specifying the action to be taken when a periodic
 *                 advertising synchronization is received.
 * @param[in] skip Skip specifying the number of consecutive periodic advertising
 *                 packets that the receiver may skip after successfully receiving
 *                 a periodic advertising packet. Range: 0x0000 to 0x01F3.
 * @param[in] timeout Sync_Timeout specifying the maximum permitted time between
 *                    successful receives. Range: 0x000A to 0x4000.
 * @param[in] cte_type CTE_Type specifying whether to only synchronize to periodic
 *                     advertising with certain types of Constant Tone Extension.
 *
 * @return HCI error codes as documented in Bluetooth Core Specification v5.4.
 */
uint8_t ll_default_past_param(uint8_t mode, uint16_t skip, uint16_t timeout, uint8_t cte_type)
{
	if (mode == BT_HCI_LE_PAST_MODE_SYNC_FILTER_DUPLICATES &&
	    !IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)) {
		return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
	}

	/* Set default PAST param */
	ull_conn_default_past_param_set(mode, skip, timeout, cte_type);

	return 0;
}
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */

int ull_sync_init(void)
{
	int err;

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

int ull_sync_reset(void)
{
	uint16_t handle;
	void *rx;
	int err;

	(void)ll_sync_create_cancel(&rx);

	for (handle = 0U; handle < CONFIG_BT_PER_ADV_SYNC_MAX; handle++) {
		(void)ll_sync_terminate(handle);
	}

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

struct ll_sync_set *ull_sync_set_get(uint16_t handle)
{
	if (handle >= CONFIG_BT_PER_ADV_SYNC_MAX) {
		return NULL;
	}

	return &ll_sync_pool[handle];
}

struct ll_sync_set *ull_sync_is_enabled_get(uint16_t handle)
{
	struct ll_sync_set *sync;

	sync = ull_sync_set_get(handle);
	if (!sync || !sync->timeout_reload) {
		return NULL;
	}

	return sync;
}

struct ll_sync_set *ull_sync_is_valid_get(struct ll_sync_set *sync)
{
	if (((uint8_t *)sync < (uint8_t *)ll_sync_pool) ||
	    ((uint8_t *)sync > ((uint8_t *)ll_sync_pool +
	     (sizeof(struct ll_sync_set) * (CONFIG_BT_PER_ADV_SYNC_MAX - 1))))) {
		return NULL;
	}

	return sync;
}

struct lll_sync *ull_sync_lll_is_valid_get(struct lll_sync *lll)
{
	struct ll_sync_set *sync;

	sync = HDR_LLL2ULL(lll);
	sync = ull_sync_is_valid_get(sync);
	if (sync) {
		return &sync->lll;
	}

	return NULL;
}

uint16_t ull_sync_handle_get(struct ll_sync_set *sync)
{
	return mem_index_get(sync, ll_sync_pool, sizeof(struct ll_sync_set));
}

uint16_t ull_sync_lll_handle_get(struct lll_sync *lll)
{
	return ull_sync_handle_get(HDR_LLL2ULL(lll));
}

void ull_sync_release(struct ll_sync_set *sync)
{
#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	struct lll_sync *lll = &sync->lll;

	if (lll->node_cte_incomplete) {
		const uint8_t release_cnt = 1U;
		struct node_rx_pdu *node_rx;
		memq_link_t *link;

		node_rx = &lll->node_cte_incomplete->rx;
		link = node_rx->hdr.link;

		ll_rx_link_release(link);
		ull_iq_report_link_inc_quota(release_cnt);
		ull_df_iq_report_mem_release(node_rx);
		ull_df_rx_iq_report_alloc(release_cnt);

		lll->node_cte_incomplete = NULL;
	}
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	/* Mark the sync context as sync create cancelled */
	if (IS_ENABLED(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)) {
		sync->timeout = 0U;
	}

#if !defined(CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS)
	/* Reset accumulated data length */
	sync->data_len = 0U;
#endif /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */

	mem_release(sync, &sync_free);
}

bool ull_sync_setup_addr_check(struct ll_sync_set *sync, uint8_t filter_policy,
			       uint8_t addr_type, uint8_t *addr, uint8_t rl_idx)
{
	/* Check if the Periodic Advertiser List is to be used */
	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST) &&
	    filter_policy) {
		/* Check in the Periodic Advertiser List */
		if (ull_filter_ull_pal_addr_match(addr_type, addr)) {
			/* Remember the address, to check with the
			 * SID in the Sync Info
			 */
			sync->peer_id_addr_type = addr_type;
			(void)memcpy(sync->peer_id_addr, addr,
				     BDADDR_SIZE);

			/* Address matched */
			return true;

		/* Check in the Resolving List */
		} else if (IS_ENABLED(CONFIG_BT_CTLR_PRIVACY) &&
			   ull_filter_ull_pal_listed(rl_idx, &addr_type,
						     sync->peer_id_addr)) {
			/* Remember the address, to check with the
			 * SID in the Sync Info
			 */
			sync->peer_id_addr_type = addr_type;

			/* Mark it as an identity address from an RPA */
			sync->peer_addr_resolved = 1U;

			/* Address matched */
			return true;
		}

	/* Check with the explicitly supplied address */
	} else if ((addr_type == sync->peer_id_addr_type) &&
		   !memcmp(addr, sync->peer_id_addr, BDADDR_SIZE)) {
		/* Address matched */
		return true;

	/* Check the identity address with the explicitly supplied address */
	} else if (IS_ENABLED(CONFIG_BT_CTLR_PRIVACY) &&
		   (rl_idx < ll_rl_size_get())) {
		ll_rl_id_addr_get(rl_idx, &addr_type, addr);
		if ((addr_type == sync->peer_id_addr_type) &&
		    !memcmp(addr, sync->peer_id_addr, BDADDR_SIZE)) {
			/* Mark it as an identity address from an RPA */
			sync->peer_addr_resolved = 1U;

			/* Identity address matched */
			return true;
		}
	}

	return false;
}

bool ull_sync_setup_sid_match(struct ll_sync_set *sync, struct ll_scan_set *scan, uint8_t sid)
{
	return (scan->periodic.state == LL_SYNC_STATE_ADDR_MATCH) &&
		((IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST) &&
		  scan->periodic.filter_policy &&
		  ull_filter_ull_pal_match(sync->peer_id_addr_type,
					   sync->peer_id_addr, sid)) ||
		 (!scan->periodic.filter_policy &&
		  (sid == sync->sid)));
}

void ull_sync_setup(struct ll_scan_set *scan, uint8_t phy,
		    struct node_rx_pdu *node_rx, struct pdu_adv_sync_info *si)
{
	uint32_t ticks_slot_overhead;
	uint32_t ticks_slot_offset;
	struct ll_sync_set *sync;
	struct node_rx_sync *se;
	struct node_rx_ftr *ftr;
	uint32_t sync_offset_us;
	uint32_t ready_delay_us;
	struct node_rx_pdu *rx;
	uint8_t *data_chan_map;
	struct lll_sync *lll;
	uint16_t sync_handle;
	uint32_t interval_us;
	uint32_t overhead_us;
	struct pdu_adv *pdu;
	uint16_t interval;
	uint32_t slot_us;
	uint8_t chm_last;
	uint32_t ret;
	uint8_t sca;

	/* Populate the LLL context */
	sync = scan->periodic.sync;
	lll = &sync->lll;

	/* Copy the channel map from the sca_chm field in the sync_info
	 * structure, and clear the SCA bits.
	 */
	chm_last = lll->chm_first;
	lll->chm_last = chm_last;
	data_chan_map = lll->chm[chm_last].data_chan_map;
	(void)memcpy(data_chan_map, si->sca_chm,
		     sizeof(lll->chm[chm_last].data_chan_map));
	data_chan_map[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &=
		~PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK;
	lll->chm[chm_last].data_chan_count =
		util_ones_count_get(data_chan_map,
				    sizeof(lll->chm[chm_last].data_chan_map));
	if (lll->chm[chm_last].data_chan_count < CHM_USED_COUNT_MIN) {
		/* Ignore sync setup, invalid available channel count */
		return;
	}

	memcpy(lll->access_addr, si->aa, sizeof(lll->access_addr));
	lll->data_chan_id = lll_chan_id(lll->access_addr);
	memcpy(lll->crc_init, si->crc_init, sizeof(lll->crc_init));
	lll->event_counter = sys_le16_to_cpu(si->evt_cntr);
	lll->phy = phy;
	lll->forced = 0U;

	interval = sys_le16_to_cpu(si->interval);
	interval_us = interval * PERIODIC_INT_UNIT_US;

#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER)
	/* Save the Periodic Advertising Interval */
	sync->interval = interval;
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER */

	/* Convert from 10 ms units to interval units */
	sync->timeout_reload = RADIO_SYNC_EVENTS((sync->timeout * 10U *
						  USEC_PER_MSEC), interval_us);

	/* Adjust the Skip value so that there is a minimum of 6 events that
	 * can be listened to before Sync_Timeout occurs.
	 * The adjustment of the skip value is controller implementation
	 * specific and not specified by the Bluetooth Core Specification v5.3.
	 * The Controller `may` use the Skip value, and the implementation here
	 * covers a case where the Skip value could lead to fewer events being
	 * listened to until Sync_Timeout. Listening to more consecutive events
	 * before Sync_Timeout increases the probability of retaining the
	 * Periodic Synchronization.
	 */
	if (sync->timeout_reload > CONN_ESTAB_COUNTDOWN) {
		uint16_t skip_max = sync->timeout_reload - CONN_ESTAB_COUNTDOWN;

		if (sync->skip > skip_max) {
			sync->skip = skip_max;
		}
	} else {
		sync->skip = 0U;
	}

	sync->sync_expire = CONN_ESTAB_COUNTDOWN;

	/* Extract the SCA value from the sca_chm field of the sync_info
	 * structure.
	 */
	sca = (si->sca_chm[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &
	       PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK) >>
	      PDU_SYNC_INFO_SCA_CHM_SCA_BIT_POS;

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	lll->sca = sca;
#endif /* CONFIG_BT_CTLR_SYNC_ISO */

	lll->window_widening_periodic_us =
		DIV_ROUND_UP(((lll_clock_ppm_local_get() +
				   lll_clock_ppm_get(sca)) *
				  interval_us), USEC_PER_SEC);
	lll->window_widening_max_us = (interval_us >> 1) - EVENT_IFS_US;
	if (PDU_ADV_SYNC_INFO_OFFS_UNITS_GET(si)) {
		lll->window_size_event_us = OFFS_UNIT_300_US;
	} else {
		lll->window_size_event_us = OFFS_UNIT_30_US;
	}

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	lll->node_cte_incomplete = NULL;
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	/* Set the state to sync create */
	scan->periodic.state = LL_SYNC_STATE_CREATED;
	scan->periodic.param = NULL;
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		struct ll_scan_set *scan_1m;

		scan_1m = ull_scan_set_get(SCAN_HANDLE_1M);
		if (scan == scan_1m) {
			struct ll_scan_set *scan_coded;

			scan_coded = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
			scan_coded->periodic.state = LL_SYNC_STATE_CREATED;
			scan_coded->periodic.param = NULL;
		} else {
			scan_1m->periodic.state = LL_SYNC_STATE_CREATED;
			scan_1m->periodic.param = NULL;
		}
	}

	sync_handle = ull_sync_handle_get(sync);

	/* Prepare sync notification, dispatched only on successful
	 * AUX_SYNC_IND reception.
	 */
	rx = (void *)sync->node_rx_sync_estab;
	rx->hdr.type = NODE_RX_TYPE_SYNC;
	rx->hdr.handle = sync_handle;
	rx->rx_ftr.param = sync;
	se = (void *)rx->pdu;
	se->interval = interval;
	se->phy = lll->phy;
	se->sca = sca;

	/* Calculate offset and schedule sync radio events */
	ftr = &node_rx->rx_ftr;
	pdu = (void *)((struct node_rx_pdu *)node_rx)->pdu;

	ready_delay_us = lll_radio_rx_ready_delay_get(lll->phy, PHY_FLAGS_S8);

	sync_offset_us = ftr->radio_end_us;
	sync_offset_us += PDU_ADV_SYNC_INFO_OFFSET_GET(si) *
			  lll->window_size_event_us;
	/* offs_adjust may be 1 only if sync setup by LL_PERIODIC_SYNC_IND */
	sync_offset_us += (PDU_ADV_SYNC_INFO_OFFS_ADJUST_GET(si) ? OFFS_ADJUST_US : 0U);
	sync_offset_us -= PDU_AC_US(pdu->len, lll->phy, ftr->phy_flags);
	sync_offset_us -= EVENT_TICKER_RES_MARGIN_US;
	sync_offset_us -= EVENT_JITTER_US;
	sync_offset_us -= ready_delay_us;

	/* The minimum prepare tick offset + minimum preempt tick offset are
	 * the overheads before ULL scheduling can set up the radio for
	 * reception.
	 */
	overhead_us = HAL_TICKER_TICKS_TO_US(HAL_TICKER_CNTR_CMP_OFFSET_MIN << 1);

	/* CPU execution overhead to set up the radio for reception */
	overhead_us += EVENT_OVERHEAD_END_US + EVENT_OVERHEAD_START_US;

	/* If there is not sufficient CPU processing time, skip to receiving
	 * the next event.
	 */
	if ((sync_offset_us - ftr->radio_end_us) < overhead_us) {
		sync_offset_us += interval_us;
		lll->event_counter++;
	}
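
	/* Illustrative check (hypothetical numbers): if the first sync event
	 * would begin 500 us after radio_end_us but overhead_us evaluates to
	 * 700 us, the radio cannot be set up in time; the code above then
	 * targets the following periodic interval instead, advancing
	 * event_counter by one and adding interval_us to the offset.
	 */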

	interval_us -= lll->window_widening_periodic_us;

	/* Calculate event time reservation */
	slot_us = PDU_AC_MAX_US(PDU_AC_EXT_PAYLOAD_RX_SIZE, lll->phy);
	slot_us += ready_delay_us;

	/* Add implementation defined radio event overheads */
	if (IS_ENABLED(CONFIG_BT_CTLR_EVENT_OVERHEAD_RESERVE_MAX)) {
		slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
	}

	/* TODO: active_to_start feature port */
	sync->ull.ticks_active_to_start = 0U;
	sync->ull.ticks_prepare_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	sync->ull.ticks_preempt_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
	sync->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);

	ticks_slot_offset = MAX(sync->ull.ticks_active_to_start,
				sync->ull.ticks_prepare_to_start);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}
	ticks_slot_offset += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

	sync->lll_sync_prepare = lll_sync_create_prepare;

	ret = ticker_start(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			   (TICKER_ID_SCAN_SYNC_BASE + sync_handle),
			   ftr->ticks_anchor - ticks_slot_offset,
			   HAL_TICKER_US_TO_TICKS(sync_offset_us),
			   HAL_TICKER_US_TO_TICKS(interval_us),
			   HAL_TICKER_REMAINDER(interval_us),
			   TICKER_NULL_LAZY,
			   (sync->ull.ticks_slot + ticks_slot_overhead),
			   ticker_cb, sync,
			   ticker_start_op_cb, (void *)__LINE__);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}

void ull_sync_setup_reset(struct ll_sync_set *sync)
{
	struct ll_scan_set *scan;

	/* Remove the sync context from being associated with scan contexts */
	scan = ull_scan_set_get(SCAN_HANDLE_1M);

	scan->periodic.sync = NULL;

#if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
	scan->lll.is_sync = 0U;
#endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */

	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);

		scan->periodic.sync = NULL;

#if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
		scan->lll.is_sync = 0U;
#endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
	}
}

void ull_sync_established_report(memq_link_t *link, struct node_rx_pdu *rx)
{
	struct node_rx_pdu *rx_establ;
	struct ll_sync_set *sync;
	struct node_rx_ftr *ftr;
	struct node_rx_sync *se;
	struct lll_sync *lll;

	ftr = &rx->rx_ftr;
	lll = ftr->param;
	sync = HDR_LLL2ULL(lll);

	/* Do nothing if sync is cancelled or lost. */
	if (unlikely(sync->is_stop || !sync->timeout_reload)) {
		return;
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
	enum sync_status sync_status;

#if defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
	sync_status = ftr->sync_status;
#else
	struct pdu_cte_info *rx_cte_info;

	rx_cte_info = pdu_cte_info_get((struct pdu_adv *)rx->pdu);
	if (rx_cte_info != NULL) {
		sync_status = lll_sync_cte_is_allowed(lll->cte_type, lll->filter_policy,
						      rx_cte_info->time, rx_cte_info->type);
	} else {
		sync_status = lll_sync_cte_is_allowed(lll->cte_type, lll->filter_policy, 0,
						      BT_HCI_LE_NO_CTE);
	}

	/* If there is no CTEInline support, notify the done event handler to
	 * terminate the periodic advertising sync in case the CTE is not
	 * allowed. If the periodic filtering list is not used, terminate the
	 * synchronization and notify the host. If the periodic filtering list
	 * is used, stop the synchronization with this particular periodic
	 * advertiser but continue to search for another one.
	 */
	sync->is_term = ((sync_status == SYNC_STAT_TERM) || (sync_status == SYNC_STAT_CONT_SCAN));
#endif /* CONFIG_BT_CTLR_CTEINLINE_SUPPORT */

	/* Send the periodic advertising sync established report when the sync
	 * has the correct CTE type, or the CTE type is incorrect and the
	 * filter policy doesn't allow continuing to scan.
	 */
	if (sync_status == SYNC_STAT_ALLOWED || sync_status == SYNC_STAT_TERM) {
#else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

	if (1) {
#endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

		/* Prepare and dispatch sync notification */
		rx_establ = (void *)sync->node_rx_sync_estab;
		rx_establ->hdr.handle = ull_sync_handle_get(sync);
		se = (void *)rx_establ->pdu;
		/* Clear the node to mark the sync establishment as being
		 * completed. In this case the completion reason is sync being
		 * established.
		 */
		sync->node_rx_sync_estab = NULL;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
		se->status = (ftr->sync_status == SYNC_STAT_TERM) ?
					   BT_HCI_ERR_UNSUPP_REMOTE_FEATURE :
					   BT_HCI_ERR_SUCCESS;
#else
		se->status = BT_HCI_ERR_SUCCESS;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

		/* NOTE: footer param has already been populated during sync
		 * setup.
		 */

		ll_rx_put_sched(rx_establ->hdr.link, rx_establ);
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
	/* Handle the periodic advertising PDU and send the periodic
	 * advertising scan report when the sync was found or was established
	 * in the past. The report is not sent if scanning is terminated due
	 * to a wrong CTE type.
	 */
	if (sync_status == SYNC_STAT_ALLOWED || sync_status == SYNC_STAT_READY) {
#else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

	if (1) {
#endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

		/* Switch the sync event prepare function to the one
		 * responsible for receiving regular PDUs.
		 */
		sync->lll_sync_prepare = lll_sync_prepare;

		/* Change node type to appropriately handle periodic
		 * advertising PDU report.
		 */
		rx->hdr.type = NODE_RX_TYPE_SYNC_REPORT;
		ull_scan_aux_setup(link, rx);
	} else {
		rx->hdr.type = NODE_RX_TYPE_RELEASE;
		ll_rx_put_sched(link, rx);
	}
}

void ull_sync_done(struct node_rx_event_done *done)
{
	struct ll_sync_set *sync;

	/* Get reference to ULL context */
	sync = CONTAINER_OF(done->param, struct ll_sync_set, ull);

	/* Do nothing if local terminate requested or sync lost */
	if (unlikely(sync->is_stop || !sync->timeout_reload)) {
		return;
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
#if defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
	if (done->extra.sync_term) {
#else
	if (sync->is_term) {
#endif /* CONFIG_BT_CTLR_CTEINLINE_SUPPORT */
		/* In case the periodic advertising list filtering is not used,
		 * the synchronization must be terminated and a host
		 * notification must be sent. In case the periodic advertising
		 * list filtering is used, only the synchronization with this
		 * particular periodic advertiser is stopped while the search
		 * for another one from the list continues.
		 *
		 * Stop the periodic advertising sync ticker and clear the
		 * variables informing that the sync is pending. That is a step
		 * towards completely terminating the synchronization. In case
		 * of a search for another periodic advertiser, it allows a new
		 * ticker to be set up for that.
		 */
		sync_ticker_cleanup(sync, NULL);
	} else
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */
	{
		uint32_t ticks_drift_minus;
		uint32_t ticks_drift_plus;
		uint16_t elapsed_event;
		struct lll_sync *lll;
		uint16_t skip_event;
		uint8_t force_lll;
		uint16_t lazy;
		uint8_t force;

		lll = &sync->lll;

		/* Events elapsed used in timeout checks below */
		skip_event = lll->skip_event;

		/* Sync drift compensation and new skip calculation */
		ticks_drift_plus = 0U;
		ticks_drift_minus = 0U;
		if (done->extra.trx_cnt) {
			/* Calculate drift in ticks unit */
			ull_drift_ticks_get(done, &ticks_drift_plus, &ticks_drift_minus);

			/* Enforce skip */
			lll->skip_event = sync->skip;

			/* Reset failed to establish sync countdown */
			sync->sync_expire = 0U;
		}

		elapsed_event = skip_event + lll->lazy_prepare + 1U;

		/* Reset supervision countdown */
		if (done->extra.crc_valid) {
			sync->timeout_expire = 0U;
		}

		/* check sync failed to establish */
		else if (sync->sync_expire) {
			if (sync->sync_expire > elapsed_event) {
				sync->sync_expire -= elapsed_event;
			} else {
				sync_ticker_cleanup(sync, ticker_stop_sync_expire_op_cb);

				return;
			}
		}

		/* If anchor point not sync-ed, start timeout countdown, and break skip if any */
		else if (!sync->timeout_expire) {
			sync->timeout_expire = sync->timeout_reload;
		}

		/* check timeout */
		force = 0U;
		force_lll = 0U;
		if (sync->timeout_expire) {
			if (sync->timeout_expire > elapsed_event) {
				sync->timeout_expire -= elapsed_event;

				/* break skip */
				lll->skip_event = 0U;

				if (sync->timeout_expire <= 6U) {
					force_lll = 1U;

					force = 1U;
				} else if (skip_event) {
					force = 1U;
				}
			} else {
				sync_ticker_cleanup(sync, ticker_stop_sync_lost_op_cb);

				return;
			}
		}

		lll->forced = force_lll;

		/* Check if skip needs update */
		lazy = 0U;
		if ((force) || (skip_event != lll->skip_event)) {
			lazy = lll->skip_event + 1U;
		}
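
		/* Example (illustrative): if the stored skip was 5 and
		 * timeout pressure just cleared it (lll->skip_event = 0U
		 * above), lazy becomes 1 so the ticker fires at every
		 * interval again; force additionally re-schedules a slot
		 * that would otherwise have been skipped.
		 */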

		/* Update the Sync ticker instance */
		if (ticks_drift_plus || ticks_drift_minus || lazy || force) {
			uint16_t sync_handle = ull_sync_handle_get(sync);
			uint32_t ticker_status;

			/* A call to ticker_update can fail under the race
			 * condition where the periodic sync role is being
			 * stopped but at the same time is preempted by a
			 * periodic sync event that gets into the close state.
			 * Accept failure when the periodic sync role is being
			 * stopped.
			 */
			ticker_status =
				ticker_update(TICKER_INSTANCE_ID_CTLR,
					      TICKER_USER_ID_ULL_HIGH,
					      (TICKER_ID_SCAN_SYNC_BASE +
					       sync_handle),
					      ticks_drift_plus,
					      ticks_drift_minus, 0, 0,
					      lazy, force,
					      ticker_update_op_cb, sync);
			LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
				  (ticker_status == TICKER_STATUS_BUSY) ||
				  ((void *)sync == ull_disable_mark_get()));
		}
	}
}

void ull_sync_chm_update(uint8_t sync_handle, uint8_t *acad, uint8_t acad_len)
{
	struct pdu_adv_sync_chm_upd_ind *chm_upd_ind;
	struct ll_sync_set *sync;
	struct lll_sync *lll;
	uint8_t chm_last;
	uint16_t ad_len;

	/* Get reference to LLL context */
	sync = ull_sync_set_get(sync_handle);
	LL_ASSERT(sync);
	lll = &sync->lll;

	/* Ignore if already in progress */
	if (lll->chm_last != lll->chm_first) {
		return;
	}
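
	/* The ACAD is a sequence of length-prefixed AD structures, i.e.
	 * acad[0] = length of (type + data), acad[1] = AD type, followed by
	 * the data. Illustrative layout of the entry searched for below,
	 * assuming the pdu_adv_sync_chm_upd_ind payload:
	 *   | len | type = Channel Map Update Indication | chm[] | instant |
	 */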

	/* Find the Channel Map Update Indication */
	do {
		/* Pick the length and find the Channel Map Update Indication */
		ad_len = acad[PDU_ADV_DATA_HEADER_LEN_OFFSET];
		if (ad_len &&
		    (acad[PDU_ADV_DATA_HEADER_TYPE_OFFSET] ==
		     PDU_ADV_DATA_TYPE_CHANNEL_MAP_UPDATE_IND)) {
			break;
		}

		/* Add length field size */
		ad_len += 1U;
		if (ad_len < acad_len) {
			acad_len -= ad_len;
		} else {
			return;
		}

		/* Move to next AD data */
		acad += ad_len;
	} while (acad_len);

	/* Validate the size of the Channel Map Update Indication */
	if (ad_len != (sizeof(*chm_upd_ind) + 1U)) {
		return;
	}

	/* Pick the parameters into the procedure context */
	chm_last = lll->chm_last + 1U;
	if (chm_last == DOUBLE_BUFFER_SIZE) {
		chm_last = 0U;
	}

	chm_upd_ind = (void *)&acad[PDU_ADV_DATA_HEADER_DATA_OFFSET];
	(void)memcpy(lll->chm[chm_last].data_chan_map, chm_upd_ind->chm,
		     sizeof(lll->chm[chm_last].data_chan_map));
	lll->chm[chm_last].data_chan_count =
		util_ones_count_get(lll->chm[chm_last].data_chan_map,
				    sizeof(lll->chm[chm_last].data_chan_map));
	if (lll->chm[chm_last].data_chan_count < CHM_USED_COUNT_MIN) {
		/* Ignore channel map, invalid available channel count */
		return;
	}

	lll->chm_instant = sys_le16_to_cpu(chm_upd_ind->instant);

	/* Set Channel Map Update Procedure in progress */
	lll->chm_last = chm_last;
}

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
/* @brief Function updates the periodic sync slot duration.
 *
 * @param[in] sync              Pointer to sync instance
 * @param[in] slot_plus_us      Number of microseconds to add to the ticker slot
 * @param[in] slot_minus_us     Number of microseconds to subtract from the ticker slot
 *
 * @retval 0            Successful ticker slot update.
 * @retval -ENOENT      Ticker node related with provided sync is already stopped.
 * @retval -ENOMEM      Couldn't enqueue update ticker job.
 * @retval -EFAULT      Something else went wrong.
 */
int ull_sync_slot_update(struct ll_sync_set *sync, uint32_t slot_plus_us,
			 uint32_t slot_minus_us)
{
	uint32_t volatile ret_cb;
	uint32_t ret;

	ret_cb = TICKER_STATUS_BUSY;
	ret = ticker_update(TICKER_INSTANCE_ID_CTLR,
			    TICKER_USER_ID_THREAD,
			    (TICKER_ID_SCAN_SYNC_BASE +
			    ull_sync_handle_get(sync)),
			    0, 0,
			    HAL_TICKER_US_TO_TICKS(slot_plus_us),
			    HAL_TICKER_US_TO_TICKS(slot_minus_us),
			    0, 0,
			    ticker_update_op_status_give,
			    (void *)&ret_cb);
	if (ret == TICKER_STATUS_BUSY || ret == TICKER_STATUS_SUCCESS) {
		/* Wait for the callback, or clear the semaphore if the
		 * callback was already executed.
		 */
		k_sem_take(&sem_ticker_cb, K_FOREVER);

		if (ret_cb == TICKER_STATUS_FAILURE) {
			return -EFAULT; /* Something went wrong */
		} else {
			return 0;
		}
	} else {
		if (ret_cb != TICKER_STATUS_BUSY) {
			/* Ticker callback was executed and job enqueue was successful.
			 * Call k_sem_take to clear the ticker callback semaphore.
			 */
			k_sem_take(&sem_ticker_cb, K_FOREVER);
		}
		/* Ticker was already stopped or job was not enqueued. */
		return (ret_cb == TICKER_STATUS_FAILURE) ? -ENOENT : -ENOMEM;
	}
}
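
/* Illustrative usage sketch (commentary, not part of the build): growing
 * the sync event reservation by 200 us, e.g. to make room for IQ report
 * handling. Values are hypothetical.
 *
 *   int err = ull_sync_slot_update(sync, 200U, 0U);
 *   if (err == -ENOENT) {
 *       // Sync ticker already stopped; nothing to resize.
 *   }
 */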
1561 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1562 
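/* Initialize the sync context free pool and, when Direction Finding CTE RX is
 * enabled, the semaphore used to serialize ticker slot update callbacks.
 */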
1563 static int init_reset(void)
1564 {
1565 	/* Initialize sync pool. */
1566 	mem_init(ll_sync_pool, sizeof(struct ll_sync_set),
1567 		 sizeof(ll_sync_pool) / sizeof(struct ll_sync_set),
1568 		 &sync_free);
1569 
1570 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
1571 	k_sem_init(&sem_ticker_cb, 0, 1);
1572 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1573 
1574 	return 0;
1575 }
1576 
1577 static inline struct ll_sync_set *sync_acquire(void)
1578 {
1579 	return mem_acquire(&sync_free);
1580 }
1581 
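/* Allocate and initialize a sync context together with the node rx buffers
 * and memq links needed to report sync established and sync lost events. On
 * any allocation failure, previously acquired resources are released and NULL
 * is returned.
 */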
1582 static struct ll_sync_set *ull_sync_create(uint8_t sid, uint16_t timeout, uint16_t skip,
1583 					   uint8_t cte_type, uint8_t rx_enable, uint8_t nodups)
1584 {
1585 	memq_link_t *link_sync_estab;
1586 	memq_link_t *link_sync_lost;
1587 	struct node_rx_pdu *node_rx;
1588 	struct lll_sync *lll;
1589 	struct ll_sync_set *sync;
1590 
1591 	link_sync_estab = ll_rx_link_alloc();
1592 	if (!link_sync_estab) {
1593 		return NULL;
1594 	}
1595 
1596 	link_sync_lost = ll_rx_link_alloc();
1597 	if (!link_sync_lost) {
1598 		ll_rx_link_release(link_sync_estab);
1599 
1600 		return NULL;
1601 	}
1602 
1603 	node_rx = ll_rx_alloc();
1604 	if (!node_rx) {
1605 		ll_rx_link_release(link_sync_lost);
1606 		ll_rx_link_release(link_sync_estab);
1607 
1608 		return NULL;
1609 	}
1610 
1611 	sync = sync_acquire();
1612 	if (!sync) {
1613 		ll_rx_release(node_rx);
1614 		ll_rx_link_release(link_sync_lost);
1615 		ll_rx_link_release(link_sync_estab);
1616 
1617 		return NULL;
1618 	}
1619 
1620 	sync->peer_addr_resolved = 0U;
1621 
1622 	/* Initialize sync context */
1623 	node_rx->hdr.link = link_sync_estab;
1624 	sync->node_rx_lost.rx.hdr.link = link_sync_lost;
1625 
	/* Make sure that the node_rx_sync_estab hasn't got anything assigned.
	 * It is used to mark when sync establishment is in progress.
	 */
1629 	LL_ASSERT(!sync->node_rx_sync_estab);
1630 	sync->node_rx_sync_estab = node_rx;
1631 
1632 	/* Reporting initially enabled/disabled */
1633 	sync->rx_enable = rx_enable;
1634 
1635 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
1636 	sync->nodups = nodups;
1637 #endif
1638 	sync->skip = skip;
1639 	sync->is_stop = 0U;
1640 
1641 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
1642 	sync->enc = 0U;
1643 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
1644 
1645 	/* NOTE: Use timeout not zero to represent sync context used for sync
1646 	 * create.
1647 	 */
1648 	sync->timeout = timeout;
1649 
1650 	/* NOTE: Use timeout_reload not zero to represent sync established. */
1651 	sync->timeout_reload = 0U;
1652 	sync->timeout_expire = 0U;
1653 
1654 	/* Remember the SID */
1655 	sync->sid = sid;
1656 
1657 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
1658 	/* Reset Broadcast Isochronous Group Sync Establishment */
1659 	sync->iso.sync_iso = NULL;
1660 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
1661 
1662 	/* Initialize sync LLL context */
1663 	lll = &sync->lll;
1664 	lll->lll_aux = NULL;
1665 	lll->is_rx_enabled = sync->rx_enable;
1666 	lll->skip_prepare = 0U;
1667 	lll->skip_event = 0U;
1668 	lll->window_widening_prepare_us = 0U;
1669 	lll->window_widening_event_us = 0U;
1670 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
1671 	lll->cte_type = cte_type;
1672 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */
1673 
1674 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
1675 	ull_df_sync_cfg_init(&lll->df_cfg);
1676 	LL_ASSERT(!lll->node_cte_incomplete);
1677 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1678 
	/* Initialize ULL and LLL headers */
1680 	ull_hdr_init(&sync->ull);
1681 	lll_hdr_init(lll, sync);
1682 
1683 	return sync;
1684 }
1685 
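/* Stop the periodic sync ticker and mark the sync context as no longer
 * established by clearing timeout_reload. stop_op_cb is invoked when the
 * ticker stop operation completes.
 */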
1686 static void sync_ticker_cleanup(struct ll_sync_set *sync, ticker_op_func stop_op_cb)
1687 {
1688 	uint16_t sync_handle = ull_sync_handle_get(sync);
1689 	uint32_t ret;
1690 
1691 	/* Stop Periodic Sync Ticker */
1692 	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
1693 			  TICKER_ID_SCAN_SYNC_BASE + sync_handle, stop_op_cb, (void *)sync);
1694 	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
1695 		  (ret == TICKER_STATUS_BUSY));
1696 
	/* Mark the sync context as no longer sync established */
1698 	sync->timeout_reload = 0U;
1699 }
1700 
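/* Ticker expiry callback for a periodic sync instance: commits any pending
 * receive enable change, increments the ULL prepare reference count and
 * enqueues a mayfly to kick the LLL prepare in the LLL context.
 */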
1701 static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
1702 		      uint32_t remainder, uint16_t lazy, uint8_t force,
1703 		      void *param)
1704 {
1705 	static memq_link_t link_lll_prepare;
1706 	static struct mayfly mfy_lll_prepare = {
1707 		0, 0, &link_lll_prepare, NULL, NULL};
1708 	static struct lll_prepare_param p;
1709 	struct ll_sync_set *sync = param;
1710 	struct lll_sync *lll;
1711 	uint32_t ret;
1712 	uint8_t ref;
1713 
1714 	DEBUG_RADIO_PREPARE_O(1);
1715 
1716 	lll = &sync->lll;
1717 
1718 	/* Commit receive enable changed value */
1719 	lll->is_rx_enabled = sync->rx_enable;
1720 
1721 	/* Increment prepare reference count */
1722 	ref = ull_ref_inc(&sync->ull);
1723 	LL_ASSERT(ref);
1724 
1725 	/* Append timing parameters */
1726 	p.ticks_at_expire = ticks_at_expire;
1727 	p.remainder = remainder;
1728 	p.lazy = lazy;
1729 	p.force = force;
1730 	p.param = lll;
1731 	mfy_lll_prepare.param = &p;
1732 	mfy_lll_prepare.fp = sync->lll_sync_prepare;
1733 
1734 	/* Kick LLL prepare */
1735 	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, 0,
1736 			     &mfy_lll_prepare);
1737 	LL_ASSERT(!ret);
1738 
1739 	DEBUG_RADIO_PREPARE_O(1);
1740 }
1741 
1742 static void ticker_start_op_cb(uint32_t status, void *param)
1743 {
1744 	ARG_UNUSED(param);
1745 	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
1746 }
1747 
1748 static void ticker_update_op_cb(uint32_t status, void *param)
1749 {
1750 	LL_ASSERT(status == TICKER_STATUS_SUCCESS ||
1751 		  param == ull_disable_mark_get());
1752 }
1753 
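/* Ticker stop operation callback used on sync establishment timeout; defers
 * generation of the failed to establish report to the ULL high priority
 * context via mayfly.
 */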
1754 static void ticker_stop_sync_expire_op_cb(uint32_t status, void *param)
1755 {
1756 	uint32_t retval;
1757 	static memq_link_t link;
1758 	static struct mayfly mfy = {0, 0, &link, NULL, sync_expire};
1759 
1760 	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
1761 
1762 	mfy.param = param;
1763 
1764 	retval = mayfly_enqueue(TICKER_USER_ID_ULL_LOW, TICKER_USER_ID_ULL_HIGH,
1765 				0, &mfy);
1766 	LL_ASSERT(!retval);
1767 }
1768 
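/* Generate the periodic advertising sync failed to establish report using the
 * pre-allocated sync established node rx, with an invalid handle and status
 * BT_HCI_ERR_CONN_FAIL_TO_ESTAB.
 */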
1769 static void sync_expire(void *param)
1770 {
1771 	struct ll_sync_set *sync = param;
1772 	struct node_rx_sync *se;
1773 	struct node_rx_pdu *rx;
1774 
1775 	/* Generate Periodic advertising sync failed to establish */
1776 	rx = (void *)sync->node_rx_sync_estab;
1777 	rx->hdr.handle = LLL_HANDLE_INVALID;
1778 
	/* Clear the node to mark sync establishment as completed. In this
	 * case the completion reason is sync expire.
	 */
1782 	sync->node_rx_sync_estab = NULL;
1783 
1784 	/* NOTE: struct node_rx_sync_estab has uint8_t member following the
1785 	 *       struct node_rx_hdr to store the reason.
1786 	 */
1787 	se = (void *)rx->pdu;
1788 	se->status = BT_HCI_ERR_CONN_FAIL_TO_ESTAB;
1789 
1790 	/* NOTE: footer param has already been populated during sync setup */
1791 
	/* Enqueue the sync failed to establish event towards ULL context */
1793 	ll_rx_put_sched(rx->hdr.link, rx);
1794 }
1795 
1796 static void ticker_stop_sync_lost_op_cb(uint32_t status, void *param)
1797 {
1798 	uint32_t retval;
1799 	static memq_link_t link;
1800 	static struct mayfly mfy = {0, 0, &link, NULL, sync_lost};
1801 
	/* When in a race between terminate requested in thread context and
	 * the sync lost scenario, do not generate the sync lost node rx from
	 * here.
	 */
1805 	if (status != TICKER_STATUS_SUCCESS) {
1806 		LL_ASSERT(param == ull_disable_mark_get());
1807 
1808 		return;
1809 	}
1810 
1811 	mfy.param = param;
1812 
1813 	retval = mayfly_enqueue(TICKER_USER_ID_ULL_LOW, TICKER_USER_ID_ULL_HIGH,
1814 				0, &mfy);
1815 	LL_ASSERT(!retval);
1816 }
1817 
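/* Generate the periodic advertising sync lost report, or a sync failed to
 * establish report when establishment was still in progress. A pending BIG
 * sync creation on this sync instance is completed with failed to establish
 * status.
 */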
1818 static void sync_lost(void *param)
1819 {
1820 	struct ll_sync_set *sync;
1821 	struct node_rx_pdu *rx;
1822 
	/* Sync established event was not generated yet; report sync failed
	 * to establish instead of sync lost.
	 */
1824 	sync = param;
1825 	if (sync->lll_sync_prepare != lll_sync_prepare) {
1826 		sync_expire(param);
1827 
1828 		return;
1829 	}
1830 
1831 	/* Generate Periodic advertising sync lost */
1832 	rx = (void *)&sync->node_rx_lost;
1833 	rx->hdr.handle = ull_sync_handle_get(sync);
1834 	rx->hdr.type = NODE_RX_TYPE_SYNC_LOST;
1835 	rx->rx_ftr.param = sync;
1836 
1837 	/* Enqueue the sync lost towards ULL context */
1838 	ll_rx_put_sched(rx->hdr.link, rx);
1839 
1840 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
1841 	if (sync->iso.sync_iso) {
1842 		/* ISO create BIG flag in the periodic advertising context is still set */
1843 		struct ll_sync_iso_set *sync_iso;
1844 
1845 		sync_iso = sync->iso.sync_iso;
1846 
1847 		rx = (void *)&sync_iso->node_rx_lost;
1848 		rx->hdr.handle = sync_iso->big_handle;
1849 		rx->hdr.type = NODE_RX_TYPE_SYNC_ISO;
1850 		rx->rx_ftr.param = sync_iso;
1851 		*((uint8_t *)rx->pdu) = BT_HCI_ERR_CONN_FAIL_TO_ESTAB;
1852 
1853 		/* Enqueue the sync iso lost towards ULL context */
1854 		ll_rx_put_sched(rx->hdr.link, rx);
1855 	}
1856 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
1857 }
1858 
1859 #if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)
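/* Return the sync context for a handle only if it is in use for sync create,
 * i.e. has a non-zero timeout; otherwise return NULL.
 */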
1860 static struct ll_sync_set *sync_is_create_get(uint16_t handle)
1861 {
1862 	struct ll_sync_set *sync;
1863 
1864 	sync = ull_sync_set_get(handle);
1865 	if (!sync || !sync->timeout) {
1866 		return NULL;
1867 	}
1868 
1869 	return sync;
1870 }
1871 
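/* Check whether a sync create is in progress or established towards the given
 * peer identity address and SID.
 */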
1872 static bool peer_sid_sync_exists(uint8_t const peer_id_addr_type,
1873 				 uint8_t const *const peer_id_addr,
1874 				 uint8_t sid)
1875 {
1876 	uint16_t handle;
1877 
1878 	for (handle = 0U; handle < CONFIG_BT_PER_ADV_SYNC_MAX; handle++) {
1879 		struct ll_sync_set *sync = sync_is_create_get(handle);
1880 
1881 		if (sync &&
1882 		    (sync->peer_id_addr_type == peer_id_addr_type) &&
1883 		    !memcmp(sync->peer_id_addr, peer_id_addr, BDADDR_SIZE) &&
1884 		    (sync->sid == sid)) {
1885 			return true;
1886 		}
1887 	}
1888 
1889 	return false;
1890 }
1891 #endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC */
1892 
1893 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
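/* Ticker update operation callback: stores the status for the thread waiting
 * in ull_sync_slot_update() and gives the ticker callback semaphore.
 */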
1894 static void ticker_update_op_status_give(uint32_t status, void *param)
1895 {
1896 	*((uint32_t volatile *)param) = status;
1897 
1898 	k_sem_give(&sem_ticker_cb);
1899 }
1900 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1901 
1902 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING) && \
1903 	!defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
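/* Return a pointer to the CTEInfo field in the extended header of an
 * AUX_SYNC_IND or AUX_CHAIN_IND PDU, or NULL when no CTEInfo is present.
 * CTEInfo is the first extended header field in these PDUs because AdvA and
 * TargetA are not allowed there.
 */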
1904 static struct pdu_cte_info *pdu_cte_info_get(struct pdu_adv *pdu)
1905 {
1906 	struct pdu_adv_com_ext_adv *com_hdr;
1907 	struct pdu_adv_ext_hdr *hdr;
1908 
1909 	com_hdr = &pdu->adv_ext_ind;
1910 	hdr = &com_hdr->ext_hdr;
1911 
	if (!com_hdr->ext_hdr_len || !hdr->cte_info) {
1913 		return NULL;
1914 	}
1915 
1916 	/* Make sure there are no fields that are not allowed for AUX_SYNC_IND and AUX_CHAIN_IND */
1917 	LL_ASSERT(!hdr->adv_addr);
1918 	LL_ASSERT(!hdr->tgt_addr);
1919 
1920 	return (struct pdu_cte_info *)hdr->data;
1921 }
1922 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING && !CONFIG_BT_CTLR_CTEINLINE_SUPPORT */
1923 
1924 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
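/* Handle a received LL_PERIODIC_SYNC_IND (Periodic Advertising Sync Transfer):
 * apply the connection's PAST receive mode, create a sync context for the
 * transferred periodic train and hand over to sync setup with the timing
 * information carried in the indication.
 */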
1925 void ull_sync_transfer_received(struct ll_conn *conn, uint16_t service_data,
1926 				struct pdu_adv_sync_info *si, uint16_t conn_event_count,
1927 				uint16_t last_pa_event_counter, uint8_t sid,
1928 				uint8_t addr_type, uint8_t sca, uint8_t phy,
1929 				uint8_t *adv_addr, uint16_t sync_conn_event_count,
1930 				uint8_t addr_resolved)
1931 {
1932 	struct ll_sync_set *sync;
1933 	uint16_t conn_evt_current;
1934 	uint8_t rx_enable;
1935 	uint8_t nodups;
1936 
1937 	if (conn->past.mode == BT_HCI_LE_PAST_MODE_NO_SYNC) {
		/* Ignore LL_PERIODIC_SYNC_IND - see Bluetooth Core Specification v5.4
		 * Vol 4, Part E, Section 7.8.91
		 */
1941 		return;
1942 	}
1943 
1944 #if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)
1945 	/* Do not sync twice to the same peer and same SID */
1946 	if (peer_sid_sync_exists(addr_type, adv_addr, sid)) {
1947 		return;
1948 	}
1949 #endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC */
1950 
1951 	nodups = (conn->past.mode == BT_HCI_LE_PAST_MODE_SYNC_FILTER_DUPLICATES) ? 1U : 0U;
1952 	rx_enable = (conn->past.mode == BT_HCI_LE_PAST_MODE_NO_REPORTS) ? 0U : 1U;
1953 
1954 	sync = ull_sync_create(sid, conn->past.timeout, conn->past.skip, conn->past.cte_type,
1955 			       rx_enable, nodups);
1956 	if (!sync) {
1957 		return;
1958 	}
1959 
1960 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
1961 	/* Reset filter policy in lll_sync */
1962 	sync->lll.filter_policy = 0U;
1963 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */
1964 
1965 	sync->peer_id_addr_type = addr_type;
1966 	sync->peer_addr_resolved = addr_resolved;
1967 	memcpy(sync->peer_id_addr, adv_addr, BDADDR_SIZE);
1968 	sync->lll.phy = phy;
1969 
1970 	conn_evt_current = ull_conn_event_counter(conn);
1971 
1972 	/* LLCP should have ensured this holds */
1973 	LL_ASSERT(sync_conn_event_count != conn_evt_current);
1974 
1975 	ull_sync_setup_from_sync_transfer(conn, service_data, sync, si,
1976 					  conn_event_count - conn_evt_current,
1977 					  last_pa_event_counter, sync_conn_event_count,
1978 					  sca);
1979 }
1980 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
1981