/*
 * Copyright (c) 2020-2021 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdlib.h>
#include <zephyr/kernel.h>
#include <soc.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/bluetooth/hci_types.h>

#include "util/util.h"
#include "util/mem.h"
#include "util/memq.h"
#include "util/mayfly.h"
#include "util/dbuf.h"

#include "hal/cpu.h"
#include "hal/ccm.h"
#include "hal/radio.h"
#include "hal/ticker.h"

#include "ticker/ticker.h"

#include "pdu_df.h"
#include "lll/pdu_vendor.h"
#include "pdu.h"

#include "lll.h"
#include "lll/lll_adv_types.h"
#include "lll_adv.h"
#include "lll/lll_adv_pdu.h"
#include "lll_clock.h"
#include "lll/lll_vendor.h"
#include "lll_chan.h"
#include "lll_scan.h"
#include "lll/lll_df_types.h"
#include "lll_conn.h"
#include "lll_conn_iso.h"
#include "lll_sync.h"
#include "lll_sync_iso.h"

#include "isoal.h"

#include "ull_tx_queue.h"

#include "ull_filter.h"
#include "ull_iso_types.h"
#include "ull_scan_types.h"
#include "ull_sync_types.h"
#include "ull_conn_types.h"
#include "ull_adv_types.h"
#include "ull_conn_iso_types.h"

#include "ull_internal.h"
#include "ull_adv_internal.h"
#include "ull_scan_internal.h"
#include "ull_sync_internal.h"
#include "ull_conn_internal.h"
#include "ull_conn_iso_internal.h"
#include "ull_df_types.h"
#include "ull_df_internal.h"

#include "ull_llcp.h"
#include "ll.h"
#include "hal/debug.h"

/* Check that the timeout_reload member is at a safe offset when ll_sync_set is
 * allocated using the mem interface. A non-zero timeout_reload indicates that
 * a sync is established, and is used to check for the sync being terminated
 * under race conditions between the HCI Tx and Rx threads when Periodic
 * Advertising Reports are generated.
 */
MEM_FREE_MEMBER_ACCESS_BUILD_ASSERT(struct ll_sync_set, timeout_reload);

static struct ll_sync_set *ull_sync_create(uint8_t sid, uint16_t timeout, uint16_t skip,
					   uint8_t cte_type, uint8_t rx_enable, uint8_t nodups);
static int init_reset(void);
static inline struct ll_sync_set *sync_acquire(void);
static void sync_ticker_cleanup(struct ll_sync_set *sync, ticker_op_func stop_op_cb);
static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param);
static void ticker_start_op_cb(uint32_t status, void *param);
static void ticker_update_op_cb(uint32_t status, void *param);
static void ticker_stop_sync_expire_op_cb(uint32_t status, void *param);
static void sync_expire(void *param);
static void ticker_stop_sync_lost_op_cb(uint32_t status, void *param);
static void sync_lost(void *param);
#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)
static bool peer_sid_sync_exists(uint8_t const peer_id_addr_type,
				 uint8_t const *const peer_id_addr,
				 uint8_t sid);
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC */
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING) && \
	!defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
static struct pdu_cte_info *pdu_cte_info_get(struct pdu_adv *pdu);
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING && !CONFIG_BT_CTLR_CTEINLINE_SUPPORT */

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
static void ticker_update_op_status_give(uint32_t status, void *param);
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

static struct ll_sync_set ll_sync_pool[CONFIG_BT_PER_ADV_SYNC_MAX];
static void *sync_free;

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
/* Semaphore to wake up the thread on ticker API callback */
static struct k_sem sem_ticker_cb;
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

uint8_t ll_sync_create(uint8_t options, uint8_t sid, uint8_t adv_addr_type,
		       uint8_t *adv_addr, uint16_t skip,
		       uint16_t sync_timeout, uint8_t sync_cte_type)
{
	struct ll_scan_set *scan_coded;
	struct ll_scan_set *scan;
	struct ll_sync_set *sync;
	uint8_t rx_enable;
	uint8_t nodups;

	scan = ull_scan_set_get(SCAN_HANDLE_1M);
	if (!scan || scan->periodic.sync) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
		if (!scan_coded || scan_coded->periodic.sync) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}

#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)
	/* Do not sync twice to the same peer and same SID */
	if (((options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_USE_LIST) == 0U) &&
	    peer_sid_sync_exists(adv_addr_type, adv_addr, sid)) {
		return BT_HCI_ERR_CONN_ALREADY_EXISTS;
	}
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC */

	rx_enable = !(options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_REPORTS_DISABLED);
	nodups = (options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_FILTER_DUPLICATE) ? 1U : 0U;

	sync = ull_sync_create(sid, sync_timeout, skip, sync_cte_type, rx_enable, nodups);
	if (!sync) {
		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	scan->periodic.cancelled = 0U;
	scan->periodic.state = LL_SYNC_STATE_IDLE;
	scan->periodic.filter_policy =
		options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_USE_LIST;
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded->periodic.cancelled = 0U;
		scan_coded->periodic.state = LL_SYNC_STATE_IDLE;
		scan_coded->periodic.filter_policy =
			scan->periodic.filter_policy;
	}

	/* Remember the peer address when periodic advertiser list is not
	 * used.
	 * NOTE: Peer address will be filled/overwritten with correct identity
	 * address on sync setup when privacy is enabled.
	 */
	if ((options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_USE_LIST) == 0U) {
		sync->peer_id_addr_type = adv_addr_type;
		(void)memcpy(sync->peer_id_addr, adv_addr,
			     sizeof(sync->peer_id_addr));
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
	/* Set filter policy in lll_sync */
	sync->lll.filter_policy = scan->periodic.filter_policy;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

	/* Enable scanner to create sync */
	scan->periodic.sync = sync;

#if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
	scan->lll.is_sync = 1U;
#endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded->periodic.sync = sync;

#if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
		scan_coded->lll.is_sync = 1U;
#endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
	}

	return 0;
}

#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
void ull_sync_setup_from_sync_transfer(struct ll_conn *conn, uint16_t service_data,
				       struct ll_sync_set *sync, struct pdu_adv_sync_info *si,
				       int16_t conn_evt_offset, uint16_t last_pa_event_counter,
				       uint16_t sync_conn_event_count, uint8_t sender_sca)
{
	struct node_rx_past_received *se_past;
	uint32_t ticks_slot_overhead;
	uint32_t ticks_slot_offset;
	uint32_t conn_interval_us;
	uint32_t sync_offset_us;
	uint32_t ready_delay_us;
	struct node_rx_pdu *rx;
	uint8_t *data_chan_map;
	struct lll_sync *lll;
	uint32_t interval_us;
	uint32_t slot_us;
	uint32_t ticks_anchor;
	uint8_t chm_last;
	uint32_t ret;
	uint16_t interval;
	uint16_t sync_handle;
	uint8_t sca;

	lll = &sync->lll;

	/* Copy channel map from sca_chm field in sync_info structure, and
	 * clear the SCA bits.
	 */
	chm_last = lll->chm_first;
	lll->chm_last = chm_last;
	data_chan_map = lll->chm[chm_last].data_chan_map;
	(void)memcpy(data_chan_map, si->sca_chm,
		     sizeof(lll->chm[chm_last].data_chan_map));
	data_chan_map[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &=
		~PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK;
	lll->chm[chm_last].data_chan_count =
		util_ones_count_get(data_chan_map,
				    sizeof(lll->chm[chm_last].data_chan_map));
	if (lll->chm[chm_last].data_chan_count < CHM_USED_COUNT_MIN) {
		/* Ignore sync setup, invalid available channel count */
		return;
	}

	memcpy(lll->access_addr, si->aa, sizeof(lll->access_addr));
	lll->data_chan_id = lll_chan_id(lll->access_addr);
	memcpy(lll->crc_init, si->crc_init, sizeof(lll->crc_init));
	lll->event_counter = sys_le16_to_cpu(si->evt_cntr);

	interval = sys_le16_to_cpu(si->interval);
	interval_us = interval * PERIODIC_INT_UNIT_US;
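	/* Worked example (illustrative values): an si->interval of 0x0320
	 * (800) at the 1.25 ms periodic interval unit (PERIODIC_INT_UNIT_US)
	 * gives interval_us = 800 * 1250 = 1000000 us, i.e. a 1 s PA interval.
	 */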

	/* Convert from 10 ms units to interval units */
	if (sync->timeout != 0 && interval_us != 0) {
		sync->timeout_reload = RADIO_SYNC_EVENTS((sync->timeout * 10U *
							  USEC_PER_MSEC), interval_us);
	}
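	/* Continuing the example above (assumed values): a Sync_Timeout of
	 * 0x0190 (400 * 10 ms = 4 s) over a 1 s interval yields a
	 * timeout_reload of 4 periodic events.
	 */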

	/* Adjust Skip value so that there is minimum of 6 events that can be
	 * listened to before Sync_Timeout occurs.
	 * The adjustment of the skip value is controller implementation
	 * specific and not specified by the Bluetooth Core Specification v5.3.
	 * The Controller `may` use the Skip value, and the implementation here
	 * covers a case where the Skip value could lead to fewer events being
	 * listened to until Sync_Timeout. Listening to more consecutive events
	 * before Sync_Timeout increases the probability of retaining the
	 * Periodic Synchronization.
	 */
	if (sync->timeout_reload > CONN_ESTAB_COUNTDOWN) {
		uint16_t skip_max = sync->timeout_reload - CONN_ESTAB_COUNTDOWN;

		if (sync->skip > skip_max) {
			sync->skip = skip_max;
		}
	}
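	/* E.g. if timeout_reload were 10 events and CONN_ESTAB_COUNTDOWN is
	 * the 6 events mentioned above, a Host-requested skip of 8 would be
	 * clamped to 4, keeping at least 6 receive opportunities before
	 * Sync_Timeout.
	 */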

	sync->sync_expire = CONN_ESTAB_COUNTDOWN;

	/* Extract the SCA value from the sca_chm field of the sync_info
	 * structure.
	 */
	sca = (si->sca_chm[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &
	       PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK) >>
	      PDU_SYNC_INFO_SCA_CHM_SCA_BIT_POS;

	lll->sca = sca;

	lll->window_widening_periodic_us =
		DIV_ROUND_UP(((lll_clock_ppm_local_get() +
			       lll_clock_ppm_get(sca)) *
			      interval_us), USEC_PER_SEC);
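	/* Illustrative numbers: 50 ppm local plus 50 ppm peer clock accuracy
	 * over a 1 s interval widens the receive window by
	 * DIV_ROUND_UP(100 * 1000000, 1000000) = 100 us per periodic event.
	 */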
	lll->window_widening_max_us = (interval_us >> 1) - EVENT_IFS_US;
	if (PDU_ADV_SYNC_INFO_OFFS_UNITS_GET(si)) {
		lll->window_size_event_us = OFFS_UNIT_300_US;
	} else {
		lll->window_size_event_us = OFFS_UNIT_30_US;
	}

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	lll->node_cte_incomplete = NULL;
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	/* Prepare Periodic Advertising Sync Transfer Received event (dispatched later) */
	sync_handle = ull_sync_handle_get(sync);
	rx = (void *)sync->node_rx_sync_estab;
	rx->hdr.type = NODE_RX_TYPE_SYNC_TRANSFER_RECEIVED;
	rx->hdr.handle = sync_handle;
	rx->rx_ftr.param = sync;

	/* Create node_rx and assign values */
	se_past = (void *)rx->pdu;
	se_past->rx_sync.status = BT_HCI_ERR_SUCCESS;
	se_past->rx_sync.interval = interval;
	se_past->rx_sync.phy = sync->lll.phy;
	se_past->rx_sync.sca = sca;
	se_past->conn_handle = ll_conn_handle_get(conn);
	se_past->service_data = service_data;

	conn_interval_us = conn->lll.interval * CONN_INT_UNIT_US;

	/* Calculate offset and schedule sync radio events */
	ready_delay_us = lll_radio_rx_ready_delay_get(lll->phy, PHY_FLAGS_S8);

	sync_offset_us = PDU_ADV_SYNC_INFO_OFFSET_GET(si) * lll->window_size_event_us;
	/* offs_adjust may be 1 only if sync setup by LL_PERIODIC_SYNC_IND */
	sync_offset_us += (PDU_ADV_SYNC_INFO_OFFS_ADJUST_GET(si) ? OFFS_ADJUST_US : 0U);
	sync_offset_us -= EVENT_TICKER_RES_MARGIN_US;
	sync_offset_us -= EVENT_JITTER_US;
	sync_offset_us -= ready_delay_us;

	if (conn_evt_offset) {
		int64_t conn_offset_us = (int64_t)conn_evt_offset * conn_interval_us;

		if ((int64_t)sync_offset_us + conn_offset_us < 0) {
			uint32_t total_offset_us = abs((int64_t)sync_offset_us + conn_offset_us);
			uint32_t sync_intervals = DIV_ROUND_UP(total_offset_us, interval_us);

			lll->event_counter += sync_intervals;
			sync_offset_us = (sync_intervals * interval_us) - total_offset_us;
		} else {
			sync_offset_us += conn_offset_us;
		}
	}
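	/* Example with assumed values: sync_offset_us = 2 ms and
	 * conn_offset_us = -5 ms give a -3 ms total, so one 1 s sync interval
	 * is skipped (event_counter += 1) and the listen point lands
	 * 1000000 - 3000 = 997000 us ahead.
	 */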

	/* Calculate initial window widening - see Core Spec vol 6, part B, 5.1.13.1 */
	{
		uint16_t event_delta;
		uint32_t drift_us;
		uint64_t da;
		uint64_t db;
		uint64_t d;

		const uint32_t local_sca_ppm = lll_clock_ppm_local_get();

		event_delta = lll->event_counter - last_pa_event_counter;

		da = (uint64_t)(local_sca_ppm + lll_clock_ppm_get(sca)) * interval_us;
		da = DIV_ROUND_UP(da * (uint64_t)event_delta, USEC_PER_SEC);

		db = (uint64_t)(local_sca_ppm + lll_clock_ppm_get(sender_sca)) * conn_interval_us;
		db = DIV_ROUND_UP(db * (uint64_t)(ull_conn_event_counter(conn) -
						  sync_conn_event_count), USEC_PER_SEC);

		d = DIV_ROUND_UP((da + db) * (USEC_PER_SEC + local_sca_ppm +
					      lll_clock_ppm_get(sca) +
					      lll_clock_ppm_get(sender_sca)), USEC_PER_SEC);
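		/* In words: da is the drift accumulated over the PA train
		 * since last_pa_event_counter, db the drift over the ACL
		 * since sync_conn_event_count, and their sum is scaled once
		 * more by the combined clock accuracies to bound the
		 * worst-case relative error.
		 */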

		/* Limit drift compensation to the maximum window widening */
		drift_us = MIN((uint32_t)d, lll->window_widening_max_us);

		/* Apply total drift to initial window size */
		lll->window_size_event_us += drift_us;

		/* Adjust offset if less than the drift compensation */
		while (sync_offset_us < drift_us) {
			sync_offset_us += interval_us;
			lll->event_counter++;
		}

		sync_offset_us -= drift_us;
	}

	interval_us -= lll->window_widening_periodic_us;

	/* Calculate event time reservation */
	slot_us = PDU_AC_MAX_US(PDU_AC_EXT_PAYLOAD_RX_SIZE, lll->phy);
	slot_us += ready_delay_us;

	/* Add implementation defined radio event overheads */
	if (IS_ENABLED(CONFIG_BT_CTLR_EVENT_OVERHEAD_RESERVE_MAX)) {
		slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
	}

	/* TODO: active_to_start feature port */
	sync->ull.ticks_active_to_start = 0U;
	sync->ull.ticks_prepare_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	sync->ull.ticks_preempt_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
	sync->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);

	ticks_slot_offset = MAX(sync->ull.ticks_active_to_start,
				sync->ull.ticks_prepare_to_start);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}
	ticks_slot_offset += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

	sync->lll_sync_prepare = lll_sync_create_prepare;

	ticks_anchor = conn->llcp.prep.ticks_at_expire;

#if defined(CONFIG_BT_PERIPHERAL)
	if (conn->lll.role == BT_HCI_ROLE_PERIPHERAL) {
		/* Compensate for window widening */
		ticks_anchor += HAL_TICKER_US_TO_TICKS(conn->lll.periph.window_widening_event_us);
	}
#endif /* CONFIG_BT_PERIPHERAL */

	ret = ticker_start(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			   (TICKER_ID_SCAN_SYNC_BASE + sync_handle),
			   ticks_anchor,
			   HAL_TICKER_US_TO_TICKS(sync_offset_us),
			   HAL_TICKER_US_TO_TICKS(interval_us),
			   HAL_TICKER_REMAINDER(interval_us),
			   TICKER_NULL_LAZY,
			   (sync->ull.ticks_slot + ticks_slot_overhead),
			   ticker_cb, sync,
			   ticker_start_op_cb, (void *)__LINE__);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */


uint8_t ll_sync_create_cancel(void **rx)
{
	struct ll_scan_set *scan_coded;
	memq_link_t *link_sync_estab;
	memq_link_t *link_sync_lost;
	struct node_rx_pdu *node_rx;
	struct ll_scan_set *scan;
	struct ll_sync_set *sync;
	struct node_rx_sync *se;

	scan = ull_scan_set_get(SCAN_HANDLE_1M);
	if (!scan || !scan->periodic.sync) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
		if (!scan_coded || !scan_coded->periodic.sync) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}

	/* Check for a race condition wherein sync is established when sync
	 * create cancel is invoked.
	 *
	 * Setting `scan->periodic.cancelled` represents the cancellation
	 * requested in the thread context. Checking `scan->periodic.sync` for
	 * NULL confirms whether synchronization was established before
	 * `scan->periodic.cancelled` was set to 1U.
	 */
	scan->periodic.cancelled = 1U;
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded->periodic.cancelled = 1U;
	}
	cpu_dmb();
	sync = scan->periodic.sync;
	if (!sync) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* node_rx_sync_estab is assigned when Host calls create sync and cleared when sync is
	 * established. timeout_reload is set when sync is found and setup. It is non-zero until
	 * sync is terminated. Together they give information about current sync state:
	 * - node_rx_sync_estab == NULL && timeout_reload != 0 => sync is established
	 * - node_rx_sync_estab == NULL && timeout_reload == 0 => sync is terminated
	 * - node_rx_sync_estab != NULL && timeout_reload == 0 => sync is created
	 * - node_rx_sync_estab != NULL && timeout_reload != 0 => sync is waiting to be established
	 */
	if (!sync->node_rx_sync_estab) {
		/* There is no sync to be cancelled */
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	sync->is_stop = 1U;
	cpu_dmb();

	if (sync->timeout_reload != 0U) {
		uint16_t sync_handle = ull_sync_handle_get(sync);

		LL_ASSERT(sync_handle <= UINT8_MAX);

		/* Sync is not established yet, so stop sync ticker */
		const int err =
			ull_ticker_stop_with_mark((TICKER_ID_SCAN_SYNC_BASE +
						   (uint8_t)sync_handle),
						  sync, &sync->lll);
		if (err != 0 && err != -EALREADY) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	} /* else: sync was created but not yet setup, there is no sync ticker yet. */

	/* It is safe to remove association with scanner as cancelled flag is
	 * set, sync is_stop flag was set and sync has not been established.
	 */
	ull_sync_setup_reset(sync);

	/* Mark the sync context as sync create cancelled */
	if (IS_ENABLED(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)) {
		sync->timeout = 0U;
	}

	node_rx = sync->node_rx_sync_estab;
	link_sync_estab = node_rx->hdr.link;
	link_sync_lost = sync->node_rx_lost.rx.hdr.link;

	ll_rx_link_release(link_sync_lost);
	ll_rx_link_release(link_sync_estab);
	ll_rx_release(node_rx);

	/* Clear the node after release to mark the sync establish as being completed.
	 * In this case the completion reason is sync cancelled by Host.
	 */
	sync->node_rx_sync_estab = NULL;

	node_rx = (void *)&sync->node_rx_lost;
	node_rx->hdr.type = NODE_RX_TYPE_SYNC;
	node_rx->hdr.handle = LLL_HANDLE_INVALID;

	/* NOTE: struct node_rx_lost has uint8_t member following the
	 * struct node_rx_hdr to store the reason.
	 */
	se = (void *)node_rx->pdu;
	se->status = BT_HCI_ERR_OP_CANCELLED_BY_HOST;

	/* NOTE: Since NODE_RX_TYPE_SYNC is only generated from ULL context,
	 * pass ULL sync context as parameter.
	 */
	node_rx->rx_ftr.param = sync;

	*rx = node_rx;

	return 0;
}

uint8_t ll_sync_terminate(uint16_t handle)
{
	struct lll_scan_aux *lll_aux;
	memq_link_t *link_sync_lost;
	struct ll_sync_set *sync;
	int err;

	sync = ull_sync_is_enabled_get(handle);
	if (!sync) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	/* Request terminate, no new ULL scheduling to be setup */
	sync->is_stop = 1U;
	cpu_dmb();

	/* Stop periodic sync ticker timeouts */
	err = ull_ticker_stop_with_mark(TICKER_ID_SCAN_SYNC_BASE + handle,
					sync, &sync->lll);
	LL_ASSERT_INFO2(err == 0 || err == -EALREADY, handle, err);
	if (err) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Check and stop any auxiliary PDU receptions */
	lll_aux = sync->lll.lll_aux;
	if (lll_aux) {
#if defined(CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS)
		err = ull_scan_aux_stop(&sync->lll);
#else /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
		struct ll_scan_aux_set *aux;

		aux = HDR_LLL2ULL(lll_aux);
		err = ull_scan_aux_stop(aux);
#endif /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
		if (err && (err != -EALREADY)) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}

#if !defined(CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS)
		LL_ASSERT(!aux->parent);
#endif /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
	}

#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
	/* Clean up node_rx_sync_estab if still present */
	if (sync->node_rx_sync_estab) {
		memq_link_t *link_sync_estab;
		struct node_rx_pdu *node_rx;

		node_rx = (void *)sync->node_rx_sync_estab;
		link_sync_estab = node_rx->hdr.link;

		ll_rx_link_release(link_sync_estab);
		ll_rx_release(node_rx);

		sync->node_rx_sync_estab = NULL;
	}
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */

	link_sync_lost = sync->node_rx_lost.rx.hdr.link;
	ll_rx_link_release(link_sync_lost);

	/* Mark sync context not sync established */
	sync->timeout_reload = 0U;

	ull_sync_release(sync);

	return 0;
}

/* @brief Link Layer interface function corresponding to HCI LE Set Periodic
 * Advertising Receive Enable command.
 *
 * @param[in] handle Sync_Handle identifying the periodic advertising
 *                   train. Range: 0x0000 to 0x0EFF.
 * @param[in] enable Bit number 0 - Reporting Enabled.
 *                   Bit number 1 - Duplicate filtering enabled.
 *                   All other bits - Reserved for future use.
 *
 * @return HCI error codes as documented in Bluetooth Core Specification v5.3.
 */
uint8_t ll_sync_recv_enable(uint16_t handle, uint8_t enable)
{
	struct ll_sync_set *sync;

	sync = ull_sync_is_enabled_get(handle);
	if (!sync) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	/* Reporting enabled/disabled */
	sync->rx_enable = (enable & BT_HCI_LE_SET_PER_ADV_RECV_ENABLE_ENABLE) ?
			  1U : 0U;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
	sync->nodups = (enable & BT_HCI_LE_SET_PER_ADV_RECV_ENABLE_FILTER_DUPLICATE) ?
		       1U : 0U;
#endif

	return 0;
}

#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER)
/* @brief Link Layer interface function corresponding to HCI LE Set Periodic
 * Advertising Sync Transfer command.
 *
 * @param[in] conn_handle Connection_Handle identifying the connected device
 *                        Range: 0x0000 to 0x0EFF.
 * @param[in] service_data Service_Data value provided by the Host for use by the
 *                         Host of the peer device.
 * @param[in] sync_handle Sync_Handle identifying the periodic advertising
 *                        train. Range: 0x0000 to 0x0EFF.
 *
 * @return HCI error codes as documented in Bluetooth Core Specification v5.4.
 */
uint8_t ll_sync_transfer(uint16_t conn_handle, uint16_t service_data, uint16_t sync_handle)
{
	struct ll_sync_set *sync;
	struct ll_conn *conn;

	conn = ll_connected_get(conn_handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	/* Verify that sync_handle is valid */
	sync = ull_sync_is_enabled_get(sync_handle);
	if (!sync) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	/* Call llcp to start LLCP_PERIODIC_SYNC_IND */
	return ull_cp_periodic_sync(conn, sync, NULL, service_data);
}
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER */

#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
/* @brief Link Layer interface function corresponding to HCI LE Set Periodic
 * Advertising Sync Transfer Parameters command.
 *
 * @param[in] conn_handle Connection_Handle identifying the connected device
 *                        Range: 0x0000 to 0x0EFF.
 * @param[in] mode Mode specifies the action to be taken when a periodic advertising
 *                 synchronization is received.
 * @param[in] skip Skip specifying the number of consecutive periodic advertising
 *                 packets that the receiver may skip after successfully receiving a
 *                 periodic advertising packet. Range: 0x0000 to 0x01F3.
 * @param[in] timeout Sync_timeout specifying the maximum permitted time between
 *                    successful receives. Range: 0x000A to 0x4000.
 * @param[in] cte_type CTE_Type specifying whether to only synchronize to periodic
 *                     advertising with certain types of Constant Tone Extension.
 *
 * @return HCI error codes as documented in Bluetooth Core Specification v5.4.
 */
uint8_t ll_past_param(uint16_t conn_handle, uint8_t mode, uint16_t skip, uint16_t timeout,
		      uint8_t cte_type)
{
	struct ll_conn *conn;

	conn = ll_connected_get(conn_handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	if (mode == BT_HCI_LE_PAST_MODE_SYNC_FILTER_DUPLICATES &&
	    !IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)) {
		return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
	}

	/* Set PAST Param for connection instance */
	conn->past.mode = mode;
	conn->past.skip = skip;
	conn->past.timeout = timeout;
	conn->past.cte_type = cte_type;

	return 0;
}

/* @brief Link Layer interface function corresponding to HCI LE Set Default Periodic
 * Advertising Sync Transfer Parameters command.
 *
 * @param[in] mode Mode specifies the action to be taken when a periodic advertising
 *                 synchronization is received.
 * @param[in] skip Skip specifying the number of consecutive periodic advertising
 *                 packets that the receiver may skip after successfully receiving a
 *                 periodic advertising packet. Range: 0x0000 to 0x01F3.
 * @param[in] timeout Sync_timeout specifying the maximum permitted time between
 *                    successful receives. Range: 0x000A to 0x4000.
 * @param[in] cte_type CTE_Type specifying whether to only synchronize to periodic
 *                     advertising with certain types of Constant Tone Extension.
 *
 * @return HCI error codes as documented in Bluetooth Core Specification v5.4.
 */
uint8_t ll_default_past_param(uint8_t mode, uint16_t skip, uint16_t timeout, uint8_t cte_type)
{
	if (mode == BT_HCI_LE_PAST_MODE_SYNC_FILTER_DUPLICATES &&
	    !IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)) {
		return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
	}

	/* Set default past param */
	ull_conn_default_past_param_set(mode, skip, timeout, cte_type);

	return 0;
}
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */

int ull_sync_init(void)
{
	int err;

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

int ull_sync_reset(void)
{
	uint16_t handle;
	void *rx;
	int err;

	(void)ll_sync_create_cancel(&rx);

	for (handle = 0U; handle < CONFIG_BT_PER_ADV_SYNC_MAX; handle++) {
		(void)ll_sync_terminate(handle);
	}

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

struct ll_sync_set *ull_sync_set_get(uint16_t handle)
{
	if (handle >= CONFIG_BT_PER_ADV_SYNC_MAX) {
		return NULL;
	}

	return &ll_sync_pool[handle];
}

struct ll_sync_set *ull_sync_is_enabled_get(uint16_t handle)
{
	struct ll_sync_set *sync;

	sync = ull_sync_set_get(handle);
	if (!sync || !sync->timeout_reload) {
		return NULL;
	}

	return sync;
}

struct ll_sync_set *ull_sync_is_valid_get(struct ll_sync_set *sync)
{
	if (((uint8_t *)sync < (uint8_t *)ll_sync_pool) ||
	    ((uint8_t *)sync > ((uint8_t *)ll_sync_pool +
				(sizeof(struct ll_sync_set) * (CONFIG_BT_PER_ADV_SYNC_MAX - 1))))) {
		return NULL;
	}

	return sync;
}

struct lll_sync *ull_sync_lll_is_valid_get(struct lll_sync *lll)
{
	struct ll_sync_set *sync;

	sync = HDR_LLL2ULL(lll);
	sync = ull_sync_is_valid_get(sync);
	if (sync) {
		return &sync->lll;
	}

	return NULL;
}

uint16_t ull_sync_handle_get(struct ll_sync_set *sync)
{
	return mem_index_get(sync, ll_sync_pool, sizeof(struct ll_sync_set));
}

uint16_t ull_sync_lll_handle_get(struct lll_sync *lll)
{
	return ull_sync_handle_get(HDR_LLL2ULL(lll));
}

void ull_sync_release(struct ll_sync_set *sync)
{
#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	struct lll_sync *lll = &sync->lll;

	if (lll->node_cte_incomplete) {
		const uint8_t release_cnt = 1U;
		struct node_rx_pdu *node_rx;
		memq_link_t *link;

		node_rx = &lll->node_cte_incomplete->rx;
		link = node_rx->hdr.link;

		ll_rx_link_release(link);
		ull_iq_report_link_inc_quota(release_cnt);
		ull_df_iq_report_mem_release(node_rx);
		ull_df_rx_iq_report_alloc(release_cnt);

		lll->node_cte_incomplete = NULL;
	}
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	/* Mark the sync context as sync create cancelled */
	if (IS_ENABLED(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)) {
		sync->timeout = 0U;
	}

#if !defined(CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS)
	/* reset accumulated data len */
	sync->data_len = 0U;
#endif /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */

	mem_release(sync, &sync_free);
}

void ull_sync_setup_addr_check(struct ll_sync_set *sync, struct ll_scan_set *scan,
			       uint8_t addr_type, uint8_t *addr, uint8_t rl_idx)
{
	/* Check if Periodic Advertiser list to be used */
	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST) &&
	    scan->periodic.filter_policy) {
		/* Check in Periodic Advertiser List */
		if (ull_filter_ull_pal_addr_match(addr_type, addr)) {
			/* Remember the address, to check with
			 * SID in Sync Info
			 */
			sync->peer_id_addr_type = addr_type;
			(void)memcpy(sync->peer_id_addr, addr,
				     BDADDR_SIZE);

			/* Address matched */
			scan->periodic.state = LL_SYNC_STATE_ADDR_MATCH;

		/* Check in Resolving List */
		} else if (IS_ENABLED(CONFIG_BT_CTLR_PRIVACY) &&
			   ull_filter_ull_pal_listed(rl_idx, &addr_type,
						     sync->peer_id_addr)) {
			/* Remember the address, to check with the
			 * SID in Sync Info
			 */
			sync->peer_id_addr_type = addr_type;

			/* Mark it as identity address from RPA */
			sync->peer_addr_resolved = 1U;

			/* Address matched */
			scan->periodic.state = LL_SYNC_STATE_ADDR_MATCH;
		}

	/* Check with explicitly supplied address */
	} else if ((addr_type == sync->peer_id_addr_type) &&
		   !memcmp(addr, sync->peer_id_addr, BDADDR_SIZE)) {
		/* Address matched */
		scan->periodic.state = LL_SYNC_STATE_ADDR_MATCH;

	/* Check identity address with explicitly supplied address */
	} else if (IS_ENABLED(CONFIG_BT_CTLR_PRIVACY) &&
		   (rl_idx < ll_rl_size_get())) {
		ll_rl_id_addr_get(rl_idx, &addr_type, addr);
		if ((addr_type == sync->peer_id_addr_type) &&
		    !memcmp(addr, sync->peer_id_addr, BDADDR_SIZE)) {
			/* Mark it as identity address from RPA */
			sync->peer_addr_resolved = 1U;

			/* Identity address matched */
			scan->periodic.state = LL_SYNC_STATE_ADDR_MATCH;
		}
	}
}

bool ull_sync_setup_sid_match(struct ll_sync_set *sync, struct ll_scan_set *scan, uint8_t sid)
{
	return (scan->periodic.state == LL_SYNC_STATE_ADDR_MATCH) &&
	       ((IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST) &&
		 scan->periodic.filter_policy &&
		 ull_filter_ull_pal_match(sync->peer_id_addr_type,
					  sync->peer_id_addr, sid)) ||
		(!scan->periodic.filter_policy &&
		 (sid == sync->sid)));
}

void ull_sync_setup(struct ll_scan_set *scan, uint8_t phy,
		    struct node_rx_pdu *node_rx, struct pdu_adv_sync_info *si)
{
	uint32_t ticks_slot_overhead;
	uint32_t ticks_slot_offset;
	struct ll_sync_set *sync;
	struct node_rx_sync *se;
	struct node_rx_ftr *ftr;
	uint32_t sync_offset_us;
	uint32_t ready_delay_us;
	struct node_rx_pdu *rx;
	uint8_t *data_chan_map;
	struct lll_sync *lll;
	uint16_t sync_handle;
	uint32_t interval_us;
	uint32_t overhead_us;
	struct pdu_adv *pdu;
	uint16_t interval;
	uint32_t slot_us;
	uint8_t chm_last;
	uint32_t ret;
	uint8_t sca;

	/* Populate the LLL context */
	sync = scan->periodic.sync;
	lll = &sync->lll;

	/* Copy channel map from sca_chm field in sync_info structure, and
	 * clear the SCA bits.
	 */
	chm_last = lll->chm_first;
	lll->chm_last = chm_last;
	data_chan_map = lll->chm[chm_last].data_chan_map;
	(void)memcpy(data_chan_map, si->sca_chm,
		     sizeof(lll->chm[chm_last].data_chan_map));
	data_chan_map[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &=
		~PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK;
	lll->chm[chm_last].data_chan_count =
		util_ones_count_get(data_chan_map,
				    sizeof(lll->chm[chm_last].data_chan_map));
	if (lll->chm[chm_last].data_chan_count < CHM_USED_COUNT_MIN) {
		/* Ignore sync setup, invalid available channel count */
		return;
	}

	memcpy(lll->access_addr, si->aa, sizeof(lll->access_addr));
	lll->data_chan_id = lll_chan_id(lll->access_addr);
	memcpy(lll->crc_init, si->crc_init, sizeof(lll->crc_init));
	lll->event_counter = sys_le16_to_cpu(si->evt_cntr);
	lll->phy = phy;
	lll->forced = 0U;

	interval = sys_le16_to_cpu(si->interval);
	interval_us = interval * PERIODIC_INT_UNIT_US;

#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER)
	/* Save Periodic Advertisement Interval */
	sync->interval = interval;
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER */

	/* Convert from 10 ms units to interval units */
	sync->timeout_reload = RADIO_SYNC_EVENTS((sync->timeout * 10U *
						  USEC_PER_MSEC), interval_us);

	/* Adjust Skip value so that there is minimum of 6 events that can be
	 * listened to before Sync_Timeout occurs.
	 * The adjustment of the skip value is controller implementation
	 * specific and not specified by the Bluetooth Core Specification v5.3.
	 * The Controller `may` use the Skip value, and the implementation here
	 * covers a case where the Skip value could lead to fewer events being
	 * listened to until Sync_Timeout. Listening to more consecutive events
	 * before Sync_Timeout increases the probability of retaining the
	 * Periodic Synchronization.
	 */
	if (sync->timeout_reload > CONN_ESTAB_COUNTDOWN) {
		uint16_t skip_max = sync->timeout_reload - CONN_ESTAB_COUNTDOWN;

		if (sync->skip > skip_max) {
			sync->skip = skip_max;
		}
	} else {
		sync->skip = 0U;
	}

	sync->sync_expire = CONN_ESTAB_COUNTDOWN;

	/* Extract the SCA value from the sca_chm field of the sync_info
	 * structure.
	 */
	sca = (si->sca_chm[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &
	       PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK) >>
	      PDU_SYNC_INFO_SCA_CHM_SCA_BIT_POS;

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	lll->sca = sca;
#endif /* CONFIG_BT_CTLR_SYNC_ISO */

	lll->window_widening_periodic_us =
		DIV_ROUND_UP(((lll_clock_ppm_local_get() +
			       lll_clock_ppm_get(sca)) *
			      interval_us), USEC_PER_SEC);
	lll->window_widening_max_us = (interval_us >> 1) - EVENT_IFS_US;
	if (PDU_ADV_SYNC_INFO_OFFS_UNITS_GET(si)) {
		lll->window_size_event_us = OFFS_UNIT_300_US;
	} else {
		lll->window_size_event_us = OFFS_UNIT_30_US;
	}

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	lll->node_cte_incomplete = NULL;
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	/* Set the state to sync create */
	scan->periodic.state = LL_SYNC_STATE_CREATED;
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		struct ll_scan_set *scan_1m;

		scan_1m = ull_scan_set_get(SCAN_HANDLE_1M);
		if (scan == scan_1m) {
			struct ll_scan_set *scan_coded;

			scan_coded = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
			scan_coded->periodic.state = LL_SYNC_STATE_CREATED;
		} else {
			scan_1m->periodic.state = LL_SYNC_STATE_CREATED;
		}
	}

	sync_handle = ull_sync_handle_get(sync);

	/* Prepare sync notification, dispatch only on successful AUX_SYNC_IND
	 * reception.
	 */
	rx = (void *)sync->node_rx_sync_estab;
	rx->hdr.type = NODE_RX_TYPE_SYNC;
	rx->hdr.handle = sync_handle;
	rx->rx_ftr.param = sync;
	se = (void *)rx->pdu;
	se->interval = interval;
	se->phy = lll->phy;
	se->sca = sca;

	/* Calculate offset and schedule sync radio events */
	ftr = &node_rx->rx_ftr;
	pdu = (void *)((struct node_rx_pdu *)node_rx)->pdu;

	ready_delay_us = lll_radio_rx_ready_delay_get(lll->phy, PHY_FLAGS_S8);

	sync_offset_us = ftr->radio_end_us;
	sync_offset_us += PDU_ADV_SYNC_INFO_OFFSET_GET(si) *
			  lll->window_size_event_us;
	/* offs_adjust may be 1 only if sync setup by LL_PERIODIC_SYNC_IND */
	sync_offset_us += (PDU_ADV_SYNC_INFO_OFFS_ADJUST_GET(si) ? OFFS_ADJUST_US : 0U);
	sync_offset_us -= PDU_AC_US(pdu->len, lll->phy, ftr->phy_flags);
	sync_offset_us -= EVENT_TICKER_RES_MARGIN_US;
	sync_offset_us -= EVENT_JITTER_US;
	sync_offset_us -= ready_delay_us;

	/* Minimum prepare tick offset + minimum preempt tick offset are the
	 * overheads before ULL scheduling can setup radio for reception
	 */
	overhead_us = HAL_TICKER_TICKS_TO_US(HAL_TICKER_CNTR_CMP_OFFSET_MIN << 1);

	/* CPU execution overhead to setup the radio for reception */
	overhead_us += EVENT_OVERHEAD_END_US + EVENT_OVERHEAD_START_US;

	/* If not sufficient CPU processing time, skip to receiving next
	 * event.
	 */
	if ((sync_offset_us - ftr->radio_end_us) < overhead_us) {
		sync_offset_us += interval_us;
		lll->event_counter++;
	}
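	/* That is, if the first AUX_SYNC_IND would start sooner than
	 * overhead_us after the end of the PDU just received, there is not
	 * enough time to schedule and set up the radio; wait one full
	 * periodic interval and receive the next event instead.
	 */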

	interval_us -= lll->window_widening_periodic_us;

	/* Calculate event time reservation */
	slot_us = PDU_AC_MAX_US(PDU_AC_EXT_PAYLOAD_RX_SIZE, lll->phy);
	slot_us += ready_delay_us;

	/* Add implementation defined radio event overheads */
	if (IS_ENABLED(CONFIG_BT_CTLR_EVENT_OVERHEAD_RESERVE_MAX)) {
		slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
	}

	/* TODO: active_to_start feature port */
	sync->ull.ticks_active_to_start = 0U;
	sync->ull.ticks_prepare_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	sync->ull.ticks_preempt_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
	sync->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);

	ticks_slot_offset = MAX(sync->ull.ticks_active_to_start,
				sync->ull.ticks_prepare_to_start);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}
	ticks_slot_offset += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

	sync->lll_sync_prepare = lll_sync_create_prepare;

	ret = ticker_start(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			   (TICKER_ID_SCAN_SYNC_BASE + sync_handle),
			   ftr->ticks_anchor - ticks_slot_offset,
			   HAL_TICKER_US_TO_TICKS(sync_offset_us),
			   HAL_TICKER_US_TO_TICKS(interval_us),
			   HAL_TICKER_REMAINDER(interval_us),
			   TICKER_NULL_LAZY,
			   (sync->ull.ticks_slot + ticks_slot_overhead),
			   ticker_cb, sync,
			   ticker_start_op_cb, (void *)__LINE__);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}

void ull_sync_setup_reset(struct ll_sync_set *sync)
{
	struct ll_scan_set *scan;

	/* Remove the sync context from being associated with scan contexts */
	scan = ull_scan_set_get(SCAN_HANDLE_1M);

	scan->periodic.sync = NULL;

#if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
	scan->lll.is_sync = 0U;
#endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */

	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);

		scan->periodic.sync = NULL;

#if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
		scan->lll.is_sync = 0U;
#endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
	}
}

void ull_sync_established_report(memq_link_t *link, struct node_rx_pdu *rx)
{
	struct node_rx_pdu *rx_establ;
	struct ll_sync_set *sync;
	struct node_rx_ftr *ftr;
	struct node_rx_sync *se;
	struct lll_sync *lll;

	ftr = &rx->rx_ftr;
	lll = ftr->param;
	sync = HDR_LLL2ULL(lll);

	/* Do nothing if sync is cancelled or lost. */
	if (unlikely(sync->is_stop || !sync->timeout_reload)) {
		return;
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
	enum sync_status sync_status;

#if defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
	sync_status = ftr->sync_status;
#else
	struct pdu_cte_info *rx_cte_info;

	rx_cte_info = pdu_cte_info_get((struct pdu_adv *)rx->pdu);
	if (rx_cte_info != NULL) {
		sync_status = lll_sync_cte_is_allowed(lll->cte_type, lll->filter_policy,
						      rx_cte_info->time, rx_cte_info->type);
	} else {
		sync_status = lll_sync_cte_is_allowed(lll->cte_type, lll->filter_policy, 0,
						      BT_HCI_LE_NO_CTE);
	}

	/* If there is no CTEInline support, notify the done event handler to
	 * terminate the periodic advertising sync in case the CTE is not
	 * allowed. If the periodic filtering list is not used, terminate the
	 * synchronization and notify the host. If the periodic filtering list
	 * is used, stop synchronization with this particular periodic
	 * advertiser but continue to search for another one.
	 */
	sync->is_term = ((sync_status == SYNC_STAT_TERM) || (sync_status == SYNC_STAT_CONT_SCAN));
#endif /* CONFIG_BT_CTLR_CTEINLINE_SUPPORT */

	/* Send the periodic advertising sync established report when the sync
	 * has the correct CTE type, or when the CTE type is incorrect and the
	 * filter policy doesn't allow scanning to continue.
	 */
	if (sync_status == SYNC_STAT_ALLOWED || sync_status == SYNC_STAT_TERM) {
#else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

	if (1) {
#endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

		/* Prepare and dispatch sync notification */
		rx_establ = (void *)sync->node_rx_sync_estab;
		rx_establ->hdr.handle = ull_sync_handle_get(sync);
		se = (void *)rx_establ->pdu;
		/* Clear the node to mark the sync establish as being completed.
		 * In this case the completion reason is sync being established.
		 */
		sync->node_rx_sync_estab = NULL;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
		se->status = (ftr->sync_status == SYNC_STAT_TERM) ?
			     BT_HCI_ERR_UNSUPP_REMOTE_FEATURE :
			     BT_HCI_ERR_SUCCESS;
#else
		se->status = BT_HCI_ERR_SUCCESS;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

		/* NOTE: footer param has already been populated during sync
		 * setup.
		 */

		ll_rx_put_sched(rx_establ->hdr.link, rx_establ);
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
	/* Handle the periodic advertising PDU and send the periodic
	 * advertising scan report when the sync was found or was established
	 * in the past. The report is not sent if scanning is terminated due
	 * to a wrong CTE type.
	 */
	if (sync_status == SYNC_STAT_ALLOWED || sync_status == SYNC_STAT_READY) {
#else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

	if (1) {
#endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

		/* Switch sync event prepare function to one responsible for regular PDUs receive */
		sync->lll_sync_prepare = lll_sync_prepare;

		/* Change node type to appropriately handle periodic
		 * advertising PDU report.
		 */
		rx->hdr.type = NODE_RX_TYPE_SYNC_REPORT;
		ull_scan_aux_setup(link, rx);
	} else {
		rx->hdr.type = NODE_RX_TYPE_RELEASE;
		ll_rx_put_sched(link, rx);
	}
}

void ull_sync_done(struct node_rx_event_done *done)
{
	struct ll_sync_set *sync;

	/* Get reference to ULL context */
	sync = CONTAINER_OF(done->param, struct ll_sync_set, ull);

	/* Do nothing if local terminate requested or sync lost */
	if (unlikely(sync->is_stop || !sync->timeout_reload)) {
		return;
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
#if defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
	if (done->extra.sync_term) {
#else
	if (sync->is_term) {
#endif /* CONFIG_BT_CTLR_CTEINLINE_SUPPORT */
		/* In case the periodic advertising list filtering is not used,
		 * the synchronization must be terminated and a host
		 * notification must be sent.
		 * In case the periodic advertising list filtering is used, the
		 * synchronization with this particular periodic advertiser is
		 * terminated but the search continues for another one from the
		 * list.
		 *
		 * Stop the periodic advertising sync ticker and clear the
		 * variables informing that the sync is pending. That is a step
		 * towards completely terminating the synchronization. In case
		 * of a search for another periodic advertiser, it allows a new
		 * ticker to be set up for that.
		 */
		sync_ticker_cleanup(sync, NULL);
	} else
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */
	{
		uint32_t ticks_drift_minus;
		uint32_t ticks_drift_plus;
		uint16_t elapsed_event;
		struct lll_sync *lll;
		uint16_t skip_event;
		uint8_t force_lll;
		uint16_t lazy;
		uint8_t force;

		lll = &sync->lll;

		/* Events elapsed used in timeout checks below */
		skip_event = lll->skip_event;

		/* Sync drift compensation and new skip calculation */
		ticks_drift_plus = 0U;
		ticks_drift_minus = 0U;
		if (done->extra.trx_cnt) {
			/* Calculate drift in ticks unit */
			ull_drift_ticks_get(done, &ticks_drift_plus, &ticks_drift_minus);

			/* Enforce skip */
			lll->skip_event = sync->skip;

			/* Reset failed to establish sync countdown */
			sync->sync_expire = 0U;
		}

		elapsed_event = skip_event + lll->lazy_prepare + 1U;
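		/* Illustrative example: with skip_event = 2 and
		 * lazy_prepare = 0, three periodic intervals have elapsed
		 * since the countdowns were last serviced, so they are
		 * decremented by 3 below.
		 */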

		/* Reset supervision countdown */
		if (done->extra.crc_valid) {
			sync->timeout_expire = 0U;
		}

		/* check sync failed to establish */
		else if (sync->sync_expire) {
			if (sync->sync_expire > elapsed_event) {
				sync->sync_expire -= elapsed_event;
			} else {
				sync_ticker_cleanup(sync, ticker_stop_sync_expire_op_cb);

				return;
			}
		}

		/* If anchor point not sync-ed, start timeout countdown, and break skip if any */
		else if (!sync->timeout_expire) {
			sync->timeout_expire = sync->timeout_reload;
		}

		/* check timeout */
		force = 0U;
		force_lll = 0U;
		if (sync->timeout_expire) {
			if (sync->timeout_expire > elapsed_event) {
				sync->timeout_expire -= elapsed_event;

				/* break skip */
				lll->skip_event = 0U;

				if (sync->timeout_expire <= 6U) {
					force_lll = 1U;

					force = 1U;
				} else if (skip_event) {
					force = 1U;
				}
			} else {
				sync_ticker_cleanup(sync, ticker_stop_sync_lost_op_cb);

				return;
			}
		}

		lll->forced = force_lll;

		/* Check if skip needs update */
		lazy = 0U;
		if ((force) || (skip_event != lll->skip_event)) {
			lazy = lll->skip_event + 1U;
		}
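		/* Assumed ticker_update semantics: a lazy argument of 0 leaves
		 * the ticker's lazy count unchanged, so skip_event + 1 here
		 * programs a lazy count of skip_event, i.e. wake on every
		 * (skip_event + 1)-th periodic event.
		 */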

		/* Update Sync ticker instance */
		if (ticks_drift_plus || ticks_drift_minus || lazy || force) {
			uint16_t sync_handle = ull_sync_handle_get(sync);
			uint32_t ticker_status;

			/* Call to ticker_update can fail under the race
			 * condition wherein the periodic sync role is being
			 * stopped but at the same time is preempted by a
			 * periodic sync event that gets into the close state.
			 * Accept failure when the periodic sync role is being
			 * stopped.
			 */
			ticker_status =
				ticker_update(TICKER_INSTANCE_ID_CTLR,
					      TICKER_USER_ID_ULL_HIGH,
					      (TICKER_ID_SCAN_SYNC_BASE +
					       sync_handle),
					      ticks_drift_plus,
					      ticks_drift_minus, 0, 0,
					      lazy, force,
					      ticker_update_op_cb, sync);
			LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
				  (ticker_status == TICKER_STATUS_BUSY) ||
				  ((void *)sync == ull_disable_mark_get()));
		}
	}
}

void ull_sync_chm_update(uint8_t sync_handle, uint8_t *acad, uint8_t acad_len)
{
	struct pdu_adv_sync_chm_upd_ind *chm_upd_ind;
	struct ll_sync_set *sync;
	struct lll_sync *lll;
	uint8_t chm_last;
	uint16_t ad_len;

	/* Get reference to LLL context */
	sync = ull_sync_set_get(sync_handle);
	LL_ASSERT(sync);
	lll = &sync->lll;

	/* Ignore if already in progress */
	if (lll->chm_last != lll->chm_first) {
		return;
	}

	/* Find the Channel Map Update Indication */
	do {
		/* Pick the length and find the Channel Map Update Indication */
		ad_len = acad[PDU_ADV_DATA_HEADER_LEN_OFFSET];
		if (ad_len &&
		    (acad[PDU_ADV_DATA_HEADER_TYPE_OFFSET] ==
		     PDU_ADV_DATA_TYPE_CHANNEL_MAP_UPDATE_IND)) {
			break;
		}

		/* Add length field size */
		ad_len += 1U;
		if (ad_len < acad_len) {
			acad_len -= ad_len;
		} else {
			return;
		}

		/* Move to next AD data */
		acad += ad_len;
	} while (acad_len);

	/* Validate the size of the Channel Map Update Indication */
	if (ad_len != (sizeof(*chm_upd_ind) + 1U)) {
		return;
	}

	/* Pick the parameters into the procedure context */
	chm_last = lll->chm_last + 1U;
	if (chm_last == DOUBLE_BUFFER_SIZE) {
		chm_last = 0U;
	}

	chm_upd_ind = (void *)&acad[PDU_ADV_DATA_HEADER_DATA_OFFSET];
	(void)memcpy(lll->chm[chm_last].data_chan_map, chm_upd_ind->chm,
		     sizeof(lll->chm[chm_last].data_chan_map));
	lll->chm[chm_last].data_chan_count =
		util_ones_count_get(lll->chm[chm_last].data_chan_map,
				    sizeof(lll->chm[chm_last].data_chan_map));
	if (lll->chm[chm_last].data_chan_count < CHM_USED_COUNT_MIN) {
		/* Ignore channel map, invalid available channel count */
		return;
	}

	lll->chm_instant = sys_le16_to_cpu(chm_upd_ind->instant);

	/* Set Channel Map Update Procedure in progress */
	lll->chm_last = chm_last;
}

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
/* @brief Function updates the periodic sync slot duration.
 *
 * @param[in] sync          Pointer to sync instance
 * @param[in] slot_plus_us  Number of microseconds to add to the ticker slot
 * @param[in] slot_minus_us Number of microseconds to subtract from the ticker slot
 *
 * @retval 0       Successful ticker slot update.
 * @retval -ENOENT Ticker node related with provided sync is already stopped.
 * @retval -ENOMEM Couldn't enqueue update ticker job.
 * @retval -EFAULT Something else went wrong.
 */
int ull_sync_slot_update(struct ll_sync_set *sync, uint32_t slot_plus_us,
			 uint32_t slot_minus_us)
{
	uint32_t volatile ret_cb;
	uint32_t ret;

	ret_cb = TICKER_STATUS_BUSY;
	ret = ticker_update(TICKER_INSTANCE_ID_CTLR,
			    TICKER_USER_ID_THREAD,
			    (TICKER_ID_SCAN_SYNC_BASE +
			     ull_sync_handle_get(sync)),
			    0, 0,
			    HAL_TICKER_US_TO_TICKS(slot_plus_us),
			    HAL_TICKER_US_TO_TICKS(slot_minus_us),
			    0, 0,
			    ticker_update_op_status_give,
			    (void *)&ret_cb);
	if (ret == TICKER_STATUS_BUSY || ret == TICKER_STATUS_SUCCESS) {
		/* Wait for the callback, or clear the semaphore if the
		 * callback was already executed.
		 */
		k_sem_take(&sem_ticker_cb, K_FOREVER);

		if (ret_cb == TICKER_STATUS_FAILURE) {
			return -EFAULT; /* Something went wrong */
		} else {
			return 0;
		}
	} else {
		if (ret_cb != TICKER_STATUS_BUSY) {
			/* Ticker callback was executed and job enqueue was successful.
			 * Call k_sem_take to clear ticker callback semaphore.
			 */
			k_sem_take(&sem_ticker_cb, K_FOREVER);
		}
		/* Ticker was already stopped or job was not enqueued. */
		return (ret_cb == TICKER_STATUS_FAILURE) ? -ENOENT : -ENOMEM;
	}
}
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

static int init_reset(void)
{
	/* Initialize sync pool. */
	mem_init(ll_sync_pool, sizeof(struct ll_sync_set),
		 sizeof(ll_sync_pool) / sizeof(struct ll_sync_set),
		 &sync_free);

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	k_sem_init(&sem_ticker_cb, 0, 1);
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	return 0;
}

static inline struct ll_sync_set *sync_acquire(void)
{
	return mem_acquire(&sync_free);
}

static struct ll_sync_set *ull_sync_create(uint8_t sid, uint16_t timeout, uint16_t skip,
					   uint8_t cte_type, uint8_t rx_enable, uint8_t nodups)
{
	memq_link_t *link_sync_estab;
	memq_link_t *link_sync_lost;
	struct node_rx_pdu *node_rx;
	struct lll_sync *lll;
	struct ll_sync_set *sync;

	link_sync_estab = ll_rx_link_alloc();
	if (!link_sync_estab) {
		return NULL;
	}

	link_sync_lost = ll_rx_link_alloc();
	if (!link_sync_lost) {
		ll_rx_link_release(link_sync_estab);

		return NULL;
	}

	node_rx = ll_rx_alloc();
	if (!node_rx) {
		ll_rx_link_release(link_sync_lost);
		ll_rx_link_release(link_sync_estab);

		return NULL;
	}

	sync = sync_acquire();
	if (!sync) {
		ll_rx_release(node_rx);
		ll_rx_link_release(link_sync_lost);
		ll_rx_link_release(link_sync_estab);

		return NULL;
	}

	sync->peer_addr_resolved = 0U;

	/* Initialize sync context */
	node_rx->hdr.link = link_sync_estab;
	sync->node_rx_lost.rx.hdr.link = link_sync_lost;

	/* Make sure that node_rx_sync_estab hasn't got anything assigned. It
	 * is used to mark when sync establishment is in progress.
	 */
	LL_ASSERT(!sync->node_rx_sync_estab);
	sync->node_rx_sync_estab = node_rx;
1624
1625 /* Reporting initially enabled/disabled */
1626 sync->rx_enable = rx_enable;
1627
1628 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
1629 sync->nodups = nodups;
1630 #endif
1631 sync->skip = skip;
1632 sync->is_stop = 0U;
1633
1634 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
1635 sync->enc = 0U;
1636 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
1637
1638 /* NOTE: Use timeout not zero to represent sync context used for sync
1639 * create.
1640 */
1641 sync->timeout = timeout;
1642
1643 /* NOTE: Use timeout_reload not zero to represent sync established. */
1644 sync->timeout_reload = 0U;
1645 sync->timeout_expire = 0U;
1646
1647 /* Remember the SID */
1648 sync->sid = sid;
1649
1650 #if defined(CONFIG_BT_CTLR_SYNC_ISO)
1651 /* Reset Broadcast Isochronous Group Sync Establishment */
1652 sync->iso.sync_iso = NULL;
1653 #endif /* CONFIG_BT_CTLR_SYNC_ISO */
1654
1655 /* Initialize sync LLL context */
1656 lll = &sync->lll;
1657 lll->lll_aux = NULL;
1658 lll->is_rx_enabled = sync->rx_enable;
1659 lll->skip_prepare = 0U;
1660 lll->skip_event = 0U;
1661 lll->window_widening_prepare_us = 0U;
1662 lll->window_widening_event_us = 0U;
1663 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
1664 lll->cte_type = cte_type;
1665 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */
1666
1667 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
1668 ull_df_sync_cfg_init(&lll->df_cfg);
1669 LL_ASSERT(!lll->node_cte_incomplete);
1670 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
1671
1672 /* Initialise ULL and LLL headers */
1673 ull_hdr_init(&sync->ull);
1674 lll_hdr_init(lll, sync);
1675
1676 return sync;
1677 }
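
/* NOTE: On any allocation failure above, resources already taken are
 * released in reverse order of acquisition before returning NULL, so a
 * failed create leaks nothing. A caller therefore only needs a NULL check;
 * a minimal usage sketch, parameter values illustrative:
 *
 *	sync = ull_sync_create(sid, timeout, skip, cte_type, 1U, 0U);
 *	if (!sync) {
 *		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
 *	}
 */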

static void sync_ticker_cleanup(struct ll_sync_set *sync, ticker_op_func stop_op_cb)
{
	uint16_t sync_handle = ull_sync_handle_get(sync);
	uint32_t ret;

	/* Stop Periodic Sync Ticker */
	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			  TICKER_ID_SCAN_SYNC_BASE + sync_handle, stop_op_cb, (void *)sync);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));

	/* Mark the sync context as no longer sync established */
	sync->timeout_reload = 0U;
}
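
/* NOTE: ticker_stop() returns TICKER_STATUS_BUSY when the stop is deferred
 * to the ticker job; in either accepted case the supplied stop_op_cb is
 * invoked with the final status once the operation completes.
 */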

static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param)
{
	static memq_link_t link_lll_prepare;
	static struct mayfly mfy_lll_prepare = {
		0, 0, &link_lll_prepare, NULL, NULL};
	static struct lll_prepare_param p;
	struct ll_sync_set *sync = param;
	struct lll_sync *lll;
	uint32_t ret;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_O(1);

	lll = &sync->lll;

	/* Commit receive enable changed value */
	lll->is_rx_enabled = sync->rx_enable;

	/* Increment prepare reference count */
	ref = ull_ref_inc(&sync->ull);
	LL_ASSERT(ref);

	/* Append timing parameters */
	p.ticks_at_expire = ticks_at_expire;
	p.remainder = remainder;
	p.lazy = lazy;
	p.force = force;
	p.param = lll;
	mfy_lll_prepare.param = &p;
	mfy_lll_prepare.fp = sync->lll_sync_prepare;

	/* Kick LLL prepare */
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, 0,
			     &mfy_lll_prepare);
	LL_ASSERT(!ret);

	DEBUG_RADIO_PREPARE_O(1);
}
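
/* NOTE: The static mayfly above is the recurring ULL pattern for deferring
 * work to another execution context: one statically allocated mayfly (with
 * its memq link) per call site, re-armed with fresh fp/param before each
 * enqueue. A minimal sketch of the same pattern, names illustrative:
 *
 *	static memq_link_t link;
 *	static struct mayfly mfy = {0, 0, &link, NULL, my_handler};
 *
 *	mfy.param = my_param;
 *	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, 0,
 *			     &mfy);
 *	LL_ASSERT(!ret);
 */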

static void ticker_start_op_cb(uint32_t status, void *param)
{
	ARG_UNUSED(param);
	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
}

static void ticker_update_op_cb(uint32_t status, void *param)
{
	LL_ASSERT(status == TICKER_STATUS_SUCCESS ||
		  param == ull_disable_mark_get());
}

static void ticker_stop_sync_expire_op_cb(uint32_t status, void *param)
{
	uint32_t retval;
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, sync_expire};

	LL_ASSERT(status == TICKER_STATUS_SUCCESS);

	mfy.param = param;

	retval = mayfly_enqueue(TICKER_USER_ID_ULL_LOW, TICKER_USER_ID_ULL_HIGH,
				0, &mfy);
	LL_ASSERT(!retval);
}
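
/* NOTE: The ticker stop op callback executes in the ULL_LOW context, while
 * node rx enqueue towards the host is done from ULL_HIGH; hence sync_expire()
 * is not called directly but bounced through a ULL_LOW to ULL_HIGH mayfly.
 */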

static void sync_expire(void *param)
{
	struct ll_sync_set *sync = param;
	struct node_rx_sync *se;
	struct node_rx_pdu *rx;

	/* Generate Periodic advertising sync failed to establish */
	rx = (void *)sync->node_rx_sync_estab;
	rx->hdr.handle = LLL_HANDLE_INVALID;

	/* Clear the node to mark sync establishment as completed.
	 * In this case the completion reason is sync expire.
	 */
	sync->node_rx_sync_estab = NULL;

	/* NOTE: struct node_rx_sync_estab has a uint8_t member following the
	 * struct node_rx_hdr to store the reason.
	 */
	se = (void *)rx->pdu;
	se->status = BT_HCI_ERR_CONN_FAIL_TO_ESTAB;

	/* NOTE: footer param has already been populated during sync setup */

	/* Enqueue the sync failed-to-establish event towards the ULL context */
	ll_rx_put_sched(rx->hdr.link, rx);
}
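
/* NOTE: At the HCI layer this node is expected to surface as an LE Periodic
 * Advertising Sync Established event with status
 * BT_HCI_ERR_CONN_FAIL_TO_ESTAB (0x3E), telling the host that the sync
 * creation attempt timed out rather than being cancelled.
 */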

static void ticker_stop_sync_lost_op_cb(uint32_t status, void *param)
{
	uint32_t retval;
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, sync_lost};

	/* When racing with a terminate request from the thread context, do
	 * not generate the sync lost node rx from here.
	 */
	if (status != TICKER_STATUS_SUCCESS) {
		LL_ASSERT(param == ull_disable_mark_get());

		return;
	}

	mfy.param = param;

	retval = mayfly_enqueue(TICKER_USER_ID_ULL_LOW, TICKER_USER_ID_ULL_HIGH,
				0, &mfy);
	LL_ASSERT(!retval);
}

static void sync_lost(void *param)
{
	struct ll_sync_set *sync;
	struct node_rx_pdu *rx;

	/* If the sync established event has not been generated yet, there is
	 * no free node rx for sync lost; generate a sync expire (failed to
	 * establish) event instead.
	 */
	sync = param;
	if (sync->lll_sync_prepare != lll_sync_prepare) {
		sync_expire(param);

		return;
	}

	/* Generate Periodic advertising sync lost */
	rx = (void *)&sync->node_rx_lost;
	rx->hdr.handle = ull_sync_handle_get(sync);
	rx->hdr.type = NODE_RX_TYPE_SYNC_LOST;
	rx->rx_ftr.param = sync;

	/* Enqueue the sync lost event towards the ULL context */
	ll_rx_put_sched(rx->hdr.link, rx);

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	if (sync->iso.sync_iso) {
		/* ISO create BIG flag in the periodic advertising context is still set */
		struct ll_sync_iso_set *sync_iso;

		sync_iso = sync->iso.sync_iso;

		rx = (void *)&sync_iso->node_rx_lost;
		rx->hdr.handle = sync_iso->big_handle;
		rx->hdr.type = NODE_RX_TYPE_SYNC_ISO;
		rx->rx_ftr.param = sync_iso;
		*((uint8_t *)rx->pdu) = BT_HCI_ERR_CONN_FAIL_TO_ESTAB;

		/* Enqueue the sync iso lost event towards the ULL context */
		ll_rx_put_sched(rx->hdr.link, rx);
	}
#endif /* CONFIG_BT_CTLR_SYNC_ISO */
}

#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)
static struct ll_sync_set *sync_is_create_get(uint16_t handle)
{
	struct ll_sync_set *sync;

	sync = ull_sync_set_get(handle);
	if (!sync || !sync->timeout) {
		return NULL;
	}

	return sync;
}

static bool peer_sid_sync_exists(uint8_t const peer_id_addr_type,
				 uint8_t const *const peer_id_addr,
				 uint8_t sid)
{
	uint16_t handle;

	for (handle = 0U; handle < CONFIG_BT_PER_ADV_SYNC_MAX; handle++) {
		struct ll_sync_set *sync = sync_is_create_get(handle);

		if (sync &&
		    (sync->peer_id_addr_type == peer_id_addr_type) &&
		    !memcmp(sync->peer_id_addr, peer_id_addr, BDADDR_SIZE) &&
		    (sync->sid == sid)) {
			return true;
		}
	}

	return false;
}
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC */
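
/* NOTE: A sketch of the duplicate-sync guard as it might be used before
 * creating a new sync context (mirrors the check done in
 * ull_sync_transfer_received() below):
 *
 *	if (peer_sid_sync_exists(addr_type, adv_addr, sid)) {
 *		return BT_HCI_ERR_CONN_ALREADY_EXISTS;
 *	}
 */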

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
static void ticker_update_op_status_give(uint32_t status, void *param)
{
	*((uint32_t volatile *)param) = status;

	k_sem_give(&sem_ticker_cb);
}
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING) && \
	!defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
static struct pdu_cte_info *pdu_cte_info_get(struct pdu_adv *pdu)
{
	struct pdu_adv_com_ext_adv *com_hdr;
	struct pdu_adv_ext_hdr *hdr;

	com_hdr = &pdu->adv_ext_ind;
	hdr = &com_hdr->ext_hdr;

	if (!com_hdr->ext_hdr_len || !hdr->cte_info) {
		return NULL;
	}

	/* Make sure there are no fields that are not allowed for AUX_SYNC_IND and AUX_CHAIN_IND */
	LL_ASSERT(!hdr->adv_addr);
	LL_ASSERT(!hdr->tgt_addr);

	return (struct pdu_cte_info *)hdr->data;
}
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING && !CONFIG_BT_CTLR_CTEINLINE_SUPPORT */
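
/* NOTE: In the Common Extended Advertising Payload Format, the optional
 * extended header fields follow the flags byte in a fixed order: AdvA,
 * TargetA, CTEInfo, ADI, AuxPtr, SyncInfo, TxPower. Since AdvA and TargetA
 * are asserted absent above, CTEInfo, when present, is the first field in
 * hdr->data, which is why it can be returned without walking any offsets.
 */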

#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
void ull_sync_transfer_received(struct ll_conn *conn, uint16_t service_data,
				struct pdu_adv_sync_info *si, uint16_t conn_event_count,
				uint16_t last_pa_event_counter, uint8_t sid,
				uint8_t addr_type, uint8_t sca, uint8_t phy,
				uint8_t *adv_addr, uint16_t sync_conn_event_count,
				uint8_t addr_resolved)
{
	struct ll_sync_set *sync;
	uint16_t conn_evt_current;
	uint8_t rx_enable;
	uint8_t nodups;

	if (conn->past.mode == BT_HCI_LE_PAST_MODE_NO_SYNC) {
		/* Ignore LL_PERIODIC_SYNC_IND - see Bluetooth Core Specification v5.4
		 * Vol 4, Part E, Section 7.8.91
		 */
		return;
	}

#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)
	/* Do not sync twice to the same peer and same SID */
	if (peer_sid_sync_exists(addr_type, adv_addr, sid)) {
		return;
	}
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC */

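	/* Map the PAST receiver mode to report generation settings: the
	 * "no reports" mode disables report reception, the "sync with
	 * duplicate filtering" mode enables duplicate filtering, and plain
	 * "sync" mode enables reports without filtering.
	 */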
	nodups = (conn->past.mode == BT_HCI_LE_PAST_MODE_SYNC_FILTER_DUPLICATES) ? 1U : 0U;
	rx_enable = (conn->past.mode == BT_HCI_LE_PAST_MODE_NO_REPORTS) ? 0U : 1U;

	sync = ull_sync_create(sid, conn->past.timeout, conn->past.skip, conn->past.cte_type,
			       rx_enable, nodups);
	if (!sync) {
		return;
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
	/* Reset filter policy in lll_sync */
	sync->lll.filter_policy = 0U;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

	sync->peer_id_addr_type = addr_type;
	sync->peer_addr_resolved = addr_resolved;
	memcpy(sync->peer_id_addr, adv_addr, BDADDR_SIZE);
	sync->lll.phy = phy;

	conn_evt_current = ull_conn_event_counter(conn);

	/* LLCP should have ensured this holds */
	LL_ASSERT(sync_conn_event_count != conn_evt_current);

	ull_sync_setup_from_sync_transfer(conn, service_data, sync, si,
					  conn_event_count - conn_evt_current,
					  last_pa_event_counter, sync_conn_event_count,
					  sca);
}
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
