/*
 * Copyright (c) 2020-2021 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <soc.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/bluetooth/hci_types.h>

#include "util/util.h"
#include "util/mem.h"
#include "util/memq.h"
#include "util/mayfly.h"
#include "util/dbuf.h"

#include "hal/cpu.h"
#include "hal/ccm.h"
#include "hal/radio.h"
#include "hal/ticker.h"

#include "ticker/ticker.h"

#include "pdu_df.h"
#include "lll/pdu_vendor.h"
#include "pdu.h"

#include "lll.h"
#include "lll_clock.h"
#include "lll/lll_vendor.h"
#include "lll_chan.h"
#include "lll_scan.h"
#include "lll/lll_df_types.h"
#include "lll_conn.h"
#include "lll_sync.h"
#include "lll_sync_iso.h"

#include "ull_filter.h"
#include "ull_scan_types.h"
#include "ull_sync_types.h"

#include "ull_internal.h"
#include "ull_scan_internal.h"
#include "ull_sync_internal.h"
#include "ull_df_types.h"
#include "ull_df_internal.h"

#include "ll.h"

#include "hal/debug.h"

/* Check that the timeout_reload member is at a safe offset when ll_sync_set
 * is allocated using the mem interface. timeout_reload being non-zero is used
 * to indicate that a sync is established, and to check for the sync being
 * terminated under race conditions between the HCI Tx and Rx threads when
 * Periodic Advertising Reports are generated.
 */
MEM_FREE_MEMBER_ACCESS_BUILD_ASSERT(struct ll_sync_set, timeout_reload);
61
62 static int init_reset(void);
63 static inline struct ll_sync_set *sync_acquire(void);
64 static void sync_ticker_cleanup(struct ll_sync_set *sync, ticker_op_func stop_op_cb);
65 static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
66 uint32_t remainder, uint16_t lazy, uint8_t force,
67 void *param);
68 static void ticker_start_op_cb(uint32_t status, void *param);
69 static void ticker_update_op_cb(uint32_t status, void *param);
70 static void ticker_stop_sync_expire_op_cb(uint32_t status, void *param);
71 static void sync_expire(void *param);
72 static void ticker_stop_sync_lost_op_cb(uint32_t status, void *param);
73 static void sync_lost(void *param);
74 #if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)
75 static bool peer_sid_sync_exists(uint8_t const peer_id_addr_type,
76 uint8_t const *const peer_id_addr,
77 uint8_t sid);
78 #endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC */
79 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING) && \
80 !defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
81 static struct pdu_cte_info *pdu_cte_info_get(struct pdu_adv *pdu);
82 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING && !CONFIG_BT_CTLR_CTEINLINE_SUPPORT */
83
84 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
85 static void ticker_update_op_status_give(uint32_t status, void *param);
86 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */
87
88 static struct ll_sync_set ll_sync_pool[CONFIG_BT_PER_ADV_SYNC_MAX];
89 static void *sync_free;
90
91 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
92 /* Semaphore to wakeup thread on ticker API callback */
93 static struct k_sem sem_ticker_cb;
94 #endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

uint8_t ll_sync_create(uint8_t options, uint8_t sid, uint8_t adv_addr_type,
		       uint8_t *adv_addr, uint16_t skip,
		       uint16_t sync_timeout, uint8_t sync_cte_type)
{
	struct ll_scan_set *scan_coded;
	memq_link_t *link_sync_estab;
	memq_link_t *link_sync_lost;
	struct node_rx_pdu *node_rx;
	struct lll_sync *lll_sync;
	struct ll_scan_set *scan;
	struct ll_sync_set *sync;

	scan = ull_scan_set_get(SCAN_HANDLE_1M);
	if (!scan || scan->periodic.sync) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
		if (!scan_coded || scan_coded->periodic.sync) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}

#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)
	/* Do not sync twice to the same peer and same SID */
	if (((options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_USE_LIST) == 0U) &&
	    peer_sid_sync_exists(adv_addr_type, adv_addr, sid)) {
		return BT_HCI_ERR_CONN_ALREADY_EXISTS;
	}
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC */

	link_sync_estab = ll_rx_link_alloc();
	if (!link_sync_estab) {
		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	link_sync_lost = ll_rx_link_alloc();
	if (!link_sync_lost) {
		ll_rx_link_release(link_sync_estab);

		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	node_rx = ll_rx_alloc();
	if (!node_rx) {
		ll_rx_link_release(link_sync_lost);
		ll_rx_link_release(link_sync_estab);

		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	sync = sync_acquire();
	if (!sync) {
		ll_rx_release(node_rx);
		ll_rx_link_release(link_sync_lost);
		ll_rx_link_release(link_sync_estab);

		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	scan->periodic.cancelled = 0U;
	scan->periodic.state = LL_SYNC_STATE_IDLE;
	scan->periodic.filter_policy =
		options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_USE_LIST;
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded->periodic.cancelled = 0U;
		scan_coded->periodic.state = LL_SYNC_STATE_IDLE;
		scan_coded->periodic.filter_policy =
			scan->periodic.filter_policy;
	}

	if (!scan->periodic.filter_policy) {
		scan->periodic.sid = sid;
		scan->periodic.adv_addr_type = adv_addr_type;
		(void)memcpy(scan->periodic.adv_addr, adv_addr, BDADDR_SIZE);

		if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
			scan_coded->periodic.sid = scan->periodic.sid;
			scan_coded->periodic.adv_addr_type =
				scan->periodic.adv_addr_type;
			(void)memcpy(scan_coded->periodic.adv_addr,
				     scan->periodic.adv_addr, BDADDR_SIZE);
		}
	}

	/* Initialize sync context */
	node_rx->hdr.link = link_sync_estab;
	sync->node_rx_lost.rx.hdr.link = link_sync_lost;

	/* Make sure that node_rx_sync_estab has nothing assigned. It is used
	 * to mark when sync establishment is in progress.
	 */
	LL_ASSERT(!sync->node_rx_sync_estab);
	sync->node_rx_sync_estab = node_rx;

	/* Reporting initially enabled/disabled */
	sync->rx_enable =
		!(options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_REPORTS_DISABLED);

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
	sync->nodups = (options &
			BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_FILTER_DUPLICATE) ?
		       1U : 0U;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */
	sync->skip = skip;
	sync->is_stop = 0U;

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	sync->enc = 0U;
#endif /* CONFIG_BT_CTLR_SYNC_ISO */

	/* NOTE: A non-zero timeout is used to represent a sync context in use
	 * for sync create.
	 */
	sync->timeout = sync_timeout;

	/* NOTE: A non-zero timeout_reload is used to represent an established
	 * sync.
	 */
	sync->timeout_reload = 0U;
	sync->timeout_expire = 0U;

#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)
	/* Remember the peer address when the periodic advertiser list is not
	 * used.
	 * NOTE: The peer address will be filled/overwritten with the correct
	 * identity address on sync setup when privacy is enabled.
	 */
	if ((options & BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_USE_LIST) == 0U) {
		sync->peer_id_addr_type = adv_addr_type;
		(void)memcpy(sync->peer_id_addr, adv_addr,
			     sizeof(sync->peer_id_addr));
	}

	/* Remember the SID */
	sync->sid = sid;
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC */

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	/* Reset Broadcast Isochronous Group Sync Establishment */
	sync->iso.sync_iso = NULL;
#endif /* CONFIG_BT_CTLR_SYNC_ISO */

	/* Initialize sync LLL context */
	lll_sync = &sync->lll;
	lll_sync->lll_aux = NULL;
	lll_sync->is_rx_enabled = sync->rx_enable;
	lll_sync->skip_prepare = 0U;
	lll_sync->skip_event = 0U;
	lll_sync->window_widening_prepare_us = 0U;
	lll_sync->window_widening_event_us = 0U;
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
	lll_sync->cte_type = sync_cte_type;
	lll_sync->filter_policy = scan->periodic.filter_policy;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	ull_df_sync_cfg_init(&lll_sync->df_cfg);
	LL_ASSERT(!lll_sync->node_cte_incomplete);
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	/* Initialise ULL and LLL headers */
	ull_hdr_init(&sync->ull);
	lll_hdr_init(lll_sync, sync);

#if defined(CONFIG_BT_CTLR_SCAN_AUX_SYNC_RESERVE_MIN)
	/* Initialise LLL abort count */
	lll_sync->abort_count = 0U;
#endif /* CONFIG_BT_CTLR_SCAN_AUX_SYNC_RESERVE_MIN */

	/* Enable scanner to create sync */
	scan->periodic.sync = sync;

#if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
	scan->lll.is_sync = 1U;
#endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded->periodic.sync = sync;

#if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
		scan_coded->lll.is_sync = 1U;
#endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
	}

	return 0;
}

uint8_t ll_sync_create_cancel(void **rx)
{
	struct ll_scan_set *scan_coded;
	memq_link_t *link_sync_estab;
	memq_link_t *link_sync_lost;
	struct node_rx_pdu *node_rx;
	struct ll_scan_set *scan;
	struct ll_sync_set *sync;
	struct node_rx_sync *se;

	scan = ull_scan_set_get(SCAN_HANDLE_1M);
	if (!scan || !scan->periodic.sync) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
		if (!scan_coded || !scan_coded->periodic.sync) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}

	/* Check for the race condition wherein sync is established while sync
	 * create cancel is being invoked.
	 *
	 * Set `scan->periodic.cancelled` to represent cancellation requested
	 * in the thread context. Checking `scan->periodic.sync` for NULL then
	 * confirms whether synchronization was established before
	 * `scan->periodic.cancelled` was set to 1U.
	 */
	scan->periodic.cancelled = 1U;
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded->periodic.cancelled = 1U;
	}
	cpu_dmb();
	sync = scan->periodic.sync;
	if (!sync) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
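
	/* Sketch of the flag/flag ordering relied upon here (assuming the
	 * setup path publishes periodic.sync and then checks
	 * periodic.cancelled under its own barrier):
	 *
	 *   this thread                    setup path (assumption)
	 *   cancelled = 1U;                periodic.sync = sync;
	 *   cpu_dmb();                     cpu_dmb();
	 *   read periodic.sync;            read periodic.cancelled;
	 *
	 * Ordered this way, at least one side observes the other's write, so
	 * cancel and setup cannot both miss each other.
	 */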

	/* node_rx_sync_estab is assigned when the Host calls create sync and
	 * cleared when sync is established. timeout_reload is set when sync is
	 * found and set up. It is non-zero until sync is terminated. Together
	 * they give information about the current sync state:
	 * - node_rx_sync_estab == NULL && timeout_reload != 0 => sync is established
	 * - node_rx_sync_estab == NULL && timeout_reload == 0 => sync is terminated
	 * - node_rx_sync_estab != NULL && timeout_reload == 0 => sync is created
	 * - node_rx_sync_estab != NULL && timeout_reload != 0 => sync is waiting to be established
	 */
	if (!sync->node_rx_sync_estab) {
		/* There is no sync to be cancelled */
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	sync->is_stop = 1U;
	cpu_dmb();

	if (sync->timeout_reload != 0U) {
		uint16_t sync_handle = ull_sync_handle_get(sync);

		LL_ASSERT(sync_handle <= UINT8_MAX);

		/* Sync is not established yet, so stop the sync ticker */
		const int err =
			ull_ticker_stop_with_mark((TICKER_ID_SCAN_SYNC_BASE +
						   (uint8_t)sync_handle),
						  sync, &sync->lll);
		if (err != 0 && err != -EALREADY) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	} /* else: sync was created but not yet set up, there is no sync ticker yet. */

	/* It is safe to remove the association with the scanner as the
	 * cancelled flag is set, the sync is_stop flag was set, and sync has
	 * not been established.
	 */
	ull_sync_setup_reset(scan);

	/* Mark the sync context as sync create cancelled */
	if (IS_ENABLED(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)) {
		sync->timeout = 0U;
	}

	node_rx = sync->node_rx_sync_estab;
	link_sync_estab = node_rx->hdr.link;
	link_sync_lost = sync->node_rx_lost.rx.hdr.link;

	ll_rx_link_release(link_sync_lost);
	ll_rx_link_release(link_sync_estab);
	ll_rx_release(node_rx);

	/* Clear the node after release to mark the sync establishment as
	 * completed. In this case the completion reason is sync cancelled by
	 * the Host.
	 */
	sync->node_rx_sync_estab = NULL;

	node_rx = (void *)&sync->node_rx_lost;
	node_rx->hdr.type = NODE_RX_TYPE_SYNC;
	node_rx->hdr.handle = LLL_HANDLE_INVALID;

	/* NOTE: struct node_rx_lost has a uint8_t member following the
	 * struct node_rx_hdr to store the reason.
	 */
	se = (void *)node_rx->pdu;
	se->status = BT_HCI_ERR_OP_CANCELLED_BY_HOST;

	/* NOTE: Since NODE_RX_TYPE_SYNC is only generated from the ULL
	 * context, pass the ULL sync context as the parameter.
	 */
	node_rx->rx_ftr.param = sync;

	*rx = node_rx;

	return 0;
}

uint8_t ll_sync_terminate(uint16_t handle)
{
	struct lll_scan_aux *lll_aux;
	memq_link_t *link_sync_lost;
	struct ll_sync_set *sync;
	int err;

	sync = ull_sync_is_enabled_get(handle);
	if (!sync) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	/* Request terminate, no new ULL scheduling to be set up */
	sync->is_stop = 1U;
	cpu_dmb();

	/* Stop periodic sync ticker timeouts */
	err = ull_ticker_stop_with_mark(TICKER_ID_SCAN_SYNC_BASE + handle,
					sync, &sync->lll);
	LL_ASSERT_INFO2(err == 0 || err == -EALREADY, handle, err);
	if (err) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Check and stop any auxiliary PDU receptions */
	lll_aux = sync->lll.lll_aux;
	if (lll_aux) {
		struct ll_scan_aux_set *aux;

		aux = HDR_LLL2ULL(lll_aux);
		err = ull_scan_aux_stop(aux);
		if (err && (err != -EALREADY)) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}

		LL_ASSERT(!aux->parent);
	}

	link_sync_lost = sync->node_rx_lost.rx.hdr.link;
	ll_rx_link_release(link_sync_lost);

	/* Mark the sync context as not sync established */
	sync->timeout_reload = 0U;

	ull_sync_release(sync);

	return 0;
}

/* @brief Link Layer interface function corresponding to the HCI LE Set
 * Periodic Advertising Receive Enable command.
 *
 * @param[in] handle Sync_Handle identifying the periodic advertising
 *                   train. Range: 0x0000 to 0x0EFF.
 * @param[in] enable Bit 0 - Reporting enabled.
 *                   Bit 1 - Duplicate filtering enabled.
 *                   All other bits - Reserved for future use.
 *
 * @return HCI error codes as documented in Bluetooth Core Specification v5.3.
 */
uint8_t ll_sync_recv_enable(uint16_t handle, uint8_t enable)
{
	struct ll_sync_set *sync;

	sync = ull_sync_is_enabled_get(handle);
	if (!sync) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	/* Reporting enabled/disabled */
	sync->rx_enable = (enable & BT_HCI_LE_SET_PER_ADV_RECV_ENABLE_ENABLE) ?
			  1U : 0U;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
	sync->nodups = (enable & BT_HCI_LE_SET_PER_ADV_RECV_ENABLE_FILTER_DUPLICATE) ?
		       1U : 0U;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT */

	return 0;
}
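
/* Usage sketch (hypothetical host flow): enabling reports together with
 * duplicate filtering maps to both defined bits being set in the HCI
 * command parameter:
 *
 *   uint8_t enable = BT_HCI_LE_SET_PER_ADV_RECV_ENABLE_ENABLE |
 *                    BT_HCI_LE_SET_PER_ADV_RECV_ENABLE_FILTER_DUPLICATE;
 *   (void)ll_sync_recv_enable(sync_handle, enable);
 *
 * Reserved bits are ignored by this implementation rather than rejected.
 */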

int ull_sync_init(void)
{
	int err;

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

int ull_sync_reset(void)
{
	uint16_t handle;
	void *rx;
	int err;

	(void)ll_sync_create_cancel(&rx);

	for (handle = 0U; handle < CONFIG_BT_PER_ADV_SYNC_MAX; handle++) {
		(void)ll_sync_terminate(handle);
	}

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

struct ll_sync_set *ull_sync_set_get(uint16_t handle)
{
	if (handle >= CONFIG_BT_PER_ADV_SYNC_MAX) {
		return NULL;
	}

	return &ll_sync_pool[handle];
}

struct ll_sync_set *ull_sync_is_enabled_get(uint16_t handle)
{
	struct ll_sync_set *sync;

	sync = ull_sync_set_get(handle);
	if (!sync || !sync->timeout_reload) {
		return NULL;
	}

	return sync;
}

struct ll_sync_set *ull_sync_is_valid_get(struct ll_sync_set *sync)
{
	if (((uint8_t *)sync < (uint8_t *)ll_sync_pool) ||
	    ((uint8_t *)sync > ((uint8_t *)ll_sync_pool +
	     (sizeof(struct ll_sync_set) * (CONFIG_BT_PER_ADV_SYNC_MAX - 1))))) {
		return NULL;
	}

	return sync;
}

struct lll_sync *ull_sync_lll_is_valid_get(struct lll_sync *lll)
{
	struct ll_sync_set *sync;

	sync = HDR_LLL2ULL(lll);
	sync = ull_sync_is_valid_get(sync);
	if (sync) {
		return &sync->lll;
	}

	return NULL;
}

uint16_t ull_sync_handle_get(struct ll_sync_set *sync)
{
	return mem_index_get(sync, ll_sync_pool, sizeof(struct ll_sync_set));
}

uint16_t ull_sync_lll_handle_get(struct lll_sync *lll)
{
	return ull_sync_handle_get(HDR_LLL2ULL(lll));
}

void ull_sync_release(struct ll_sync_set *sync)
{
#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	struct lll_sync *lll = &sync->lll;

	if (lll->node_cte_incomplete) {
		const uint8_t release_cnt = 1U;
		struct node_rx_pdu *node_rx;
		memq_link_t *link;

		node_rx = &lll->node_cte_incomplete->rx;
		link = node_rx->hdr.link;

		ll_rx_link_release(link);
		ull_iq_report_link_inc_quota(release_cnt);
		ull_df_iq_report_mem_release(node_rx);
		ull_df_rx_iq_report_alloc(release_cnt);

		lll->node_cte_incomplete = NULL;
	}
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	/* Mark the sync context as sync create cancelled */
	if (IS_ENABLED(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)) {
		sync->timeout = 0U;
	}

	/* Reset the accumulated data length */
	sync->data_len = 0U;

	mem_release(sync, &sync_free);
}

void ull_sync_setup_addr_check(struct ll_scan_set *scan, uint8_t addr_type,
			       uint8_t *addr, uint8_t rl_idx)
{
	/* Check if the Periodic Advertiser list is to be used */
	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST) &&
	    scan->periodic.filter_policy) {
		/* Check in the Periodic Advertiser List */
		if (ull_filter_ull_pal_addr_match(addr_type, addr)) {
			/* Remember the address, to check with the SID in
			 * the Sync Info
			 */
			scan->periodic.adv_addr_type = addr_type;
			(void)memcpy(scan->periodic.adv_addr, addr,
				     BDADDR_SIZE);

			/* Address matched */
			scan->periodic.state = LL_SYNC_STATE_ADDR_MATCH;

		/* Check in the Resolving List */
		} else if (IS_ENABLED(CONFIG_BT_CTLR_PRIVACY) &&
			   ull_filter_ull_pal_listed(rl_idx, &addr_type,
						     scan->periodic.adv_addr)) {
			/* Remember the address, to check with the SID in
			 * the Sync Info
			 */
			scan->periodic.adv_addr_type = addr_type;

			/* Mark it as an identity address from an RPA
			 * (0x02, 0x03)
			 */
			scan->periodic.adv_addr_type += 2U;

			/* Address matched */
			scan->periodic.state = LL_SYNC_STATE_ADDR_MATCH;
		}

	/* Check with the explicitly supplied address */
	} else if ((addr_type == scan->periodic.adv_addr_type) &&
		   !memcmp(addr, scan->periodic.adv_addr, BDADDR_SIZE)) {
		/* Address matched */
		scan->periodic.state = LL_SYNC_STATE_ADDR_MATCH;

	/* Check the identity address with the explicitly supplied address */
	} else if (IS_ENABLED(CONFIG_BT_CTLR_PRIVACY) &&
		   (rl_idx < ll_rl_size_get())) {
		ll_rl_id_addr_get(rl_idx, &addr_type, addr);
		if ((addr_type == scan->periodic.adv_addr_type) &&
		    !memcmp(addr, scan->periodic.adv_addr, BDADDR_SIZE)) {
			/* Mark it as an identity address from an RPA
			 * (0x02, 0x03)
			 */
			scan->periodic.adv_addr_type += 2U;

			/* Identity address matched */
			scan->periodic.state = LL_SYNC_STATE_ADDR_MATCH;
		}
	}
}

bool ull_sync_setup_sid_match(struct ll_scan_set *scan, uint8_t sid)
{
	return (scan->periodic.state == LL_SYNC_STATE_ADDR_MATCH) &&
	       ((IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC_ADV_LIST) &&
		 scan->periodic.filter_policy &&
		 ull_filter_ull_pal_match(scan->periodic.adv_addr_type,
					  scan->periodic.adv_addr, sid)) ||
		(!scan->periodic.filter_policy &&
		 (sid == scan->periodic.sid)));
}

void ull_sync_setup(struct ll_scan_set *scan, struct ll_scan_aux_set *aux,
		    struct node_rx_pdu *node_rx, struct pdu_adv_sync_info *si)
{
	uint32_t ticks_slot_overhead;
	uint32_t ticks_slot_offset;
	struct ll_sync_set *sync;
	struct node_rx_sync *se;
	struct node_rx_ftr *ftr;
	uint32_t sync_offset_us;
	uint32_t ready_delay_us;
	struct node_rx_pdu *rx;
	uint8_t *data_chan_map;
	struct lll_sync *lll;
	uint16_t sync_handle;
	uint32_t interval_us;
	uint32_t overhead_us;
	struct pdu_adv *pdu;
	uint16_t interval;
	uint32_t slot_us;
	uint8_t chm_last;
	uint32_t ret;
	uint8_t sca;

	/* Populate the LLL context */
	sync = scan->periodic.sync;
	lll = &sync->lll;

	/* Copy the channel map from the sca_chm field in the sync_info
	 * structure, and clear the SCA bits.
	 */
	chm_last = lll->chm_first;
	lll->chm_last = chm_last;
	data_chan_map = lll->chm[chm_last].data_chan_map;
	(void)memcpy(data_chan_map, si->sca_chm,
		     sizeof(lll->chm[chm_last].data_chan_map));
	data_chan_map[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &=
		~PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK;
	lll->chm[chm_last].data_chan_count =
		util_ones_count_get(data_chan_map,
				    sizeof(lll->chm[chm_last].data_chan_map));
	if (lll->chm[chm_last].data_chan_count < CHM_USED_COUNT_MIN) {
		/* Ignore sync setup, invalid available channel count */
		return;
	}

#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC) || \
	defined(CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT)
	/* Remember the peer address.
	 * NOTE: The peer identity address is copied here when privacy is
	 * enabled.
	 */
	sync->peer_id_addr_type = scan->periodic.adv_addr_type & 0x01;
	(void)memcpy(sync->peer_id_addr, scan->periodic.adv_addr,
		     sizeof(sync->peer_id_addr));
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC ||
	* CONFIG_BT_CTLR_SYNC_PERIODIC_ADI_SUPPORT
	*/

	memcpy(lll->access_addr, si->aa, sizeof(lll->access_addr));
	lll->data_chan_id = lll_chan_id(lll->access_addr);
	memcpy(lll->crc_init, si->crc_init, sizeof(lll->crc_init));
	lll->event_counter = sys_le16_to_cpu(si->evt_cntr);
	lll->phy = aux->lll.phy;

	interval = sys_le16_to_cpu(si->interval);
	interval_us = interval * PERIODIC_INT_UNIT_US;

	/* Convert the timeout from 10 ms units to a periodic event count */
	sync->timeout_reload = RADIO_SYNC_EVENTS((sync->timeout * 10U *
						  USEC_PER_MSEC), interval_us);
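
	/* Worked example (illustrative values): a Host Sync_Timeout of 100
	 * (100 * 10 ms = 1 s) with a periodic interval of 80 units
	 * (80 * 1.25 ms = 100 ms) yields a timeout_reload of about
	 * 1000000 / 100000 = 10 periodic advertising events, subject to the
	 * rounding done by RADIO_SYNC_EVENTS().
	 */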

	/* Adjust the Skip value so that there is a minimum of 6 events that
	 * can be listened to before Sync_Timeout occurs.
	 * The adjustment of the skip value is controller implementation
	 * specific and not specified by the Bluetooth Core Specification v5.3.
	 * The Controller `may` use the Skip value, and the implementation here
	 * covers a case where the Skip value could lead to fewer events being
	 * listened to until Sync_Timeout. Listening to more consecutive events
	 * before Sync_Timeout increases the probability of retaining the
	 * Periodic Synchronization.
	 */
	if (sync->timeout_reload > CONN_ESTAB_COUNTDOWN) {
		uint16_t skip_max = sync->timeout_reload - CONN_ESTAB_COUNTDOWN;

		if (sync->skip > skip_max) {
			sync->skip = skip_max;
		}
	} else {
		sync->skip = 0U;
	}
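
	/* Continuing the example above: with timeout_reload = 10 events and
	 * CONN_ESTAB_COUNTDOWN of them reserved, a requested skip greater
	 * than (timeout_reload - CONN_ESTAB_COUNTDOWN) is clamped so that the
	 * reserved listening opportunities remain before Sync_Timeout fires.
	 */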

	sync->sync_expire = CONN_ESTAB_COUNTDOWN;

	/* Extract the SCA value from the sca_chm field of the sync_info
	 * structure.
	 */
	sca = (si->sca_chm[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &
	       PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK) >>
	      PDU_SYNC_INFO_SCA_CHM_SCA_BIT_POS;

#if defined(CONFIG_BT_CTLR_SYNC_ISO)
	lll->sca = sca;
#endif /* CONFIG_BT_CTLR_SYNC_ISO */

	lll->window_widening_periodic_us =
		DIV_ROUND_UP(((lll_clock_ppm_local_get() +
			       lll_clock_ppm_get(sca)) *
			      interval_us), USEC_PER_SEC);
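
	/* Worked example (assumed clock accuracies): with 50 ppm local and
	 * 50 ppm peer sleep clock accuracy over a 1 s interval, the widening
	 * is DIV_ROUND_UP((50 + 50) * 1000000, 1000000) = 100 us per event.
	 */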
	lll->window_widening_max_us = (interval_us >> 1) - EVENT_IFS_US;
	if (PDU_ADV_SYNC_INFO_OFFS_UNITS_GET(si)) {
		lll->window_size_event_us = OFFS_UNIT_300_US;
	} else {
		lll->window_size_event_us = OFFS_UNIT_30_US;
	}

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	lll->node_cte_incomplete = NULL;
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	/* Set the state to sync create */
	scan->periodic.state = LL_SYNC_STATE_CREATED;
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		struct ll_scan_set *scan_1m;

		scan_1m = ull_scan_set_get(SCAN_HANDLE_1M);
		if (scan == scan_1m) {
			struct ll_scan_set *scan_coded;

			scan_coded = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
			scan_coded->periodic.state = LL_SYNC_STATE_CREATED;
		} else {
			scan_1m->periodic.state = LL_SYNC_STATE_CREATED;
		}
	}

	sync_handle = ull_sync_handle_get(sync);

	/* Prepare sync notification, dispatch only on successful AUX_SYNC_IND
	 * reception.
	 */
	rx = (void *)sync->node_rx_sync_estab;
	rx->hdr.type = NODE_RX_TYPE_SYNC;
	rx->hdr.handle = sync_handle;
	rx->rx_ftr.param = scan;
	se = (void *)rx->pdu;
	se->interval = interval;
	se->phy = lll->phy;
	se->sca = sca;

	/* Calculate the offset and schedule sync radio events */
	ftr = &node_rx->rx_ftr;
	pdu = (void *)((struct node_rx_pdu *)node_rx)->pdu;

	ready_delay_us = lll_radio_rx_ready_delay_get(lll->phy, 1);

	sync_offset_us = ftr->radio_end_us;
	sync_offset_us += PDU_ADV_SYNC_INFO_OFFSET_GET(si) *
			  lll->window_size_event_us;
	/* offs_adjust may be 1 only if sync setup by LL_PERIODIC_SYNC_IND */
	sync_offset_us += (PDU_ADV_SYNC_INFO_OFFS_ADJUST_GET(si) ? OFFS_ADJUST_US : 0U);
	sync_offset_us -= PDU_AC_US(pdu->len, lll->phy, ftr->phy_flags);
	sync_offset_us -= EVENT_TICKER_RES_MARGIN_US;
	sync_offset_us -= EVENT_JITTER_US;
	sync_offset_us -= ready_delay_us;

	/* The minimum prepare tick offset plus the minimum preempt tick offset
	 * are the overheads before ULL scheduling can set up the radio for
	 * reception.
	 */
	overhead_us = HAL_TICKER_TICKS_TO_US(HAL_TICKER_CNTR_CMP_OFFSET_MIN << 1);

	/* CPU execution overhead to set up the radio for reception */
	overhead_us += EVENT_OVERHEAD_END_US + EVENT_OVERHEAD_START_US;

	/* If there is not sufficient CPU processing time, skip to receiving
	 * the next event.
	 */
	if ((sync_offset_us - ftr->radio_end_us) < overhead_us) {
		sync_offset_us += interval_us;
		lll->event_counter++;
	}

	interval_us -= lll->window_widening_periodic_us;

	/* Calculate the event time reservation */
	slot_us = PDU_AC_MAX_US(PDU_AC_EXT_PAYLOAD_RX_SIZE, lll->phy);
	slot_us += ready_delay_us;

	/* Add implementation defined radio event overheads */
	if (IS_ENABLED(CONFIG_BT_CTLR_EVENT_OVERHEAD_RESERVE_MAX)) {
		slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
	}

	/* TODO: active_to_start feature port */
	sync->ull.ticks_active_to_start = 0U;
	sync->ull.ticks_prepare_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	sync->ull.ticks_preempt_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
	sync->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);

	ticks_slot_offset = MAX(sync->ull.ticks_active_to_start,
				sync->ull.ticks_prepare_to_start);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}
	ticks_slot_offset += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

	sync->lll_sync_prepare = lll_sync_create_prepare;

	ret = ticker_start(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			   (TICKER_ID_SCAN_SYNC_BASE + sync_handle),
			   ftr->ticks_anchor - ticks_slot_offset,
			   HAL_TICKER_US_TO_TICKS(sync_offset_us),
			   HAL_TICKER_US_TO_TICKS(interval_us),
			   HAL_TICKER_REMAINDER(interval_us),
			   TICKER_NULL_LAZY,
			   (sync->ull.ticks_slot + ticks_slot_overhead),
			   ticker_cb, sync,
			   ticker_start_op_cb, (void *)__LINE__);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}

void ull_sync_setup_reset(struct ll_scan_set *scan)
{
	/* Remove the sync context from being associated with scan contexts */
	scan->periodic.sync = NULL;

#if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
	scan->lll.is_sync = 0U;
#endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */

	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		struct ll_scan_set *scan_1m;

		scan_1m = ull_scan_set_get(SCAN_HANDLE_1M);
		if (scan == scan_1m) {
			scan = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
		} else {
			scan = scan_1m;
		}

		scan->periodic.sync = NULL;

#if defined(CONFIG_BT_CTLR_FILTER_ACCEPT_LIST)
		scan->lll.is_sync = 0U;
#endif /* CONFIG_BT_CTLR_FILTER_ACCEPT_LIST */
	}
}

void ull_sync_established_report(memq_link_t *link, struct node_rx_pdu *rx)
{
	struct node_rx_pdu *rx_establ;
	struct ll_sync_set *sync;
	struct node_rx_ftr *ftr;
	struct node_rx_sync *se;
	struct lll_sync *lll;

	ftr = &rx->rx_ftr;
	lll = ftr->param;
	sync = HDR_LLL2ULL(lll);

	/* Do nothing if sync is cancelled or lost. */
	if (unlikely(sync->is_stop || !sync->timeout_reload)) {
		return;
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
	enum sync_status sync_status;

#if defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
	sync_status = ftr->sync_status;
#else
	struct pdu_cte_info *rx_cte_info;

	rx_cte_info = pdu_cte_info_get((struct pdu_adv *)rx->pdu);
	if (rx_cte_info != NULL) {
		sync_status = lll_sync_cte_is_allowed(lll->cte_type, lll->filter_policy,
						      rx_cte_info->time, rx_cte_info->type);
	} else {
		sync_status = lll_sync_cte_is_allowed(lll->cte_type, lll->filter_policy, 0,
						      BT_HCI_LE_NO_CTE);
	}

	/* If there is no CTEInline support, notify the done event handler to
	 * terminate the periodic advertising sync in case the CTE is not
	 * allowed.
	 * If the periodic filtering list is not used, then terminate the
	 * synchronization and notify the host. If the periodic filtering list
	 * is used, then stop synchronization with this particular periodic
	 * advertiser but continue to search for another one.
	 */
	sync->is_term = ((sync_status == SYNC_STAT_TERM) || (sync_status == SYNC_STAT_CONT_SCAN));
#endif /* CONFIG_BT_CTLR_CTEINLINE_SUPPORT */

	/* Send the periodic advertising sync established report when the sync
	 * has the correct CTE type, or when the CTE type is incorrect and the
	 * filter policy doesn't allow scanning to continue.
	 */
	if (sync_status == SYNC_STAT_ALLOWED || sync_status == SYNC_STAT_TERM) {
#else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

	if (1) {
#endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

		/* Prepare and dispatch the sync notification */
		rx_establ = sync->node_rx_sync_estab;
		rx_establ->hdr.type = NODE_RX_TYPE_SYNC;
		rx_establ->hdr.handle = ull_sync_handle_get(sync);
		se = (void *)rx_establ->pdu;
		/* Clear the node to mark the sync establishment as completed.
		 * In this case the completion reason is sync being
		 * established.
		 */
		sync->node_rx_sync_estab = NULL;

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
		se->status = (ftr->sync_status == SYNC_STAT_TERM) ?
			     BT_HCI_ERR_UNSUPP_REMOTE_FEATURE :
			     BT_HCI_ERR_SUCCESS;
#else
		se->status = BT_HCI_ERR_SUCCESS;
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

		/* NOTE: The footer param has already been populated during
		 * sync setup.
		 */

		ll_rx_put_sched(rx_establ->hdr.link, rx_establ);
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
	/* Handle the periodic advertising PDU and send the periodic
	 * advertising scan report when the sync was found or was established
	 * in the past. The report is not sent if scanning is terminated due
	 * to a wrong CTE type.
	 */
	if (sync_status == SYNC_STAT_ALLOWED || sync_status == SYNC_STAT_READY) {
#else /* !CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

	if (1) {
#endif /* !CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */

		/* Switch the sync event prepare function to the one
		 * responsible for receiving regular PDUs.
		 */
		sync->lll_sync_prepare = lll_sync_prepare;

		/* Change the node type to appropriately handle the periodic
		 * advertising PDU report.
		 */
		rx->hdr.type = NODE_RX_TYPE_SYNC_REPORT;
		ull_scan_aux_setup(link, rx);
	} else {
		rx->hdr.type = NODE_RX_TYPE_RELEASE;
		ll_rx_put_sched(link, rx);
	}
}

void ull_sync_done(struct node_rx_event_done *done)
{
	uint32_t ticks_drift_minus;
	uint32_t ticks_drift_plus;
	struct ll_sync_set *sync;
	uint16_t elapsed_event;
	uint16_t skip_event;
	uint16_t lazy;
	uint8_t force;

	/* Get reference to ULL context */
	sync = CONTAINER_OF(done->param, struct ll_sync_set, ull);

	/* Do nothing if local terminate requested or sync lost */
	if (unlikely(sync->is_stop || !sync->timeout_reload)) {
		return;
	}

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
#if defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
	if (done->extra.sync_term) {
#else
	if (sync->is_term) {
#endif /* CONFIG_BT_CTLR_CTEINLINE_SUPPORT */
		/* In case the periodic advertising list filtering is not used,
		 * the synchronization must be terminated and a host
		 * notification must be sent.
		 * In case the periodic advertising list filtering is used, the
		 * synchronization with this particular periodic advertiser is
		 * stopped, but the search for another one from the list
		 * continues.
		 *
		 * Stop the periodic advertising sync ticker and clear the
		 * variables informing that the sync is pending. That is a step
		 * towards completely terminating the synchronization. In case
		 * of a search for another periodic advertiser, it allows a new
		 * ticker to be set up for that purpose.
		 */
		sync_ticker_cleanup(sync, NULL);
	} else
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */
	{
		struct lll_sync *lll;

		lll = &sync->lll;

		/* Events elapsed, used in the timeout checks below */
		skip_event = lll->skip_event;
		if (lll->skip_prepare) {
			elapsed_event = skip_event + lll->skip_prepare;
		} else {
			elapsed_event = skip_event + 1U;
		}

		/* Sync drift compensation and new skip calculation */
		ticks_drift_plus = 0U;
		ticks_drift_minus = 0U;
		if (done->extra.trx_cnt) {
			/* Calculate the drift in ticks unit */
			ull_drift_ticks_get(done, &ticks_drift_plus, &ticks_drift_minus);

			/* Enforce skip */
			lll->skip_event = sync->skip;

			/* Reset the failed-to-establish sync countdown */
			sync->sync_expire = 0U;
		}

		/* Reset the supervision countdown */
		if (done->extra.crc_valid) {
			sync->timeout_expire = 0U;
		}

		/* Check whether sync failed to establish */
		else if (sync->sync_expire) {
			if (sync->sync_expire > elapsed_event) {
				sync->sync_expire -= elapsed_event;
			} else {
				sync_ticker_cleanup(sync, ticker_stop_sync_expire_op_cb);

				return;
			}
		}

		/* If the anchor point is not synced, start the timeout
		 * countdown, and break the skip, if any.
		 */
		else if (!sync->timeout_expire) {
			sync->timeout_expire = sync->timeout_reload;
		}

		/* Check timeout */
		force = 0U;
		if (sync->timeout_expire) {
			if (sync->timeout_expire > elapsed_event) {
				sync->timeout_expire -= elapsed_event;

				/* Break the skip */
				lll->skip_event = 0U;

				if (skip_event) {
					force = 1U;
				}
			} else {
				sync_ticker_cleanup(sync, ticker_stop_sync_lost_op_cb);

				return;
			}
		}

		/* Check if the skip needs an update */
		lazy = 0U;
		if ((force) || (skip_event != lll->skip_event)) {
			lazy = lll->skip_event + 1U;
		}
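
		/* Note on the +1 above: the lazy argument of ticker_update()
		 * below is assumed to encode "no change" as 0, so the new lazy
		 * count is passed offset by one. Breaking the skip thus passes
		 * 1, i.e. an effective lazy of 0 (expire on every event),
		 * while re-applying the skip passes sync->skip + 1.
		 */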

		/* Update the Sync ticker instance */
		if (ticks_drift_plus || ticks_drift_minus || lazy || force) {
			uint16_t sync_handle = ull_sync_handle_get(sync);
			uint32_t ticker_status;

			/* The call to ticker_update can fail under the race
			 * condition wherein the periodic sync role is being
			 * stopped but at the same time is preempted by a
			 * periodic sync event that gets into the close state.
			 * Accept failure when the periodic sync role is being
			 * stopped.
			 */
			ticker_status =
				ticker_update(TICKER_INSTANCE_ID_CTLR,
					      TICKER_USER_ID_ULL_HIGH,
					      (TICKER_ID_SCAN_SYNC_BASE +
					       sync_handle),
					      ticks_drift_plus,
					      ticks_drift_minus, 0, 0,
					      lazy, force,
					      ticker_update_op_cb, sync);
			LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
				  (ticker_status == TICKER_STATUS_BUSY) ||
				  ((void *)sync == ull_disable_mark_get()));
		}
	}
}

void ull_sync_chm_update(uint8_t sync_handle, uint8_t *acad, uint8_t acad_len)
{
	struct pdu_adv_sync_chm_upd_ind *chm_upd_ind;
	struct ll_sync_set *sync;
	struct lll_sync *lll;
	uint8_t chm_last;
	uint16_t ad_len;

	/* Get references to the ULL and LLL contexts */
	sync = ull_sync_set_get(sync_handle);
	LL_ASSERT(sync);
	lll = &sync->lll;

	/* Ignore if a channel map update is already in progress */
	if (lll->chm_last != lll->chm_first) {
		return;
	}

	/* Find the Channel Map Update Indication */
	do {
		/* Pick the length and find the Channel Map Update Indication */
		ad_len = acad[PDU_ADV_DATA_HEADER_LEN_OFFSET];
		if (ad_len &&
		    (acad[PDU_ADV_DATA_HEADER_TYPE_OFFSET] ==
		     PDU_ADV_DATA_TYPE_CHANNEL_MAP_UPDATE_IND)) {
			break;
		}

		/* Add the length field size */
		ad_len += 1U;
		if (ad_len < acad_len) {
			acad_len -= ad_len;
		} else {
			return;
		}

		/* Move to the next AD data */
		acad += ad_len;
	} while (acad_len);
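
	/* Illustrative ACAD walk (octet counts assumed): a Channel Map Update
	 * Indication AD structure is { len, type = 0x28, ChM[5], Instant[2] },
	 * i.e. len = 0x08 covering the type octet plus seven payload octets.
	 * The loop above steps acad by len + 1 per AD structure until this
	 * type is found or the ACAD is exhausted.
	 */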

	/* Validate the size of the Channel Map Update Indication */
	if (ad_len != (sizeof(*chm_upd_ind) + 1U)) {
		return;
	}

	/* Pick the parameters into the procedure context */
	chm_last = lll->chm_last + 1U;
	if (chm_last == DOUBLE_BUFFER_SIZE) {
		chm_last = 0U;
	}

	chm_upd_ind = (void *)&acad[PDU_ADV_DATA_HEADER_DATA_OFFSET];
	(void)memcpy(lll->chm[chm_last].data_chan_map, chm_upd_ind->chm,
		     sizeof(lll->chm[chm_last].data_chan_map));
	lll->chm[chm_last].data_chan_count =
		util_ones_count_get(lll->chm[chm_last].data_chan_map,
				    sizeof(lll->chm[chm_last].data_chan_map));
	if (lll->chm[chm_last].data_chan_count < CHM_USED_COUNT_MIN) {
		/* Ignore the channel map, invalid available channel count */
		return;
	}

	lll->chm_instant = sys_le16_to_cpu(chm_upd_ind->instant);

	/* Set the Channel Map Update Procedure in progress */
	lll->chm_last = chm_last;
}

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
/* @brief Function updates the periodic sync slot duration.
 *
 * @param[in] sync          Pointer to sync instance
 * @param[in] slot_plus_us  Number of microseconds to add to the ticker slot
 * @param[in] slot_minus_us Number of microseconds to subtract from the ticker
 *                          slot
 *
 * @retval 0       Successful ticker slot update.
 * @retval -ENOENT Ticker node related to the provided sync is already stopped.
 * @retval -ENOMEM Couldn't enqueue the update ticker job.
 * @retval -EFAULT Something else went wrong.
 */
int ull_sync_slot_update(struct ll_sync_set *sync, uint32_t slot_plus_us,
			 uint32_t slot_minus_us)
{
	uint32_t volatile ret_cb;
	uint32_t ret;

	ret_cb = TICKER_STATUS_BUSY;
	ret = ticker_update(TICKER_INSTANCE_ID_CTLR,
			    TICKER_USER_ID_THREAD,
			    (TICKER_ID_SCAN_SYNC_BASE +
			     ull_sync_handle_get(sync)),
			    0, 0,
			    HAL_TICKER_US_TO_TICKS(slot_plus_us),
			    HAL_TICKER_US_TO_TICKS(slot_minus_us),
			    0, 0,
			    ticker_update_op_status_give,
			    (void *)&ret_cb);
	if (ret == TICKER_STATUS_BUSY || ret == TICKER_STATUS_SUCCESS) {
		/* Wait for the callback, or clear the semaphore if the
		 * callback was already executed.
		 */
		k_sem_take(&sem_ticker_cb, K_FOREVER);

		if (ret_cb == TICKER_STATUS_FAILURE) {
			return -EFAULT; /* Something went wrong */
		} else {
			return 0;
		}
	} else {
		if (ret_cb != TICKER_STATUS_BUSY) {
			/* The ticker callback was executed and the job enqueue
			 * was successful. Call k_sem_take to clear the ticker
			 * callback semaphore.
			 */
			k_sem_take(&sem_ticker_cb, K_FOREVER);
		}
		/* The ticker was already stopped or the job was not enqueued. */
		return (ret_cb == TICKER_STATUS_FAILURE) ? -ENOENT : -ENOMEM;
	}
}
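
/* Usage sketch (hypothetical caller; thread context only, since the call
 * blocks on sem_ticker_cb): widen the reception slot by 200 us for CTE
 * sampling and treat an already stopped ticker as benign:
 *
 *   int err = ull_sync_slot_update(sync, 200U, 0U);
 *   if (err && (err != -ENOENT)) {
 *           LL_ASSERT(0);
 *   }
 */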
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

static int init_reset(void)
{
	/* Initialize the sync pool. */
	mem_init(ll_sync_pool, sizeof(struct ll_sync_set),
		 sizeof(ll_sync_pool) / sizeof(struct ll_sync_set),
		 &sync_free);

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
	k_sem_init(&sem_ticker_cb, 0, 1);
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

	return 0;
}

static inline struct ll_sync_set *sync_acquire(void)
{
	return mem_acquire(&sync_free);
}

static void sync_ticker_cleanup(struct ll_sync_set *sync, ticker_op_func stop_op_cb)
{
	uint16_t sync_handle = ull_sync_handle_get(sync);
	uint32_t ret;

	/* Stop the Periodic Sync Ticker */
	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			  TICKER_ID_SCAN_SYNC_BASE + sync_handle, stop_op_cb, (void *)sync);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));

	/* Mark the sync context as not sync established */
	sync->timeout_reload = 0U;
}

static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param)
{
	static memq_link_t link_lll_prepare;
	static struct mayfly mfy_lll_prepare = {
		0, 0, &link_lll_prepare, NULL, NULL};
	static struct lll_prepare_param p;
	struct ll_sync_set *sync = param;
	struct lll_sync *lll;
	uint32_t ret;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_O(1);

	lll = &sync->lll;

	/* Commit the changed receive enable value */
	lll->is_rx_enabled = sync->rx_enable;

	/* Increment the prepare reference count */
	ref = ull_ref_inc(&sync->ull);
	LL_ASSERT(ref);

	/* Append timing parameters */
	p.ticks_at_expire = ticks_at_expire;
	p.remainder = remainder;
	p.lazy = lazy;
	p.force = force;
	p.param = lll;
	mfy_lll_prepare.param = &p;
	mfy_lll_prepare.fp = sync->lll_sync_prepare;

	/* Kick the LLL prepare */
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, 0,
			     &mfy_lll_prepare);
	LL_ASSERT(!ret);

	DEBUG_RADIO_PREPARE_O(1);
}

static void ticker_start_op_cb(uint32_t status, void *param)
{
	ARG_UNUSED(param);
	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
}

static void ticker_update_op_cb(uint32_t status, void *param)
{
	LL_ASSERT(status == TICKER_STATUS_SUCCESS ||
		  param == ull_disable_mark_get());
}

static void ticker_stop_sync_expire_op_cb(uint32_t status, void *param)
{
	uint32_t retval;
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, sync_expire};

	LL_ASSERT(status == TICKER_STATUS_SUCCESS);

	mfy.param = param;

	retval = mayfly_enqueue(TICKER_USER_ID_ULL_LOW, TICKER_USER_ID_ULL_HIGH,
				0, &mfy);
	LL_ASSERT(!retval);
}

static void sync_expire(void *param)
{
	struct ll_sync_set *sync = param;
	struct node_rx_sync *se;
	struct node_rx_pdu *rx;

	/* Generate the Periodic advertising sync failed to establish */
	rx = (void *)sync->node_rx_sync_estab;
	rx->hdr.type = NODE_RX_TYPE_SYNC;
	rx->hdr.handle = LLL_HANDLE_INVALID;

	/* Clear the node to mark the sync establishment as completed.
	 * In this case the completion reason is sync expire.
	 */
	sync->node_rx_sync_estab = NULL;

	/* NOTE: struct node_rx_sync_estab has a uint8_t member following the
	 * struct node_rx_hdr to store the reason.
	 */
	se = (void *)rx->pdu;
	se->status = BT_HCI_ERR_CONN_FAIL_TO_ESTAB;

	/* NOTE: The footer param has already been populated during sync setup. */

	/* Enqueue the sync failed to establish towards the ULL context */
	ll_rx_put_sched(rx->hdr.link, rx);
}

static void ticker_stop_sync_lost_op_cb(uint32_t status, void *param)
{
	uint32_t retval;
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, sync_lost};

	/* When in a race between terminate requested in the thread context
	 * and the sync lost scenario, do not generate the sync lost node rx
	 * from here.
	 */
	if (status != TICKER_STATUS_SUCCESS) {
		LL_ASSERT(param == ull_disable_mark_get());

		return;
	}

	mfy.param = param;

	retval = mayfly_enqueue(TICKER_USER_ID_ULL_LOW, TICKER_USER_ID_ULL_HIGH,
				0, &mfy);
	LL_ASSERT(!retval);
}

static void sync_lost(void *param)
{
	struct ll_sync_set *sync;
	struct node_rx_pdu *rx;

	/* The sync established event was not generated yet, there is no free
	 * node rx.
	 */
	sync = param;
	if (sync->lll_sync_prepare != lll_sync_prepare) {
		sync_expire(param);

		return;
	}

	/* Generate the Periodic advertising sync lost */
	rx = (void *)&sync->node_rx_lost;
	rx->hdr.handle = ull_sync_handle_get(sync);
	rx->hdr.type = NODE_RX_TYPE_SYNC_LOST;
	rx->rx_ftr.param = sync;

	/* Enqueue the sync lost towards the ULL context */
	ll_rx_put_sched(rx->hdr.link, rx);
}

#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)
static struct ll_sync_set *sync_is_create_get(uint16_t handle)
{
	struct ll_sync_set *sync;

	sync = ull_sync_set_get(handle);
	if (!sync || !sync->timeout) {
		return NULL;
	}

	return sync;
}

static bool peer_sid_sync_exists(uint8_t const peer_id_addr_type,
				 uint8_t const *const peer_id_addr,
				 uint8_t sid)
{
	uint16_t handle;

	for (handle = 0U; handle < CONFIG_BT_PER_ADV_SYNC_MAX; handle++) {
		struct ll_sync_set *sync = sync_is_create_get(handle);

		if (sync &&
		    (sync->peer_id_addr_type == peer_id_addr_type) &&
		    !memcmp(sync->peer_id_addr, peer_id_addr, BDADDR_SIZE) &&
		    (sync->sid == sid)) {
			return true;
		}
	}

	return false;
}
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC */

#if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
static void ticker_update_op_status_give(uint32_t status, void *param)
{
	*((uint32_t volatile *)param) = status;

	k_sem_give(&sem_ticker_cb);
}
#endif /* CONFIG_BT_CTLR_DF_SCAN_CTE_RX */

#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING) && \
	!defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
static struct pdu_cte_info *pdu_cte_info_get(struct pdu_adv *pdu)
{
	struct pdu_adv_com_ext_adv *com_hdr;
	struct pdu_adv_ext_hdr *hdr;

	com_hdr = &pdu->adv_ext_ind;
	hdr = &com_hdr->ext_hdr;

	if (!com_hdr->ext_hdr_len || !hdr->cte_info) {
		return NULL;
	}

	/* Make sure there are no fields that are not allowed for AUX_SYNC_IND
	 * and AUX_CHAIN_IND
	 */
	LL_ASSERT(!hdr->adv_addr);
	LL_ASSERT(!hdr->tgt_addr);

	return (struct pdu_cte_info *)hdr->data;
}
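
/* Layout relied upon above (per the Core Spec extended header format): the
 * common extended advertising header is the ext_hdr_len/adv_mode octet, a
 * flags octet, then optional fields in a fixed order (AdvA, TargetA, CTEInfo,
 * ...). With AdvA and TargetA disallowed in AUX_SYNC_IND and AUX_CHAIN_IND
 * (asserted above), a flagged CTEInfo is therefore the first field in
 * hdr->data.
 */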
#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING && !CONFIG_BT_CTLR_CTEINLINE_SUPPORT */