/*
 * Copyright (c) 2020 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/bluetooth/hci_types.h>

#include "util/util.h"
#include "util/mem.h"
#include "util/memq.h"
#include "util/mayfly.h"
#include "util/dbuf.h"

#include "hal/ccm.h"
#include "hal/radio.h"
#include "hal/ticker.h"

#include "ticker/ticker.h"

#include "pdu_df.h"
#include "lll/pdu_vendor.h"
#include "pdu.h"

#include "lll.h"
#include "lll/lll_vendor.h"
#include "lll_clock.h"
#include "lll_scan.h"
#include "lll/lll_df_types.h"
#include "lll_sync.h"
#include "lll_sync_iso.h"

#include "isoal.h"

#include "ull_scan_types.h"
#include "ull_sync_types.h"
#include "ull_iso_types.h"

#include "ull_internal.h"
#include "ull_scan_internal.h"
#include "ull_sync_internal.h"
#include "ull_iso_internal.h"
#include "ull_sync_iso_internal.h"

#include "ll.h"

#include "bt_crypto.h"

#include "hal/debug.h"

static int init_reset(void);
static struct ll_sync_iso_set *sync_iso_get(uint8_t handle);
static struct ll_sync_iso_set *sync_iso_alloc(uint8_t handle);
static uint8_t sync_iso_handle_get(struct ll_sync_iso_set *sync);
static uint8_t sync_iso_handle_to_index(uint8_t handle);
static struct stream *sync_iso_stream_acquire(void);
static uint16_t sync_iso_stream_handle_get(struct lll_sync_iso_stream *stream);
static void timeout_cleanup(struct ll_sync_iso_set *sync_iso);
static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param);
static void ticker_start_op_cb(uint32_t status, void *param);
static void ticker_update_op_cb(uint32_t status, void *param);
static void ticker_stop_op_cb(uint32_t status, void *param);
static void sync_iso_disable(void *param);
static void disabled_cb(void *param);

static memq_link_t link_lll_prepare;
static struct mayfly mfy_lll_prepare = {0U, 0U, &link_lll_prepare, NULL, NULL};

static struct ll_sync_iso_set ll_sync_iso[CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET];
static struct lll_sync_iso_stream
			stream_pool[CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT];
static struct ll_iso_rx_test_mode
			test_mode[CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT];
static void *stream_free;
uint8_t ll_big_sync_create(uint8_t big_handle, uint16_t sync_handle,
			   uint8_t encryption, uint8_t *bcode, uint8_t mse,
			   uint16_t sync_timeout, uint8_t num_bis,
			   uint8_t *bis)
{
	struct ll_sync_iso_set *sync_iso;
	memq_link_t *link_sync_estab;
	memq_link_t *link_sync_lost;
	struct node_rx_pdu *node_rx;
	struct ll_sync_set *sync;
	struct lll_sync_iso *lll;
	int8_t last_index;

	sync = ull_sync_is_enabled_get(sync_handle);
	if (!sync) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	if (sync->iso.sync_iso) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	sync_iso = sync_iso_get(big_handle);
	if (sync_iso) {
		/* BIG handle already in use */
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	sync_iso = sync_iso_alloc(big_handle);
	if (!sync_iso) {
		return BT_HCI_ERR_INSUFFICIENT_RESOURCES;
	}

	/* TODO: Check remaining parameters */

	/* Check BIS indices */
	last_index = -1;
	for (uint8_t i = 0U; i < num_bis; i++) {
		/* Stream index must be in valid range and in ascending order */
		if (!IN_RANGE(bis[i], 0x01, 0x1F) || (bis[i] <= last_index)) {
			return BT_HCI_ERR_INVALID_PARAM;

		} else if (bis[i] > sync->num_bis) {
			return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
		}
		last_index = bis[i];
	}

	/* Check if encryption supported */
	if (!IS_ENABLED(CONFIG_BT_CTLR_BROADCAST_ISO_ENC) &&
	    encryption) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Check if requested encryption matches */
	if (encryption != sync->enc) {
		return BT_HCI_ERR_ENC_MODE_NOT_ACCEPTABLE;
	}

	/* Check if free BISes available */
	if (mem_free_count_get(stream_free) < num_bis) {
		return BT_HCI_ERR_INSUFFICIENT_RESOURCES;
	}

	link_sync_estab = ll_rx_link_alloc();
	if (!link_sync_estab) {
		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	link_sync_lost = ll_rx_link_alloc();
	if (!link_sync_lost) {
		ll_rx_link_release(link_sync_estab);

		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	node_rx = ll_rx_alloc();
	if (!node_rx) {
		ll_rx_link_release(link_sync_lost);
		ll_rx_link_release(link_sync_estab);

		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	/* Initialize the ISO sync ULL context */
	sync_iso->sync = sync;
	sync_iso->timeout = sync_timeout;
	sync_iso->timeout_reload = 0U;
	sync_iso->timeout_expire = 0U;

	/* Setup the periodic sync to establish ISO sync */
	node_rx->hdr.link = link_sync_estab;
	sync->iso.node_rx_estab = node_rx;
	sync_iso->node_rx_lost.rx.hdr.link = link_sync_lost;

	/* Initialize sync LLL context */
	lll = &sync_iso->lll;
	lll->latency_prepare = 0U;
	lll->latency_event = 0U;
	lll->window_widening_prepare_us = 0U;
	lll->window_widening_event_us = 0U;
	lll->ctrl = 0U;
	lll->cssn_curr = 0U;
	lll->cssn_next = 0U;
	lll->term_reason = 0U;

	if (IS_ENABLED(CONFIG_BT_CTLR_BROADCAST_ISO_ENC) && encryption) {
		const uint8_t BIG1[16] = {0x31, 0x47, 0x49, 0x42, };
		const uint8_t BIG2[4] = {0x32, 0x47, 0x49, 0x42};
		uint8_t igltk[16];
		int err;

		/* Calculate GLTK */
		err = bt_crypto_h7(BIG1, bcode, igltk);
		LL_ASSERT(!err);
		err = bt_crypto_h6(igltk, BIG2, sync_iso->gltk);
		LL_ASSERT(!err);

		lll->enc = 1U;
	} else {
		lll->enc = 0U;
	}
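
	/* NOTE: Explanatory summary of the broadcast encryption key
	 * hierarchy, not new behavior: BIG1 and BIG2 above are the ASCII
	 * salts "BIG1" and "BIG2" in little-endian byte order, so the
	 * derivation performed is
	 *
	 *   IGLTK = h7("BIG1", Broadcast_Code)
	 *   GLTK  = h6(IGLTK, "BIG2")
	 *
	 * The session key GSK = h8(GLTK, GSKD, "BIG3") is derived later, in
	 * ull_sync_iso_setup(), once the GSKD is received in the BIGInfo.
	 */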

	/* TODO: Implement usage of MSE to limit listening to subevents */

	/* Allocate streams */
	lll->stream_count = num_bis;
	for (uint8_t i = 0U; i < num_bis; i++) {
		struct lll_sync_iso_stream *stream;

		stream = (void *)sync_iso_stream_acquire();
		stream->big_handle = big_handle;
		stream->bis_index = bis[i];
		stream->dp = NULL;
		stream->test_mode = &test_mode[i];
		memset(stream->test_mode, 0, sizeof(struct ll_iso_rx_test_mode));
		lll->stream_handle[i] = sync_iso_stream_handle_get(stream);
	}

	/* Initialize ULL and LLL headers */
	ull_hdr_init(&sync_iso->ull);
	lll_hdr_init(lll, sync_iso);

	/* Enable periodic advertising to establish ISO sync */
	sync->iso.sync_iso = sync_iso;

	return BT_HCI_ERR_SUCCESS;
}
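
/* Example call (a hypothetical sketch, not part of this file): synchronize
 * BIG handle 0x00 to BIS indices 1 and 2 of an unencrypted BIG carried by an
 * established periodic sync, using the maximum 32 s timeout (0x0C80 in 10 ms
 * units) and no MSE limit:
 *
 *	uint8_t bis[] = {0x01, 0x02};
 *	uint8_t err;
 *
 *	err = ll_big_sync_create(0x00, sync_handle, 0U, NULL, 0U, 0x0C80,
 *				 ARRAY_SIZE(bis), bis);
 */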

uint8_t ll_big_sync_terminate(uint8_t big_handle, void **rx)
{
	struct ll_sync_iso_set *sync_iso;
	memq_link_t *link_sync_estab;
	struct node_rx_pdu *node_rx;
	memq_link_t *link_sync_lost;
	struct ll_sync_set *sync;
	int err;

	sync_iso = sync_iso_get(big_handle);
	if (!sync_iso) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	sync = sync_iso->sync;
	if (sync && sync->iso.sync_iso) {
		struct node_rx_sync_iso *se;

		if (sync->iso.sync_iso != sync_iso) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
		sync->iso.sync_iso = NULL;

		node_rx = sync->iso.node_rx_estab;
		link_sync_estab = node_rx->hdr.link;
		link_sync_lost = sync_iso->node_rx_lost.rx.hdr.link;

		ll_rx_link_release(link_sync_lost);
		ll_rx_link_release(link_sync_estab);
		ll_rx_release(node_rx);

		node_rx = (void *)&sync_iso->node_rx_lost;
		node_rx->hdr.type = NODE_RX_TYPE_SYNC_ISO;
		node_rx->hdr.handle = big_handle;

		/* NOTE: Since NODE_RX_TYPE_SYNC_ISO is only generated from ULL
		 *       context, pass ULL context as parameter.
		 */
		node_rx->rx_ftr.param = sync_iso;
		/* NOTE: struct node_rx_lost has a uint8_t member to store
		 *       the reason.
		 */
		se = (void *)node_rx->pdu;
		se->status = BT_HCI_ERR_OP_CANCELLED_BY_HOST;

		*rx = node_rx;

		return BT_HCI_ERR_SUCCESS;
	}

	err = ull_ticker_stop_with_mark((TICKER_ID_SCAN_SYNC_ISO_BASE +
					 sync_iso_handle_to_index(big_handle)),
					sync_iso, &sync_iso->lll);
	LL_ASSERT_INFO2(err == 0 || err == -EALREADY, big_handle, err);
	if (err) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	ull_sync_iso_stream_release(sync_iso);

	link_sync_lost = sync_iso->node_rx_lost.rx.hdr.link;
	ll_rx_link_release(link_sync_lost);

	return BT_HCI_ERR_SUCCESS;
}

int ull_sync_iso_init(void)
{
	int err;

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

int ull_sync_iso_reset(void)
{
	int err;

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

uint8_t ull_sync_iso_lll_index_get(struct lll_sync_iso *lll)
{
	return ARRAY_INDEX(ll_sync_iso, HDR_LLL2ULL(lll));
}

struct ll_sync_iso_set *ull_sync_iso_by_stream_get(uint16_t handle)
{
	if (handle >= CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT) {
		return NULL;
	}

	return sync_iso_get(stream_pool[handle].big_handle);
}

struct lll_sync_iso_stream *ull_sync_iso_stream_get(uint16_t handle)
{
	struct ll_sync_iso_set *sync_iso;

	/* Get the BIG Sync context and check for not being terminated */
	sync_iso = ull_sync_iso_by_stream_get(handle);
	if (!sync_iso || !sync_iso->sync) {
		return NULL;
	}

	return &stream_pool[handle];
}

struct lll_sync_iso_stream *ull_sync_iso_lll_stream_get(uint16_t handle)
{
	return ull_sync_iso_stream_get(handle);
}

void ull_sync_iso_stream_release(struct ll_sync_iso_set *sync_iso)
{
	struct lll_sync_iso *lll;

	lll = &sync_iso->lll;
	while (lll->stream_count--) {
		struct lll_sync_iso_stream *stream;
		struct ll_iso_datapath *dp;
		uint16_t stream_handle;

		stream_handle = lll->stream_handle[lll->stream_count];
		stream = ull_sync_iso_stream_get(stream_handle);
		LL_ASSERT(stream);

		dp = stream->dp;
		if (dp) {
			stream->dp = NULL;
			isoal_sink_destroy(dp->sink_hdl);
			ull_iso_datapath_release(dp);
		}

		mem_release(stream, &stream_free);
	}

	sync_iso->sync = NULL;
}

void ull_sync_iso_setup(struct ll_sync_iso_set *sync_iso,
			struct node_rx_pdu *node_rx,
			uint8_t *acad, uint8_t acad_len)
{
	struct lll_sync_iso_stream *stream;
	uint32_t ticks_slot_overhead;
	uint32_t sync_iso_offset_us;
	uint32_t ticks_slot_offset;
	uint32_t ticks_threshold;
	struct lll_sync_iso *lll;
	struct node_rx_ftr *ftr;
	struct pdu_big_info *bi;
	uint32_t ready_delay_us;
	uint32_t ticks_expire;
	uint32_t ctrl_spacing;
	uint32_t pdu_spacing;
	uint32_t interval_us;
	uint32_t ticks_diff;
	struct pdu_adv *pdu;
	uint32_t slot_us;
	uint8_t num_bis;
	uint8_t bi_size;
	uint8_t handle;
	uint32_t ret;
	uint8_t sca;

	while (acad_len) {
		const uint8_t hdr_len = acad[PDU_ADV_DATA_HEADER_LEN_OFFSET];

		if ((hdr_len >= PDU_ADV_DATA_HEADER_TYPE_SIZE) &&
		    (acad[PDU_ADV_DATA_HEADER_TYPE_OFFSET] ==
		     BT_DATA_BIG_INFO)) {
			break;
		}

		if (acad_len < (hdr_len + PDU_ADV_DATA_HEADER_LEN_SIZE)) {
			return;
		}

		acad_len -= hdr_len + PDU_ADV_DATA_HEADER_LEN_SIZE;
		acad += hdr_len + PDU_ADV_DATA_HEADER_LEN_SIZE;
	}

	if ((acad_len < (PDU_BIG_INFO_CLEARTEXT_SIZE +
			 PDU_ADV_DATA_HEADER_SIZE)) ||
	    ((acad[PDU_ADV_DATA_HEADER_LEN_OFFSET] !=
	      (PDU_ADV_DATA_HEADER_TYPE_SIZE + PDU_BIG_INFO_CLEARTEXT_SIZE)) &&
	     (acad[PDU_ADV_DATA_HEADER_LEN_OFFSET] !=
	      (PDU_ADV_DATA_HEADER_TYPE_SIZE + PDU_BIG_INFO_ENCRYPTED_SIZE)))) {
		return;
	}

	bi_size = acad[PDU_ADV_DATA_HEADER_LEN_OFFSET] -
		  PDU_ADV_DATA_HEADER_TYPE_SIZE;
	bi = (void *)&acad[PDU_ADV_DATA_HEADER_DATA_OFFSET];

	lll = &sync_iso->lll;
	(void)memcpy(lll->seed_access_addr, &bi->seed_access_addr,
		     sizeof(lll->seed_access_addr));
	(void)memcpy(lll->base_crc_init, &bi->base_crc_init,
		     sizeof(lll->base_crc_init));

	(void)memcpy(lll->data_chan_map, bi->chm_phy,
		     sizeof(lll->data_chan_map));
	lll->data_chan_map[4] &= 0x1F;
	lll->data_chan_count = util_ones_count_get(lll->data_chan_map,
						   sizeof(lll->data_chan_map));
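	/* NOTE: chm_phy[] in the BIGInfo carries the 37-bit channel map with
	 * the 3-bit PHY field in the upper bits of the last byte; the 0x1F
	 * mask above strips the PHY bits before counting used channels. A
	 * map with fewer than CHM_USED_COUNT_MIN used channels is treated as
	 * an invalid BIGInfo and ignored.
	 */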
	if (lll->data_chan_count < CHM_USED_COUNT_MIN) {
		return;
	}

	/* Reset ISO create BIG flag in the periodic advertising context */
	sync_iso->sync->iso.sync_iso = NULL;

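	/* The top 3 bits of chm_phy[4] encode the BIG PHY as an ordinal
	 * (0: 1M, 1: 2M, 2: Coded); BIT() converts the ordinal to the
	 * controller's PHY bit-mask representation.
	 */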
	lll->phy = BIT(bi->chm_phy[4] >> 5);

	lll->num_bis = PDU_BIG_INFO_NUM_BIS_GET(bi);
	lll->bn = PDU_BIG_INFO_BN_GET(bi);
	lll->nse = PDU_BIG_INFO_NSE_GET(bi);
	lll->sub_interval = PDU_BIG_INFO_SUB_INTERVAL_GET(bi);
	lll->max_pdu = bi->max_pdu;
	lll->pto = PDU_BIG_INFO_PTO_GET(bi);
	if (lll->pto) {
		lll->ptc = lll->bn;
	} else {
		lll->ptc = 0U;
	}
	lll->bis_spacing = PDU_BIG_INFO_SPACING_GET(bi);
	lll->irc = PDU_BIG_INFO_IRC_GET(bi);
	lll->sdu_interval = PDU_BIG_INFO_SDU_INTERVAL_GET(bi);

	/* Pick the 39-bit payload count, 1 MSb is framing bit */
	lll->payload_count = (uint64_t)bi->payload_count_framing[0];
	lll->payload_count |= (uint64_t)bi->payload_count_framing[1] << 8;
	lll->payload_count |= (uint64_t)bi->payload_count_framing[2] << 16;
	lll->payload_count |= (uint64_t)bi->payload_count_framing[3] << 24;
	lll->payload_count |= (uint64_t)(bi->payload_count_framing[4] & 0x7f) << 32;
	lll->framing = (bi->payload_count_framing[4] & 0x80) >> 7;
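	/* Layout of payload_count_framing[5] (LSB first), for reference:
	 *   [0]..[3]: bits 0..31 of bisPayloadCounter
	 *   [4]     : bits 32..38 of bisPayloadCounter in bits 0..6,
	 *             Framing flag in bit 7
	 */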

	/* Set establishment event countdown */
	lll->establish_events = CONN_ESTAB_COUNTDOWN;

	if (IS_ENABLED(CONFIG_BT_CTLR_BROADCAST_ISO_ENC) &&
	    lll->enc && (bi_size == PDU_BIG_INFO_ENCRYPTED_SIZE)) {
		const uint8_t BIG3[4] = {0x33, 0x47, 0x49, 0x42};
		struct ccm *ccm_rx;
		uint8_t gsk[16];
		int err;

		/* Copy the GIV in BIGInfo */
		(void)memcpy(lll->giv, bi->giv, sizeof(lll->giv));

		/* Calculate GSK */
		err = bt_crypto_h8(sync_iso->gltk, bi->gskd, BIG3, gsk);
		LL_ASSERT(!err);

		/* Prepare the CCM parameters */
		ccm_rx = &lll->ccm_rx;
		ccm_rx->direction = 1U;
		(void)memcpy(&ccm_rx->iv[4], &lll->giv[4], 4U);
		(void)mem_rcopy(ccm_rx->key, gsk, sizeof(ccm_rx->key));

		/* NOTE: counter is filled in LLL */
	} else {
		lll->enc = 0U;
	}

	/* Initialize payload pointers */
	lll->payload_count_max = PDU_BIG_PAYLOAD_COUNT_MAX;
	lll->payload_tail = 0U;
	for (int i = 0; i < CONFIG_BT_CTLR_SYNC_ISO_STREAM_MAX; i++) {
		for (int j = 0; j < lll->payload_count_max; j++) {
			lll->payload[i][j] = NULL;
		}
	}

	lll->iso_interval = PDU_BIG_INFO_ISO_INTERVAL_GET(bi);
	interval_us = lll->iso_interval * PERIODIC_INT_UNIT_US;

	sync_iso->timeout_reload =
		RADIO_SYNC_EVENTS((sync_iso->timeout * 10U * USEC_PER_MSEC),
				  interval_us);
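	/* Illustrative numbers: sync_iso->timeout is in 10 ms units (HCI
	 * BIG_Sync_Timeout), so a timeout of 100 (1 s) with a 10 ms ISO
	 * interval reloads the supervision countdown with 100 events.
	 */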

	sca = sync_iso->sync->lll.sca;
	lll->window_widening_periodic_us =
		DIV_ROUND_UP(((lll_clock_ppm_local_get() +
			       lll_clock_ppm_get(sca)) *
			      interval_us), USEC_PER_SEC);
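	/* Illustrative numbers: with 50 ppm local and 50 ppm remote clock
	 * accuracy and a 10 ms ISO interval, the receive window widens by
	 * DIV_ROUND_UP(100 * 10000, 1000000) = 1 us every interval.
	 */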
	lll->window_widening_max_us = (interval_us >> 1) - EVENT_IFS_US;
	if (PDU_BIG_INFO_OFFS_UNITS_GET(bi)) {
		lll->window_size_event_us = OFFS_UNIT_300_US;
	} else {
		lll->window_size_event_us = OFFS_UNIT_30_US;
	}

	ftr = &node_rx->rx_ftr;
	pdu = (void *)((struct node_rx_pdu *)node_rx)->pdu;

	ready_delay_us = lll_radio_rx_ready_delay_get(lll->phy, PHY_FLAGS_S8);

	/* Calculate the BIG Offset in microseconds */
	sync_iso_offset_us = ftr->radio_end_us;
	sync_iso_offset_us += PDU_BIG_INFO_OFFS_GET(bi) *
			      lll->window_size_event_us;
	/* Skip to first selected BIS subevent */
	/* FIXME: add support for interleaved packing */
	stream = ull_sync_iso_stream_get(lll->stream_handle[0]);
	sync_iso_offset_us += (stream->bis_index - 1U) * lll->sub_interval *
			      ((lll->irc * lll->bn) + lll->ptc);
	sync_iso_offset_us -= PDU_AC_US(pdu->len, sync_iso->sync->lll.phy,
					ftr->phy_flags);
	sync_iso_offset_us -= EVENT_TICKER_RES_MARGIN_US;
	sync_iso_offset_us -= EVENT_JITTER_US;
	sync_iso_offset_us -= ready_delay_us;

	interval_us -= lll->window_widening_periodic_us;

	/* Calculate ISO Receiver BIG event timings */
	pdu_spacing = PDU_BIS_US(lll->max_pdu, lll->enc, lll->phy,
				 PHY_FLAGS_S8) +
		      EVENT_MSS_US;
	ctrl_spacing = PDU_BIS_US(sizeof(struct pdu_big_ctrl), lll->enc,
				  lll->phy, PHY_FLAGS_S8);

	/* Number of maximum BISes to sync from the first BIS to sync */
	/* NOTE: When ULL scheduling is implemented for subevents, then update
	 *       the time reservation as required.
	 */
	num_bis = lll->num_bis - (stream->bis_index - 1U);

	/* 1. Maximum PDU transmission time in 1M/2M/S8 PHY is 17040 us, or
	 *    represented in 15-bits.
	 * 2. NSE in the range 1 to 31 is represented in 5-bits
	 * 3. num_bis in the range 1 to 31 is represented in 5-bits
	 *
	 * Hence, worst case event time can be represented in 25-bits plus
	 * one each bit for added ctrl_spacing and radio event overheads. I.e.
	 * 27-bits required and sufficiently covered by using 32-bit data type
	 * for time_us.
	 */

	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_ISO_RESERVE_MAX)) {
		/* Maximum time reservation for both sequential and interleaved
		 * packing.
		 */
		slot_us = (pdu_spacing * lll->nse * num_bis) + ctrl_spacing;

	} else if (lll->bis_spacing >= (lll->sub_interval * lll->nse)) {
		/* Time reservation omitting PTC subevents in sequential
		 * packing.
		 */
		slot_us = pdu_spacing * ((lll->nse * num_bis) - lll->ptc);

	} else {
		/* Time reservation omitting PTC subevents in interleaved
		 * packing.
		 */
		slot_us = pdu_spacing * ((lll->nse - lll->ptc) * num_bis);
	}
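
	/* Illustrative numbers: with NSE = 4, PTC = 2 and num_bis = 2,
	 * sequential packing reserves (4 * 2) - 2 = 6 subevents worth of
	 * pdu_spacing, interleaved packing reserves (4 - 2) * 2 = 4, and
	 * CONFIG_BT_CTLR_SYNC_ISO_RESERVE_MAX reserves all 8 subevents plus
	 * ctrl_spacing.
	 */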

	/* Add radio ready delay */
	slot_us += ready_delay_us;

	/* Add implementation defined radio event overheads */
	if (IS_ENABLED(CONFIG_BT_CTLR_EVENT_OVERHEAD_RESERVE_MAX)) {
		slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
	}

	/* TODO: active_to_start feature port */
	sync_iso->ull.ticks_active_to_start = 0U;
	sync_iso->ull.ticks_prepare_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	sync_iso->ull.ticks_preempt_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
	sync_iso->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);

	ticks_slot_offset = MAX(sync_iso->ull.ticks_active_to_start,
				sync_iso->ull.ticks_prepare_to_start);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}
	ticks_slot_offset += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

	/* Check and skip to next interval if CPU processing introduces latency
	 * that can delay scheduling the first ISO event.
	 */
	ticks_expire = ftr->ticks_anchor - ticks_slot_offset +
		       HAL_TICKER_US_TO_TICKS(sync_iso_offset_us);
	ticks_threshold = ticker_ticks_now_get() +
			  HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);
	ticks_diff = ticker_ticks_diff_get(ticks_expire, ticks_threshold);
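	/* NOTE: The ticker counter is (HAL_TICKER_CNTR_MSBIT + 1) bits wide;
	 * a set MSBit in the modulo difference means ticks_expire already
	 * lies in the past relative to the threshold, i.e. the first event
	 * cannot be scheduled in time and one ISO interval is skipped.
	 */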
	if (ticks_diff & BIT(HAL_TICKER_CNTR_MSBIT)) {
		sync_iso_offset_us += interval_us -
			lll->window_widening_periodic_us;
		lll->window_widening_event_us +=
			lll->window_widening_periodic_us;
		lll->payload_count += lll->bn;
	}

	/* setup to use ISO create prepare function until sync established */
	mfy_lll_prepare.fp = lll_sync_iso_create_prepare;

	handle = sync_iso_handle_get(sync_iso);
	ret = ticker_start(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			   (TICKER_ID_SCAN_SYNC_ISO_BASE +
			    sync_iso_handle_to_index(handle)),
			   ftr->ticks_anchor - ticks_slot_offset,
			   HAL_TICKER_US_TO_TICKS(sync_iso_offset_us),
			   HAL_TICKER_US_TO_TICKS(interval_us),
			   HAL_TICKER_REMAINDER(interval_us),
#if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
	!defined(CONFIG_BT_CTLR_LOW_LAT)
			   TICKER_LAZY_MUST_EXPIRE,
#else
			   TICKER_NULL_LAZY,
#endif /* !CONFIG_BT_TICKER_LOW_LAT && !CONFIG_BT_CTLR_LOW_LAT */
			   (sync_iso->ull.ticks_slot + ticks_slot_overhead),
			   ticker_cb, sync_iso,
			   ticker_start_op_cb, (void *)__LINE__);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}

void ull_sync_iso_estab_done(struct node_rx_event_done *done)
{
	struct ll_sync_iso_set *sync_iso;
	struct node_rx_sync_iso *se;
	struct node_rx_pdu *rx;

	if (done->extra.trx_cnt || done->extra.estab_failed) {
		/* Switch to normal prepare */
		mfy_lll_prepare.fp = lll_sync_iso_prepare;

		/* Get reference to ULL context */
		sync_iso = CONTAINER_OF(done->param, struct ll_sync_iso_set, ull);

		/* Prepare BIG Sync Established */
		rx = (void *)sync_iso->sync->iso.node_rx_estab;
		rx->hdr.type = NODE_RX_TYPE_SYNC_ISO;
		rx->hdr.handle = sync_iso_handle_get(sync_iso);
		rx->rx_ftr.param = sync_iso;

		se = (void *)rx->pdu;
		se->status = done->extra.estab_failed ?
			     BT_HCI_ERR_CONN_FAIL_TO_ESTAB : BT_HCI_ERR_SUCCESS;

		ll_rx_put_sched(rx->hdr.link, rx);
	}

	ull_sync_iso_done(done);
}

void ull_sync_iso_done(struct node_rx_event_done *done)
{
	struct ll_sync_iso_set *sync_iso;
	uint32_t ticks_drift_minus;
	uint32_t ticks_drift_plus;
	struct lll_sync_iso *lll;
	uint16_t elapsed_event;
	uint16_t latency_event;
	uint16_t lazy;
	uint8_t force;

	/* Get reference to ULL context */
	sync_iso = CONTAINER_OF(done->param, struct ll_sync_iso_set, ull);
	lll = &sync_iso->lll;

	/* Events elapsed used in timeout checks below */
	latency_event = lll->latency_event;
	if (lll->latency_prepare) {
		elapsed_event = latency_event + lll->latency_prepare;
	} else {
		elapsed_event = latency_event + 1U;
	}

	/* Sync drift compensation and new skip calculation */
	ticks_drift_plus = 0U;
	ticks_drift_minus = 0U;
	if (done->extra.trx_cnt) {
		/* Calculate drift in ticks unit */
		ull_drift_ticks_get(done, &ticks_drift_plus,
				    &ticks_drift_minus);

		/* Reset latency */
		lll->latency_event = 0U;
	}

	/* Reset supervision countdown */
	if (done->extra.crc_valid) {
		sync_iso->timeout_expire = 0U;
	} else {
		/* If anchor point not synced, start the timeout countdown */
		if (!sync_iso->timeout_expire) {
			sync_iso->timeout_expire = sync_iso->timeout_reload;
		}
	}

	/* check timeout */
	force = 0U;
	if (sync_iso->timeout_expire) {
		if (sync_iso->timeout_expire > elapsed_event) {
			sync_iso->timeout_expire -= elapsed_event;

			/* break skip */
			lll->latency_event = 0U;

			if (latency_event) {
				force = 1U;
			}
		} else {
			timeout_cleanup(sync_iso);

			return;
		}
	}

	/* check if skip needs update */
	lazy = 0U;
	if (force || (latency_event != lll->latency_event)) {
		lazy = lll->latency_event + 1U;
	}
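	/* NOTE: The +1 encoding is deliberate; for ticker_update() a lazy
	 * value of 0 means "no change", so the requested skip count
	 * (lll->latency_event) is passed incremented by one.
	 */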

	/* Update Sync ticker instance */
	if (ticks_drift_plus || ticks_drift_minus || lazy || force) {
		uint8_t handle = sync_iso_handle_get(sync_iso);
		uint32_t ticker_status;

		/* Calls to ticker_update() can fail under a race condition
		 * wherein the periodic sync role is being stopped while, at
		 * the same time, it is preempted by a periodic sync event
		 * that gets into the close state. Accept failure when the
		 * periodic sync role is being stopped.
		 */
		ticker_status = ticker_update(TICKER_INSTANCE_ID_CTLR,
					      TICKER_USER_ID_ULL_HIGH,
					      (TICKER_ID_SCAN_SYNC_ISO_BASE +
					       sync_iso_handle_to_index(handle)),
					      ticks_drift_plus,
					      ticks_drift_minus, 0U, 0U,
					      lazy, force,
					      ticker_update_op_cb,
					      sync_iso);
		LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
			  (ticker_status == TICKER_STATUS_BUSY) ||
			  ((void *)sync_iso == ull_disable_mark_get()));
	}
}

void ull_sync_iso_done_terminate(struct node_rx_event_done *done)
{
	struct ll_sync_iso_set *sync_iso;
	struct lll_sync_iso *lll;
	struct node_rx_pdu *rx;
	uint8_t handle;
	uint32_t ret;

	/* Get reference to ULL context */
	sync_iso = CONTAINER_OF(done->param, struct ll_sync_iso_set, ull);
	lll = &sync_iso->lll;

	/* Populate the Sync Lost which will be enqueued in disabled_cb */
	rx = (void *)&sync_iso->node_rx_lost;
	rx->hdr.handle = sync_iso_handle_get(sync_iso);
	rx->hdr.type = NODE_RX_TYPE_SYNC_ISO_LOST;
	rx->rx_ftr.param = sync_iso;
	*((uint8_t *)rx->pdu) = lll->term_reason;

	/* Stop Sync ISO Ticker */
	handle = sync_iso_handle_get(sync_iso);
	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			  (TICKER_ID_SCAN_SYNC_ISO_BASE +
			   sync_iso_handle_to_index(handle)),
			  ticker_stop_op_cb, (void *)sync_iso);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}

static void disable(uint8_t sync_idx)
{
	struct ll_sync_iso_set *sync_iso;
	int err;

	sync_iso = &ll_sync_iso[sync_idx];

	err = ull_ticker_stop_with_mark(TICKER_ID_SCAN_SYNC_ISO_BASE +
					sync_idx, sync_iso, &sync_iso->lll);
	LL_ASSERT_INFO2(err == 0 || err == -EALREADY, sync_idx, err);
}

static int init_reset(void)
{
	uint8_t idx;

	/* Disable all active BIGs (uses blocking ull_ticker_stop_with_mark) */
	for (idx = 0U; idx < CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET; idx++) {
		disable(idx);
	}

	mem_init((void *)stream_pool, sizeof(struct lll_sync_iso_stream),
		 CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT, &stream_free);

	memset(&ll_sync_iso, 0, sizeof(ll_sync_iso));

	/* Initialize LLL */
	return lll_sync_iso_init();
}

static struct ll_sync_iso_set *sync_iso_get(uint8_t handle)
{
	for (uint8_t idx = 0; idx < CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET; idx++) {
		if (ll_sync_iso[idx].sync && ll_sync_iso[idx].big_handle == handle) {
			return &ll_sync_iso[idx];
		}
	}

	return NULL;
}

static struct ll_sync_iso_set *sync_iso_alloc(uint8_t handle)
{
	for (uint8_t idx = 0; idx < CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET; idx++) {
		if (!ll_sync_iso[idx].sync) {
			ll_sync_iso[idx].big_handle = handle;
			return &ll_sync_iso[idx];
		}
	}

	return NULL;
}

static uint8_t sync_iso_handle_get(struct ll_sync_iso_set *sync)
{
	return sync->big_handle;
}

static uint8_t sync_iso_handle_to_index(uint8_t handle)
{
	return ARRAY_INDEX(ll_sync_iso, sync_iso_get(handle));
}

static struct stream *sync_iso_stream_acquire(void)
{
	return mem_acquire(&stream_free);
}

static uint16_t sync_iso_stream_handle_get(struct lll_sync_iso_stream *stream)
{
	return mem_index_get(stream, stream_pool, sizeof(*stream));
}

static void timeout_cleanup(struct ll_sync_iso_set *sync_iso)
{
	struct node_rx_pdu *rx;
	uint8_t handle;
	uint32_t ret;

	/* Populate the Sync Lost which will be enqueued in disabled_cb */
	rx = (void *)&sync_iso->node_rx_lost;
	rx->hdr.handle = sync_iso_handle_get(sync_iso);
	rx->rx_ftr.param = sync_iso;

	if (mfy_lll_prepare.fp == lll_sync_iso_prepare) {
		rx->hdr.type = NODE_RX_TYPE_SYNC_ISO_LOST;
		*((uint8_t *)rx->pdu) = BT_HCI_ERR_CONN_TIMEOUT;
	} else {
		rx->hdr.type = NODE_RX_TYPE_SYNC_ISO;
		*((uint8_t *)rx->pdu) = BT_HCI_ERR_CONN_FAIL_TO_ESTAB;
	}

	/* Stop Sync ISO Ticker */
	handle = sync_iso_handle_get(sync_iso);
	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			  (TICKER_ID_SCAN_SYNC_ISO_BASE +
			   sync_iso_handle_to_index(handle)),
			  ticker_stop_op_cb, (void *)sync_iso);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}

static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param)
{
	static struct lll_prepare_param p;
	struct ll_sync_iso_set *sync_iso;
	struct lll_sync_iso *lll;
	uint32_t ret;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_O(1);

	if (!IS_ENABLED(CONFIG_BT_TICKER_LOW_LAT) &&
	    !IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) &&
	    (lazy == TICKER_LAZY_MUST_EXPIRE)) {
		/* FIXME: generate ISO PDU with status set to invalid */

		DEBUG_RADIO_PREPARE_O(0);
		return;
	}

	sync_iso = param;
	lll = &sync_iso->lll;

	/* Increment prepare reference count */
	ref = ull_ref_inc(&sync_iso->ull);
	LL_ASSERT(ref);

	/* Append timing parameters */
	p.ticks_at_expire = ticks_at_expire;
	p.remainder = remainder;
	p.lazy = lazy;
	p.force = force;
	p.param = lll;
	mfy_lll_prepare.param = &p;

	/* Kick LLL prepare */
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, 0U,
			     &mfy_lll_prepare);
	LL_ASSERT(!ret);

	DEBUG_RADIO_PREPARE_O(1);
}

static void ticker_start_op_cb(uint32_t status, void *param)
{
	ARG_UNUSED(param);

	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
}

static void ticker_update_op_cb(uint32_t status, void *param)
{
	LL_ASSERT(status == TICKER_STATUS_SUCCESS ||
		  param == ull_disable_mark_get());
}

static void ticker_stop_op_cb(uint32_t status, void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0U, 0U, &link, NULL, sync_iso_disable};
	uint32_t ret;

	LL_ASSERT(status == TICKER_STATUS_SUCCESS);

	/* Check if any pending LLL events that need to be aborted */
	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
			     TICKER_USER_ID_ULL_HIGH, 0U, &mfy);
	LL_ASSERT(!ret);
}

static void sync_iso_disable(void *param)
{
	struct ll_sync_iso_set *sync_iso;
	struct ull_hdr *hdr;

	/* Check ref count to determine if any pending LLL events in pipeline */
	sync_iso = param;
	hdr = &sync_iso->ull;
	if (ull_ref_get(hdr)) {
		static memq_link_t link;
		static struct mayfly mfy = {0U, 0U, &link, NULL, lll_disable};
		uint32_t ret;

		mfy.param = &sync_iso->lll;

		/* Setup disabled callback to be called when ref count
		 * returns to zero.
		 */
		LL_ASSERT(!hdr->disabled_cb);
		hdr->disabled_param = mfy.param;
		hdr->disabled_cb = disabled_cb;

		/* Trigger LLL disable */
		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
				     TICKER_USER_ID_LLL, 0U, &mfy);
		LL_ASSERT(!ret);
	} else {
		/* No pending LLL events */
		disabled_cb(&sync_iso->lll);
	}
}

static void disabled_cb(void *param)
{
	struct ll_sync_iso_set *sync_iso;
	struct node_rx_pdu *rx;
	memq_link_t *link;

	/* Get reference to ULL context */
	sync_iso = HDR_LLL2ULL(param);

	/* Generate BIG sync lost */
	rx = (void *)&sync_iso->node_rx_lost;
	LL_ASSERT(rx->hdr.link);
	link = rx->hdr.link;
	rx->hdr.link = NULL;

	/* Enqueue the BIG sync lost towards ULL context */
	ll_rx_put_sched(link, rx);
}