/*
 * Copyright (c) 2020 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/bluetooth/hci_types.h>

#include "util/util.h"
#include "util/mem.h"
#include "util/memq.h"
#include "util/mayfly.h"
#include "util/dbuf.h"

#include "hal/ccm.h"
#include "hal/radio.h"
#include "hal/ticker.h"

#include "ticker/ticker.h"

#include "pdu_df.h"
#include "lll/pdu_vendor.h"
#include "pdu.h"

#include "lll.h"
#include "lll/lll_vendor.h"
#include "lll_clock.h"
#include "lll_scan.h"
#include "lll/lll_df_types.h"
#include "lll_sync.h"
#include "lll_sync_iso.h"
#include "lll_conn.h"
#include "lll_conn_iso.h"

#include "isoal.h"

#include "ull_tx_queue.h"

#include "ull_scan_types.h"
#include "ull_sync_types.h"
#include "ull_iso_types.h"
#include "ull_conn_types.h"
#include "ull_conn_iso_types.h"

#include "ull_internal.h"
#include "ull_scan_internal.h"
#include "ull_sync_internal.h"
#include "ull_iso_internal.h"
#include "ull_sync_iso_internal.h"
#include "ull_conn_internal.h"
#include "ull_conn_iso_internal.h"

#include "ll.h"

#include "bt_crypto.h"

#include "hal/debug.h"

static int init_reset(void);
static struct ll_sync_iso_set *sync_iso_get(uint8_t handle);
static struct ll_sync_iso_set *sync_iso_alloc(uint8_t handle);
static uint8_t sync_iso_handle_get(struct ll_sync_iso_set *sync);
static uint8_t sync_iso_handle_to_index(uint8_t handle);
static struct stream *sync_iso_stream_acquire(void);
static uint16_t sync_iso_stream_handle_get(struct lll_sync_iso_stream *stream);
static void timeout_cleanup(struct ll_sync_iso_set *sync_iso);
static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param);
static void ticker_start_op_cb(uint32_t status, void *param);
static void ticker_update_op_cb(uint32_t status, void *param);
static void ticker_stop_op_cb(uint32_t status, void *param);
static void sync_iso_disable(void *param);
static void disabled_cb(void *param);
static void lll_flush(void *param);
static void stop_ticker(struct ll_sync_iso_set *sync_iso, ticker_op_func fp_op_func);

static memq_link_t link_lll_prepare;
static struct mayfly mfy_lll_prepare = {0U, 0U, &link_lll_prepare, NULL, NULL};

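/* BIG sync set contexts, the BIS stream pool with per-stream ISO RX test mode
 * state, and the head of the stream free list set up by mem_init() in
 * init_reset().
 */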
static struct ll_sync_iso_set ll_sync_iso[CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET];
static struct lll_sync_iso_stream
			stream_pool[CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT];
static struct ll_iso_rx_test_mode
			test_mode[CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT];
static void *stream_free;

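/* Create a BIG sync (HCI LE BIG Create Sync): validate the parameters,
 * allocate the BIG sync context and BIS stream contexts, and arm the
 * associated periodic sync context so that ISO sync is established on the
 * next received BIGInfo.
 */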
uint8_t ll_big_sync_create(uint8_t big_handle, uint16_t sync_handle,
			   uint8_t encryption, uint8_t *bcode, uint8_t mse,
			   uint16_t sync_timeout, uint8_t num_bis,
			   uint8_t *bis)
{
	struct ll_sync_iso_set *sync_iso;
	memq_link_t *link_sync_estab;
	memq_link_t *link_sync_lost;
	struct node_rx_pdu *node_rx;
	struct ll_sync_set *sync;
	struct lll_sync_iso *lll;
	int8_t last_index;

	sync = ull_sync_is_enabled_get(sync_handle);
	if (!sync) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	if (sync->iso.sync_iso) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	sync_iso = sync_iso_get(big_handle);
	if (sync_iso) {
		/* BIG handle already in use */
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	sync_iso = sync_iso_alloc(big_handle);
	if (!sync_iso) {
		return BT_HCI_ERR_INSUFFICIENT_RESOURCES;
	}

	/* TODO: Check remaining parameters */

	/* Check BIS indices */
	last_index = -1;
	for (uint8_t i = 0U; i < num_bis; i++) {
		/* Stream index must be in valid range and in ascending order */
		if (!IN_RANGE(bis[i], 0x01, 0x1F) || (bis[i] <= last_index)) {
			return BT_HCI_ERR_INVALID_PARAM;

		} else if (bis[i] > sync->num_bis) {
			return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
		}
		last_index = bis[i];
	}

	/* Check if encryption is supported */
	if (!IS_ENABLED(CONFIG_BT_CTLR_BROADCAST_ISO_ENC) &&
	    encryption) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Check if the requested encryption matches */
	if (encryption != sync->enc) {
		return BT_HCI_ERR_ENC_MODE_NOT_ACCEPTABLE;
	}

	/* Check if free BISes are available */
	if (mem_free_count_get(stream_free) < num_bis) {
		return BT_HCI_ERR_INSUFFICIENT_RESOURCES;
	}

	link_sync_estab = ll_rx_link_alloc();
	if (!link_sync_estab) {
		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	link_sync_lost = ll_rx_link_alloc();
	if (!link_sync_lost) {
		ll_rx_link_release(link_sync_estab);

		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	node_rx = ll_rx_alloc();
	if (!node_rx) {
		ll_rx_link_release(link_sync_lost);
		ll_rx_link_release(link_sync_estab);

		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	/* Initialize the ISO sync ULL context */
	sync_iso->sync = sync;
	sync_iso->timeout = sync_timeout;
	sync_iso->timeout_reload = 0U;
	sync_iso->timeout_expire = 0U;

	/* Setup the periodic sync to establish ISO sync */
	node_rx->hdr.link = link_sync_estab;
	sync->iso.node_rx_estab = node_rx;
	sync_iso->node_rx_lost.rx.hdr.link = link_sync_lost;

	/* Initialize sync LLL context */
	lll = &sync_iso->lll;
	lll->latency_prepare = 0U;
	lll->latency_event = 0U;
	lll->window_widening_prepare_us = 0U;
	lll->window_widening_event_us = 0U;
	lll->ctrl = 0U;
	lll->cssn_curr = 0U;
	lll->cssn_next = 0U;
	lll->term_reason = 0U;

	if (IS_ENABLED(CONFIG_BT_CTLR_BROADCAST_ISO_ENC) && encryption) {
		const uint8_t BIG1[16] = {0x31, 0x47, 0x49, 0x42, };
		const uint8_t BIG2[4] = {0x32, 0x47, 0x49, 0x42};
		uint8_t igltk[16];
		int err;

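		/* BIG key derivation per the Core Specification: BIG1 and
		 * BIG2 above are the ASCII strings "BIG1" and "BIG2" in
		 * little-endian byte order, and
		 *   iGLTK = h7("BIG1", Broadcast_Code)
		 *   GLTK  = h6(iGLTK, "BIG2")
		 */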
		/* Calculate GLTK */
		err = bt_crypto_h7(BIG1, bcode, igltk);
		LL_ASSERT(!err);
		err = bt_crypto_h6(igltk, BIG2, sync_iso->gltk);
		LL_ASSERT(!err);

		lll->enc = 1U;
	} else {
		lll->enc = 0U;
	}

	/* TODO: Implement usage of MSE to limit listening to subevents */

	/* Allocate streams */
	lll->stream_count = num_bis;
	for (uint8_t i = 0U; i < num_bis; i++) {
		struct lll_sync_iso_stream *stream;

		stream = (void *)sync_iso_stream_acquire();
		stream->big_handle = big_handle;
		stream->bis_index = bis[i];
		stream->dp = NULL;
		stream->test_mode = &test_mode[i];
		memset(stream->test_mode, 0, sizeof(struct ll_iso_rx_test_mode));
		lll->stream_handle[i] = sync_iso_stream_handle_get(stream);
	}

	/* Initialize ULL and LLL headers */
	ull_hdr_init(&sync_iso->ull);
	lll_hdr_init(lll, sync_iso);

	/* Enable the periodic sync context to establish ISO sync */
	sync->iso.sync_iso = sync_iso;

	return BT_HCI_ERR_SUCCESS;
}
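
/* Example (hypothetical host-side flow, assuming a periodic advertising sync
 * already exists on sync_handle 0x0000 and broadcasts at least two BISes):
 *
 *   uint8_t bis[] = { 0x01, 0x02 };
 *   uint8_t err;
 *
 *   err = ll_big_sync_create(0x00, 0x0000, 0U, NULL, 0U, 100U, 2U, bis);
 *
 * On BT_HCI_ERR_SUCCESS the BIG sync is armed; the BIG Sync Established
 * event is generated once a BIGInfo is received (sync_timeout is in units
 * of 10 ms, so 100U is 1 s).
 */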

uint8_t ll_big_sync_terminate(uint8_t big_handle, void **rx)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_flush};

	struct ll_sync_iso_set *sync_iso;
	memq_link_t *link_sync_estab;
	struct node_rx_pdu *node_rx;
	memq_link_t *link_sync_lost;
	struct ll_sync_set *sync;
	struct k_sem sem;
	uint32_t ret;
	int err;

	sync_iso = sync_iso_get(big_handle);
	if (!sync_iso) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	sync = sync_iso->sync;
	if (sync && sync->iso.sync_iso) {
		struct node_rx_sync_iso *se;

		if (sync->iso.sync_iso != sync_iso) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
		sync->iso.sync_iso = NULL;

		node_rx = sync->iso.node_rx_estab;
		link_sync_estab = node_rx->hdr.link;
		link_sync_lost = sync_iso->node_rx_lost.rx.hdr.link;

		ll_rx_link_release(link_sync_lost);
		ll_rx_link_release(link_sync_estab);
		ll_rx_release(node_rx);

		node_rx = (void *)&sync_iso->node_rx_lost;
		node_rx->hdr.type = NODE_RX_TYPE_SYNC_ISO;
		node_rx->hdr.handle = big_handle;

		/* NOTE: Since NODE_RX_TYPE_SYNC_ISO is only generated from ULL
		 * context, pass ULL context as parameter.
		 */
		node_rx->rx_ftr.param = sync_iso;

		/* NOTE: struct node_rx_lost has a uint8_t member to store the
		 * reason.
		 */
		se = (void *)node_rx->pdu;
		se->status = BT_HCI_ERR_OP_CANCELLED_BY_HOST;

		*rx = node_rx;

		return BT_HCI_ERR_SUCCESS;
	}

	err = ull_ticker_stop_with_mark((TICKER_ID_SCAN_SYNC_ISO_BASE +
					 sync_iso_handle_to_index(big_handle)),
					sync_iso, &sync_iso->lll);
	LL_ASSERT_INFO2(err == 0 || err == -EALREADY, big_handle, err);
	if (err) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Do a blocking mayfly call to LLL context to flush any outstanding
	 * operations.
	 */
	sync_iso->flush_sem = &sem;
	k_sem_init(&sem, 0, 1);
	mfy.param = &sync_iso->lll;

	ret = mayfly_enqueue(TICKER_USER_ID_THREAD, TICKER_USER_ID_LLL, 0, &mfy);
	LL_ASSERT(!ret);
	k_sem_take(&sem, K_FOREVER);
	sync_iso->flush_sem = NULL;

	/* Release resources */
	ull_sync_iso_stream_release(sync_iso);

	link_sync_lost = sync_iso->node_rx_lost.rx.hdr.link;
	ll_rx_link_release(link_sync_lost);

	return BT_HCI_ERR_SUCCESS;
}

int ull_sync_iso_init(void)
{
	int err;

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

int ull_sync_iso_reset(void)
{
	int err;

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

uint8_t ull_sync_iso_lll_index_get(struct lll_sync_iso *lll)
{
	return ARRAY_INDEX(ll_sync_iso, HDR_LLL2ULL(lll));
}

struct ll_sync_iso_set *ull_sync_iso_by_stream_get(uint16_t handle)
{
	if (handle >= CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT) {
		return NULL;
	}

	return sync_iso_get(stream_pool[handle].big_handle);
}

struct lll_sync_iso_stream *ull_sync_iso_stream_get(uint16_t handle)
{
	struct ll_sync_iso_set *sync_iso;

	/* Get the BIG Sync context and check that it is not terminated */
	sync_iso = ull_sync_iso_by_stream_get(handle);
	if (!sync_iso || !sync_iso->sync) {
		return NULL;
	}

	return &stream_pool[handle];
}

struct lll_sync_iso_stream *ull_sync_iso_lll_stream_get(uint16_t handle)
{
	return ull_sync_iso_stream_get(handle);
}

void ull_sync_iso_stream_release(struct ll_sync_iso_set *sync_iso)
{
	struct lll_sync_iso *lll;

	lll = &sync_iso->lll;
	while (lll->stream_count--) {
		struct lll_sync_iso_stream *stream;
		struct ll_iso_datapath *dp;
		uint16_t stream_handle;

		stream_handle = lll->stream_handle[lll->stream_count];
		stream = ull_sync_iso_stream_get(stream_handle);
		LL_ASSERT(stream);

		dp = stream->dp;
		if (dp) {
			stream->dp = NULL;
			isoal_sink_destroy(dp->sink_hdl);
			ull_iso_datapath_release(dp);
		}

		mem_release(stream, &stream_free);
	}

	sync_iso->sync = NULL;
}

void ull_sync_iso_setup(struct ll_sync_iso_set *sync_iso,
			struct node_rx_pdu *node_rx,
			uint8_t *acad, uint8_t acad_len)
{
	struct lll_sync_iso_stream *stream;
	uint32_t ticks_slot_overhead;
	uint32_t sync_iso_offset_us;
	uint32_t ticks_slot_offset;
	uint32_t ticks_threshold;
	struct lll_sync_iso *lll;
	struct node_rx_ftr *ftr;
	struct pdu_big_info *bi;
	uint32_t ready_delay_us;
	uint32_t ticks_expire;
	uint32_t interval_us;
	uint32_t ticks_diff;
	struct pdu_adv *pdu;
	uint32_t slot_us;
	uint8_t num_bis;
	uint8_t bi_size;
	uint8_t handle;
	uint32_t ret;
	uint8_t sca;

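	/* Scan the ACAD for a BIGInfo AD structure; each AD structure is a
	 * length octet, followed by the AD type octet and the AD data.
	 */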
	while (acad_len) {
		const uint8_t hdr_len = acad[PDU_ADV_DATA_HEADER_LEN_OFFSET];

		if ((hdr_len >= PDU_ADV_DATA_HEADER_TYPE_SIZE) &&
		    (acad[PDU_ADV_DATA_HEADER_TYPE_OFFSET] ==
		     BT_DATA_BIG_INFO)) {
			break;
		}

		if (acad_len < (hdr_len + PDU_ADV_DATA_HEADER_LEN_SIZE)) {
			return;
		}

		acad_len -= hdr_len + PDU_ADV_DATA_HEADER_LEN_SIZE;
		acad += hdr_len + PDU_ADV_DATA_HEADER_LEN_SIZE;
	}

	if ((acad_len < (PDU_BIG_INFO_CLEARTEXT_SIZE +
			 PDU_ADV_DATA_HEADER_SIZE)) ||
	    ((acad[PDU_ADV_DATA_HEADER_LEN_OFFSET] !=
	      (PDU_ADV_DATA_HEADER_TYPE_SIZE + PDU_BIG_INFO_CLEARTEXT_SIZE)) &&
	     (acad[PDU_ADV_DATA_HEADER_LEN_OFFSET] !=
	      (PDU_ADV_DATA_HEADER_TYPE_SIZE + PDU_BIG_INFO_ENCRYPTED_SIZE)))) {
		return;
	}

	bi_size = acad[PDU_ADV_DATA_HEADER_LEN_OFFSET] -
		  PDU_ADV_DATA_HEADER_TYPE_SIZE;
	bi = (void *)&acad[PDU_ADV_DATA_HEADER_DATA_OFFSET];

	lll = &sync_iso->lll;
	(void)memcpy(lll->seed_access_addr, &bi->seed_access_addr,
		     sizeof(lll->seed_access_addr));
	(void)memcpy(lll->base_crc_init, &bi->base_crc_init,
		     sizeof(lll->base_crc_init));

	(void)memcpy(lll->data_chan_map, bi->chm_phy,
		     sizeof(lll->data_chan_map));
	lll->data_chan_map[4] &= 0x1F;
	lll->data_chan_count = util_ones_count_get(lll->data_chan_map,
						   sizeof(lll->data_chan_map));
	if (lll->data_chan_count < CHM_USED_COUNT_MIN) {
		return;
	}

	/* Reset the ISO create BIG flag in the periodic advertising context */
	sync_iso->sync->iso.sync_iso = NULL;

	lll->phy = BIT(bi->chm_phy[4] >> 5);

	lll->num_bis = PDU_BIG_INFO_NUM_BIS_GET(bi);
	lll->bn = PDU_BIG_INFO_BN_GET(bi);
	lll->nse = PDU_BIG_INFO_NSE_GET(bi);
	lll->sub_interval = PDU_BIG_INFO_SUB_INTERVAL_GET(bi);
	lll->max_pdu = bi->max_pdu;
	lll->pto = PDU_BIG_INFO_PTO_GET(bi);
	if (lll->pto) {
		lll->ptc = lll->bn;
	} else {
		lll->ptc = 0U;
	}
	lll->bis_spacing = PDU_BIG_INFO_SPACING_GET(bi);
	lll->irc = PDU_BIG_INFO_IRC_GET(bi);
	lll->sdu_interval = PDU_BIG_INFO_SDU_INTERVAL_GET(bi);

	/* Pick the 39-bit payload count; the 1 MSb is the framing bit */
	lll->payload_count = (uint64_t)bi->payload_count_framing[0];
	lll->payload_count |= (uint64_t)bi->payload_count_framing[1] << 8;
	lll->payload_count |= (uint64_t)bi->payload_count_framing[2] << 16;
	lll->payload_count |= (uint64_t)bi->payload_count_framing[3] << 24;
	lll->payload_count |= (uint64_t)(bi->payload_count_framing[4] & 0x7f) << 32;
	lll->framing = (bi->payload_count_framing[4] & 0x80) >> 7;

	/* Set establishment event countdown */
	lll->establish_events = CONN_ESTAB_COUNTDOWN;

	if (IS_ENABLED(CONFIG_BT_CTLR_BROADCAST_ISO_ENC) &&
	    lll->enc && (bi_size == PDU_BIG_INFO_ENCRYPTED_SIZE)) {
		const uint8_t BIG3[4] = {0x33, 0x47, 0x49, 0x42};
		struct ccm *ccm_rx;
		uint8_t gsk[16];
		int err;

		/* Copy the GIV from the BIGInfo */
		(void)memcpy(lll->giv, bi->giv, sizeof(lll->giv));

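		/* Per the Core Specification BIG key derivation, the group
		 * session key is GSK = h8(GLTK, GSKD, "BIG3"); GSKD is
		 * carried in the encrypted BIGInfo and BIG3 above is the
		 * ASCII string "BIG3" in little-endian byte order.
		 */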
		/* Calculate GSK */
		err = bt_crypto_h8(sync_iso->gltk, bi->gskd, BIG3, gsk);
		LL_ASSERT(!err);

		/* Prepare the CCM parameters */
		ccm_rx = &lll->ccm_rx;
		ccm_rx->direction = 1U;
		(void)memcpy(&ccm_rx->iv[4], &lll->giv[4], 4U);
		(void)mem_rcopy(ccm_rx->key, gsk, sizeof(ccm_rx->key));

		/* NOTE: counter is filled in LLL */
	} else {
		lll->enc = 0U;
	}

	/* Initialize payload pointers */
	lll->payload_count_max = PDU_BIG_PAYLOAD_COUNT_MAX;
	lll->payload_tail = 0U;
	for (int i = 0; i < CONFIG_BT_CTLR_SYNC_ISO_STREAM_MAX; i++) {
		for (int j = 0; j < lll->payload_count_max; j++) {
			lll->payload[i][j] = NULL;
		}
	}

	lll->iso_interval = PDU_BIG_INFO_ISO_INTERVAL_GET(bi);
	interval_us = lll->iso_interval * PERIODIC_INT_UNIT_US;

	sync_iso->timeout_reload =
		RADIO_SYNC_EVENTS((sync_iso->timeout * 10U * USEC_PER_MSEC),
				  interval_us);

	sca = sync_iso->sync->lll.sca;
	lll->window_widening_periodic_us =
		DIV_ROUND_UP(((lll_clock_ppm_local_get() +
			       lll_clock_ppm_get(sca)) *
			      interval_us), USEC_PER_SEC);
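	/* Cap the maximum window widening at half the ISO interval, less the
	 * inter frame space, so that consecutive BIG events cannot overlap.
	 */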
	lll->window_widening_max_us = (interval_us >> 1) - EVENT_IFS_US;
	if (PDU_BIG_INFO_OFFS_UNITS_GET(bi)) {
		lll->window_size_event_us = OFFS_UNIT_300_US;
	} else {
		lll->window_size_event_us = OFFS_UNIT_30_US;
	}

	ftr = &node_rx->rx_ftr;
	pdu = (void *)((struct node_rx_pdu *)node_rx)->pdu;

	ready_delay_us = lll_radio_rx_ready_delay_get(lll->phy, PHY_FLAGS_S8);

	/* Calculate the BIG Offset in microseconds */
	sync_iso_offset_us = ftr->radio_end_us;
	sync_iso_offset_us += PDU_BIG_INFO_OFFS_GET(bi) *
			      lll->window_size_event_us;
	/* Skip to the first selected BIS subevent */
	/* FIXME: add support for interleaved packing */
	stream = ull_sync_iso_stream_get(lll->stream_handle[0]);
	sync_iso_offset_us += (stream->bis_index - 1U) * lll->sub_interval *
			      ((lll->irc * lll->bn) + lll->ptc);
	sync_iso_offset_us -= PDU_AC_US(pdu->len, sync_iso->sync->lll.phy,
					ftr->phy_flags);
	sync_iso_offset_us -= EVENT_TICKER_RES_MARGIN_US;
	sync_iso_offset_us -= EVENT_JITTER_US;
	sync_iso_offset_us -= ready_delay_us;

	interval_us -= lll->window_widening_periodic_us;

	/* Calculate ISO Receiver BIG event timings */

	/* Maximum number of BISes to sync, from the first BIS to sync */
	/* NOTE: When ULL scheduling is implemented for subevents, then update
	 * the time reservation as required.
	 */
	num_bis = lll->num_bis - (stream->bis_index - 1U);

	/* 1. Maximum PDU transmission time on the 1M/2M/S8 PHY is 17040 us,
	 *    i.e. representable in 15 bits.
	 * 2. NSE in the range 1 to 31 is represented in 5 bits.
	 * 3. num_bis in the range 1 to 31 is represented in 5 bits.
	 *
	 * Hence, the worst case event time is representable in 25 bits, plus
	 * one bit each for the added ctrl_spacing and radio event overheads,
	 * i.e. 27 bits are required and sufficiently covered by the 32-bit
	 * data type used for slot_us.
	 */

	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_ISO_RESERVE_MAX)) {
		uint32_t ctrl_spacing_us;

		/* Maximum time reservation for both sequential and
		 * interleaved packing.
		 */
		if (lll->bis_spacing >= (lll->sub_interval * lll->nse)) {
			slot_us = lll->sub_interval * lll->nse * num_bis;
		} else {
			slot_us = lll->bis_spacing * lll->nse * num_bis;
		}

		ctrl_spacing_us = PDU_BIS_US(sizeof(struct pdu_big_ctrl),
					     lll->enc, lll->phy, PHY_FLAGS_S8);
		slot_us += ctrl_spacing_us;

	} else if (lll->bis_spacing >= (lll->sub_interval * lll->nse)) {
		/* Time reservation omitting PTC subevents in sequential
		 * packing.
		 */
		slot_us = lll->sub_interval * ((lll->nse * num_bis) - lll->ptc);

	} else {
		/* Time reservation omitting PTC subevents in interleaved
		 * packing.
		 */
		slot_us = lll->bis_spacing * ((lll->nse - lll->ptc) * num_bis);
	}

	/* Add radio ready delay */
	slot_us += ready_delay_us;
	slot_us += lll->window_widening_periodic_us << 1U;
	slot_us += EVENT_JITTER_US << 1U;
	slot_us += EVENT_TICKER_RES_MARGIN_US << 2U;

	/* Add implementation defined radio event overheads */
	if (IS_ENABLED(CONFIG_BT_CTLR_EVENT_OVERHEAD_RESERVE_MAX)) {
		slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
	}

	/* TODO: active_to_start feature port */
	sync_iso->ull.ticks_active_to_start = 0U;
	sync_iso->ull.ticks_prepare_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	sync_iso->ull.ticks_preempt_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
	sync_iso->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);

	ticks_slot_offset = MAX(sync_iso->ull.ticks_active_to_start,
				sync_iso->ull.ticks_prepare_to_start);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}
	ticks_slot_offset += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

	/* Check and skip to the next interval if CPU processing introduces
	 * latency that can delay scheduling the first ISO event.
	 */
	ticks_expire = ftr->ticks_anchor - ticks_slot_offset +
		       HAL_TICKER_US_TO_TICKS(sync_iso_offset_us);
	ticks_threshold = ticker_ticks_now_get() +
			  HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);
	ticks_diff = ticker_ticks_diff_get(ticks_expire, ticks_threshold);
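	/* The MSBit set in the wrapping tick difference means ticks_expire is
	 * already in the past relative to the threshold; move one interval
	 * ahead and account for the added window widening and the payloads
	 * elapsed in the skipped event.
	 */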
	if (ticks_diff & BIT(HAL_TICKER_CNTR_MSBIT)) {
		sync_iso_offset_us += interval_us -
				      lll->window_widening_periodic_us;
		lll->window_widening_event_us +=
			lll->window_widening_periodic_us;
		lll->payload_count += lll->bn;
	}

	/* Setup to use the ISO create prepare function until sync is
	 * established.
	 */
	mfy_lll_prepare.fp = lll_sync_iso_create_prepare;

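	/* When neither low latency option is enabled, request expiry on every
	 * interval (TICKER_LAZY_MUST_EXPIRE) so that ticker_cb() runs even
	 * when the radio event is skipped; see the TICKER_LAZY_MUST_EXPIRE
	 * early return in ticker_cb().
	 */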
	handle = sync_iso_handle_get(sync_iso);
	ret = ticker_start(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			   (TICKER_ID_SCAN_SYNC_ISO_BASE +
			    sync_iso_handle_to_index(handle)),
			   ftr->ticks_anchor - ticks_slot_offset,
			   HAL_TICKER_US_TO_TICKS(sync_iso_offset_us),
			   HAL_TICKER_US_TO_TICKS(interval_us),
			   HAL_TICKER_REMAINDER(interval_us),
#if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
	!defined(CONFIG_BT_CTLR_LOW_LAT)
			   TICKER_LAZY_MUST_EXPIRE,
#else
			   TICKER_NULL_LAZY,
#endif /* !CONFIG_BT_TICKER_LOW_LAT && !CONFIG_BT_CTLR_LOW_LAT */
			   (sync_iso->ull.ticks_slot + ticks_slot_overhead),
			   ticker_cb, sync_iso,
			   ticker_start_op_cb, (void *)__LINE__);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}

void ull_sync_iso_estab_done(struct node_rx_event_done *done)
{
	struct ll_sync_iso_set *sync_iso;
	struct node_rx_sync_iso *se;
	struct node_rx_pdu *rx;

	if (done->extra.trx_cnt || done->extra.estab_failed) {
		/* Switch to normal prepare */
		mfy_lll_prepare.fp = lll_sync_iso_prepare;

		/* Get reference to ULL context */
		sync_iso = CONTAINER_OF(done->param, struct ll_sync_iso_set, ull);

		/* Prepare BIG Sync Established */
		rx = (void *)sync_iso->sync->iso.node_rx_estab;
		rx->hdr.type = NODE_RX_TYPE_SYNC_ISO;
		rx->hdr.handle = sync_iso_handle_get(sync_iso);
		rx->rx_ftr.param = sync_iso;

		/* status value is stored in the PDU member of the node rx */
		se = (void *)rx->pdu;
		if (done->extra.estab_failed) {
			if (sync_iso->lll.term_reason != BT_HCI_ERR_SUCCESS) {
				se->status = sync_iso->lll.term_reason;
			} else {
				se->status = BT_HCI_ERR_CONN_FAIL_TO_ESTAB;
			}
		} else {
			se->status = BT_HCI_ERR_SUCCESS;
		}

		ll_rx_put_sched(rx->hdr.link, rx);
	}

	ull_sync_iso_done(done);
}

void ull_sync_iso_done(struct node_rx_event_done *done)
{
	struct ll_sync_iso_set *sync_iso;
	uint32_t ticks_drift_minus;
	uint32_t ticks_drift_plus;
	struct lll_sync_iso *lll;
	uint16_t elapsed_event;
	uint16_t latency_event;
	uint16_t lazy;
	uint8_t force;

	/* Get reference to ULL context */
	sync_iso = CONTAINER_OF(done->param, struct ll_sync_iso_set, ull);
	lll = &sync_iso->lll;

	/* Events elapsed, used in the timeout checks below */
	latency_event = lll->latency_event;
	if (lll->latency_prepare) {
		elapsed_event = latency_event + lll->latency_prepare;
	} else {
		elapsed_event = latency_event + 1U;
	}

	/* Check for establishment failure */
	if (done->extra.estab_failed) {
		/* Stop the Sync ISO Ticker directly. The establishment
		 * failure has been notified.
		 */
		stop_ticker(sync_iso, NULL);
		return;
	}

	/* Sync drift compensation and new skip calculation */
	ticks_drift_plus = 0U;
	ticks_drift_minus = 0U;
	if (done->extra.trx_cnt) {
		/* Calculate drift in ticks unit */
		ull_drift_ticks_get(done, &ticks_drift_plus,
				    &ticks_drift_minus);

		/* Reset latency */
		lll->latency_event = 0U;
	}

	/* Reset supervision countdown */
	if (done->extra.crc_valid) {
		sync_iso->timeout_expire = 0U;
	} else {
		/* If the anchor point is not synchronized, start the timeout
		 * countdown.
		 */
		if (!sync_iso->timeout_expire) {
			sync_iso->timeout_expire = sync_iso->timeout_reload;
		}
	}

	/* Check timeout */
	force = 0U;
	if (sync_iso->timeout_expire) {
		if (sync_iso->timeout_expire > elapsed_event) {
			sync_iso->timeout_expire -= elapsed_event;

			/* Break skip */
			lll->latency_event = 0U;

			if (latency_event) {
				force = 1U;
			}
		} else {
			timeout_cleanup(sync_iso);

			return;
		}
	}

	/* Check if the skip needs an update */
	lazy = 0U;
	if (force || (latency_event != lll->latency_event)) {
		lazy = lll->latency_event + 1U;
	}

	/* Update the Sync ticker instance */
	if (ticks_drift_plus || ticks_drift_minus || lazy || force) {
		uint8_t handle = sync_iso_handle_get(sync_iso);
		uint32_t ticker_status;

		/* The call to ticker_update can fail under the race condition
		 * wherein the periodic sync role is being stopped but, at the
		 * same time, is preempted by a periodic sync event that gets
		 * into the close state. Accept failure when the periodic sync
		 * role is being stopped.
		 */
		ticker_status = ticker_update(TICKER_INSTANCE_ID_CTLR,
					      TICKER_USER_ID_ULL_HIGH,
					      (TICKER_ID_SCAN_SYNC_ISO_BASE +
					       sync_iso_handle_to_index(handle)),
					      ticks_drift_plus,
					      ticks_drift_minus, 0U, 0U,
					      lazy, force,
					      ticker_update_op_cb,
					      sync_iso);
		LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
			  (ticker_status == TICKER_STATUS_BUSY) ||
			  ((void *)sync_iso == ull_disable_mark_get()));
	}
}

void ull_sync_iso_done_terminate(struct node_rx_event_done *done)
{
	struct ll_sync_iso_set *sync_iso;
	struct lll_sync_iso *lll;
	struct node_rx_pdu *rx;

	/* Get reference to ULL context */
	sync_iso = CONTAINER_OF(done->param, struct ll_sync_iso_set, ull);
	lll = &sync_iso->lll;

	/* Populate the Sync Lost message that will be enqueued in
	 * disabled_cb.
	 */
	rx = (void *)&sync_iso->node_rx_lost;
	rx->hdr.handle = sync_iso_handle_get(sync_iso);
	rx->hdr.type = NODE_RX_TYPE_SYNC_ISO_LOST;
	rx->rx_ftr.param = sync_iso;
	*((uint8_t *)rx->pdu) = lll->term_reason;

	/* Stop the Sync ISO Ticker */
	stop_ticker(sync_iso, ticker_stop_op_cb);
}

static void disable(uint8_t sync_idx)
{
	struct ll_sync_iso_set *sync_iso;
	int err;

	sync_iso = &ll_sync_iso[sync_idx];

	/* Stop any active resume ticker */
	(void)ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			  TICKER_ID_SCAN_SYNC_ISO_RESUME_BASE + sync_idx,
			  NULL, NULL);

	err = ull_ticker_stop_with_mark(TICKER_ID_SCAN_SYNC_ISO_BASE +
					sync_idx, sync_iso, &sync_iso->lll);
	LL_ASSERT_INFO2(err == 0 || err == -EALREADY, sync_idx, err);
}

static int init_reset(void)
{
	uint8_t idx;

	/* Disable all active BIGs (uses blocking ull_ticker_stop_with_mark) */
	for (idx = 0U; idx < CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET; idx++) {
		disable(idx);
	}

	mem_init((void *)stream_pool, sizeof(struct lll_sync_iso_stream),
		 CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT, &stream_free);

	memset(&ll_sync_iso, 0, sizeof(ll_sync_iso));

	/* Initialize LLL */
	return lll_sync_iso_init();
}

static struct ll_sync_iso_set *sync_iso_get(uint8_t handle)
{
	for (uint8_t idx = 0; idx < CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET; idx++) {
		if (ll_sync_iso[idx].sync && ll_sync_iso[idx].big_handle == handle) {
			return &ll_sync_iso[idx];
		}
	}

	return NULL;
}

static struct ll_sync_iso_set *sync_iso_alloc(uint8_t handle)
{
	for (uint8_t idx = 0; idx < CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET; idx++) {
		if (!ll_sync_iso[idx].sync) {
			ll_sync_iso[idx].big_handle = handle;
			return &ll_sync_iso[idx];
		}
	}

	return NULL;
}

static uint8_t sync_iso_handle_get(struct ll_sync_iso_set *sync)
{
	return sync->big_handle;
}

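/* Map a BIG handle to its index in ll_sync_iso[]; callers must pass the
 * handle of an allocated set, since a NULL from sync_iso_get() is caught by
 * the bounds assertion within ARRAY_INDEX().
 */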
static uint8_t sync_iso_handle_to_index(uint8_t handle)
{
	return ARRAY_INDEX(ll_sync_iso, sync_iso_get(handle));
}

static struct stream *sync_iso_stream_acquire(void)
{
	return mem_acquire(&stream_free);
}

static uint16_t sync_iso_stream_handle_get(struct lll_sync_iso_stream *stream)
{
	return mem_index_get(stream, stream_pool, sizeof(*stream));
}

static void timeout_cleanup(struct ll_sync_iso_set *sync_iso)
{
	struct node_rx_pdu *rx;

	/* Populate the Sync Lost message that will be enqueued in
	 * disabled_cb.
	 */
	rx = (void *)&sync_iso->node_rx_lost;
	rx->hdr.handle = sync_iso_handle_get(sync_iso);
	rx->rx_ftr.param = sync_iso;

	if (mfy_lll_prepare.fp == lll_sync_iso_prepare) {
		rx->hdr.type = NODE_RX_TYPE_SYNC_ISO_LOST;
		*((uint8_t *)rx->pdu) = BT_HCI_ERR_CONN_TIMEOUT;
	} else {
		rx->hdr.type = NODE_RX_TYPE_SYNC_ISO;
		*((uint8_t *)rx->pdu) = BT_HCI_ERR_CONN_FAIL_TO_ESTAB;
	}

	/* Stop the Sync ISO Ticker */
	stop_ticker(sync_iso, ticker_stop_op_cb);
}

static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param)
{
	static struct lll_prepare_param p;
	struct ll_sync_iso_set *sync_iso;
	struct lll_sync_iso *lll;
	uint32_t ret;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_O(1);

	if (!IS_ENABLED(CONFIG_BT_TICKER_LOW_LAT) &&
	    !IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) &&
	    (lazy == TICKER_LAZY_MUST_EXPIRE)) {
		/* FIXME: generate ISO PDU with status set to invalid */

		DEBUG_RADIO_PREPARE_O(0);
		return;
	}

	sync_iso = param;
	lll = &sync_iso->lll;

	/* Increment the prepare reference count */
	ref = ull_ref_inc(&sync_iso->ull);
	LL_ASSERT(ref);

	/* Append timing parameters */
	p.ticks_at_expire = ticks_at_expire;
	p.remainder = remainder;
	p.lazy = lazy;
	p.force = force;
	p.param = lll;
	mfy_lll_prepare.param = &p;

	/* Kick the LLL prepare */
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, 0U,
			     &mfy_lll_prepare);
	LL_ASSERT(!ret);

	DEBUG_RADIO_PREPARE_O(1);
}

static void ticker_start_op_cb(uint32_t status, void *param)
{
	ARG_UNUSED(param);

	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
}

static void ticker_update_op_cb(uint32_t status, void *param)
{
	LL_ASSERT(status == TICKER_STATUS_SUCCESS ||
		  param == ull_disable_mark_get());
}

static void ticker_stop_op_cb(uint32_t status, void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0U, 0U, &link, NULL, sync_iso_disable};
	uint32_t ret;

	LL_ASSERT(status == TICKER_STATUS_SUCCESS);

	/* Check for any pending LLL events that need to be aborted */
	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
			     TICKER_USER_ID_ULL_HIGH, 0U, &mfy);
	LL_ASSERT(!ret);
}

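/* Ticker has stopped: abort or wait out any LLL events still in the prepare
 * pipeline, then have disabled_cb() generate the sync lost notification and
 * flush the LLL context.
 */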
static void sync_iso_disable(void *param)
{
	struct ll_sync_iso_set *sync_iso;
	struct ull_hdr *hdr;

	/* Check the ref count to determine if any LLL events are pending in
	 * the pipeline.
	 */
	sync_iso = param;
	hdr = &sync_iso->ull;
	if (ull_ref_get(hdr)) {
		static memq_link_t link;
		static struct mayfly mfy = {0U, 0U, &link, NULL, lll_disable};
		uint32_t ret;

		mfy.param = &sync_iso->lll;

		/* Setup the disabled callback to be called when the ref count
		 * returns to zero.
		 */
		LL_ASSERT(!hdr->disabled_cb);
		hdr->disabled_param = mfy.param;
		hdr->disabled_cb = disabled_cb;

		/* Trigger LLL disable */
		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
				     TICKER_USER_ID_LLL, 0U, &mfy);
		LL_ASSERT(!ret);
	} else {
		/* No pending LLL events */
		disabled_cb(&sync_iso->lll);
	}
}

static void lll_flush(void *param)
{
	struct ll_sync_iso_set *sync_iso;
	uint8_t handle;

	/* Get reference to ULL context */
	sync_iso = HDR_LLL2ULL(param);
	handle = sync_iso_handle_get(sync_iso);

	lll_sync_iso_flush(handle, param);

	if (sync_iso->flush_sem) {
		k_sem_give(sync_iso->flush_sem);
	}
}

static void disabled_cb(void *param)
{
	static memq_link_t mfy_link;
	static struct mayfly mfy = {0U, 0U, &mfy_link, NULL, lll_flush};
	struct ll_sync_iso_set *sync_iso;
	struct node_rx_pdu *rx;
	memq_link_t *link;
	uint32_t ret;

	/* Get reference to ULL context */
	sync_iso = HDR_LLL2ULL(param);

	/* Generate BIG sync lost */
	rx = (void *)&sync_iso->node_rx_lost;
	LL_ASSERT(rx->hdr.link);
	link = rx->hdr.link;
	rx->hdr.link = NULL;

	/* Enqueue the BIG sync lost towards ULL context */
	ll_rx_put_sched(link, rx);

	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
			     TICKER_USER_ID_LLL, 0U, &mfy);
	LL_ASSERT(!ret);
}

static void stop_ticker(struct ll_sync_iso_set *sync_iso, ticker_op_func fp_op_func)
{
	uint8_t handle;
	uint32_t ret;

	handle = sync_iso_handle_get(sync_iso);

	(void)ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			  TICKER_ID_SCAN_SYNC_ISO_RESUME_BASE +
			  sync_iso_handle_to_index(handle), NULL, NULL);

	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			  TICKER_ID_SCAN_SYNC_ISO_BASE +
			  sync_iso_handle_to_index(handle),
			  fp_op_func, fp_op_func ? (void *)sync_iso : NULL);

	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}