/*
 * Copyright (c) 2021 Demant
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/bluetooth/hci_types.h>

#include "util/util.h"
#include "util/mem.h"
#include "util/memq.h"
#include "util/mayfly.h"
#include "util/dbuf.h"

#include "ticker/ticker.h"

#include "hal/ccm.h"
#include "hal/ticker.h"

#include "pdu_df.h"
#include "lll/pdu_vendor.h"
#include "pdu.h"

#include "lll.h"
#include "lll/lll_vendor.h"
#include "lll_clock.h"
#include "lll/lll_df_types.h"
#include "lll_conn.h"
#include "lll_conn_iso.h"
#include "lll_central_iso.h"
#include "lll_peripheral_iso.h"
#include "lll_iso_tx.h"

#include "ll_sw/ull_tx_queue.h"

#include "isoal.h"

#include "ull_iso_types.h"
#include "ull_conn_types.h"
#include "ull_conn_iso_types.h"

#include "ull_llcp.h"

#include "ull_internal.h"
#include "ull_conn_internal.h"
#include "ull_iso_internal.h"
#include "ull_conn_iso_internal.h"
#include "ull_peripheral_iso_internal.h"

#include "ll.h"

#include "hal/debug.h"

/* Used by LISTIFY */
#define _INIT_MAYFLY_ARRAY(_i, _l, _fp) \
	{ ._link = &_l[_i], .fp = _fp },

/* Declare static initialized array of mayflies with associated link element */
#define DECLARE_MAYFLY_ARRAY(_name, _fp, _cnt) \
	static memq_link_t _links[_cnt]; \
	static struct mayfly _name[_cnt] = \
		{ LISTIFY(_cnt, _INIT_MAYFLY_ARRAY, (), _links, _fp) }
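/* Illustrative expansion (not generated code): DECLARE_MAYFLY_ARRAY(mfys, cb, 2)
 * yields, via LISTIFY with an empty separator:
 *
 *   static memq_link_t _links[2];
 *   static struct mayfly mfys[2] =
 *       { { ._link = &_links[0], .fp = cb }, { ._link = &_links[1], .fp = cb }, };
 *
 * i.e. one statically linked mayfly per instance, all sharing the callback cb.
 */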


static int init_reset(void);
#if !defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
static void cis_lazy_fill(struct ll_conn_iso_stream *cis);
static void mfy_cis_lazy_fill(void *param);
static void ticker_next_slot_get_op_cb(uint32_t status, void *param);
#endif /* !CONFIG_BT_CTLR_JIT_SCHEDULING */
static void ticker_start_op_cb(uint32_t status, void *param);
static void ticker_update_cig_op_cb(uint32_t status, void *param);
static void ticker_resume_op_cb(uint32_t status, void *param);
static void ticker_resume_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
			     uint32_t remainder, uint16_t lazy, uint8_t force,
			     void *param);
static void cis_disabled_cb(void *param);
static void ticker_stop_op_cb(uint32_t status, void *param);
static void cig_disable(void *param);
static void cig_disabled_cb(void *param);
static void disable(uint16_t handle);
static void cis_tx_lll_flush(void *param);

static struct ll_conn_iso_stream cis_pool[CONFIG_BT_CTLR_CONN_ISO_STREAMS];
static void *cis_free;

static struct ll_conn_iso_group cig_pool[CONFIG_BT_CTLR_CONN_ISO_GROUPS];
static void *cig_free;

/* BT 5.3 Spec - Vol 4, Part E, Sect 6.7 */
#define CONN_ACCEPT_TIMEOUT_DEFAULT 0x1F40
#define CONN_ACCEPT_TIMEOUT_MAX 0xB540
#define CONN_ACCEPT_TIMEOUT_MIN 0x0001
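/* Note: per the HCI definition these values are in units of 0.625 ms, so
 * the default 0x1F40 (8000 * 0.625 ms) is 5 s and the maximum 0xB540 is
 * 29 s.
 */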
static uint16_t conn_accept_timeout;

struct ll_conn_iso_group *ll_conn_iso_group_acquire(void)
{
	return mem_acquire(&cig_free);
}

void ll_conn_iso_group_release(struct ll_conn_iso_group *cig)
{
	cig->cig_id = 0xFF;
	cig->state = CIG_STATE_NO_CIG;
	cig->lll.num_cis = 0U;

	mem_release(cig, &cig_free);
}

uint16_t ll_conn_iso_group_handle_get(struct ll_conn_iso_group *cig)
{
	return mem_index_get(cig, cig_pool, sizeof(struct ll_conn_iso_group));
}

struct ll_conn_iso_group *ll_conn_iso_group_get(uint16_t handle)
{
	return mem_get(cig_pool, sizeof(struct ll_conn_iso_group), handle);
}

struct ll_conn_iso_group *ll_conn_iso_group_get_by_id(uint8_t id)
{
	struct ll_conn_iso_group *cig;

	for (int h = 0; h < CONFIG_BT_CTLR_CONN_ISO_GROUPS; h++) {
		cig = ll_conn_iso_group_get(h);
		if (id == cig->cig_id) {
			return cig;
		}
	}

	return NULL;
}

struct ll_conn_iso_stream *ll_conn_iso_stream_acquire(void)
{
	struct ll_conn_iso_stream *cis = mem_acquire(&cis_free);

	if (cis) {
		(void)memset(&cis->hdr, 0U, sizeof(cis->hdr));
	}

	return cis;
}

void ll_conn_iso_stream_release(struct ll_conn_iso_stream *cis)
{
	cis->cis_id = 0;
	cis->group = NULL;

	mem_release(cis, &cis_free);
}

uint16_t ll_conn_iso_stream_handle_get(struct ll_conn_iso_stream *cis)
{
	return mem_index_get(cis, cis_pool,
			     sizeof(struct ll_conn_iso_stream)) +
			     LL_CIS_HANDLE_BASE;
}

struct ll_conn_iso_stream *ll_conn_iso_stream_get(uint16_t handle)
{
	return mem_get(cis_pool, sizeof(struct ll_conn_iso_stream), handle -
		       LL_CIS_HANDLE_BASE);
}

struct lll_conn_iso_stream *ull_conn_iso_lll_stream_get(uint16_t handle)
{
	struct ll_conn_iso_stream *cis;

	cis = ll_conn_iso_stream_get(handle);
	if (!cis) {
		return NULL;
	}

	return &cis->lll;
}

struct ll_conn_iso_stream *ll_iso_stream_connected_get(uint16_t handle)
{
	struct ll_conn_iso_stream *cis;

	if (handle >= CONFIG_BT_CTLR_CONN_ISO_STREAMS +
		      LL_CIS_HANDLE_BASE) {
		return NULL;
	}

	cis = ll_conn_iso_stream_get(handle);
	if ((cis->group == NULL) || (cis->lll.handle != handle) || !cis->established) {
		/* CIS does not belong to a group, has inconsistent handle or is
		 * not yet established.
		 */
		return NULL;
	}

	return cis;
}

struct ll_conn_iso_stream *ll_conn_iso_stream_get_by_acl(struct ll_conn *conn, uint16_t *cis_iter)
{
	uint8_t cis_iter_start = (cis_iter == NULL) || (*cis_iter) == UINT16_MAX;
	uint8_t cig_handle;

	/* Find CIS associated with ACL conn */
	for (cig_handle = 0; cig_handle < CONFIG_BT_CTLR_CONN_ISO_GROUPS; cig_handle++) {
		struct ll_conn_iso_stream *cis;
		struct ll_conn_iso_group *cig;
		uint16_t handle_iter;
		int8_t cis_idx;

		cig = ll_conn_iso_group_get(cig_handle);
		if (!cig) {
			continue;
		}

		handle_iter = UINT16_MAX;

		/* Find next connected CIS in the group */
		for (cis_idx = 0; cis_idx < CONFIG_BT_CTLR_CONN_ISO_STREAMS_PER_GROUP; cis_idx++) {
			cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);
			if (cis) {
				uint16_t cis_handle = cis->lll.handle;

				cis = ll_iso_stream_connected_get(cis_handle);
				if (!cis) {
					/* CIS is not connected */
					continue;
				}

				if (!cis_iter_start) {
					/* Look for iterator start handle */
					cis_iter_start = cis_handle == (*cis_iter);
				} else if (cis->lll.acl_handle == conn->lll.handle) {
					if (cis_iter) {
						(*cis_iter) = cis_handle;
					}

					return cis;
				}
			}
		}
	}

	return NULL;
}
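
/* Illustrative caller sketch (hypothetical, not part of this file): prime the
 * iterator with UINT16_MAX to walk every connected CIS on an ACL:
 *
 *   uint16_t cis_iter = UINT16_MAX;
 *   struct ll_conn_iso_stream *cis;
 *
 *   while ((cis = ll_conn_iso_stream_get_by_acl(conn, &cis_iter)) != NULL) {
 *       // cis_iter now holds the handle of cis; the next call resumes the
 *       // search just after it.
 *   }
 */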

struct ll_conn_iso_stream *ll_conn_iso_stream_get_by_group(struct ll_conn_iso_group *cig,
							   uint16_t *handle_iter)
{
	struct ll_conn_iso_stream *cis;
	uint16_t handle_start;
	uint16_t handle;

	handle_start = (handle_iter == NULL) || ((*handle_iter) == UINT16_MAX) ?
		       LL_CIS_HANDLE_BASE : (*handle_iter) + 1;

	for (handle = handle_start; handle <= LL_CIS_HANDLE_LAST; handle++) {
		cis = ll_conn_iso_stream_get(handle);
		if (cis->group == cig) {
			if (handle_iter) {
				(*handle_iter) = handle;
			}
			return cis;
		}
	}

	return NULL;
}

struct ll_conn_iso_stream *ll_conn_iso_stream_get_by_id(uint8_t cis_id)
{
	struct ll_conn_iso_stream *cis;
	uint16_t handle;

	for (handle = LL_CIS_HANDLE_BASE; handle <= LL_CIS_HANDLE_LAST; handle++) {
		cis = ll_conn_iso_stream_get(handle);
		if (cis->group && (cis->cis_id == cis_id)) {
			return cis;
		}
	}

	return NULL;
}

struct lll_conn_iso_stream *
ull_conn_iso_lll_stream_get_by_group(struct lll_conn_iso_group *cig_lll,
				     uint16_t *handle_iter)
{
	struct ll_conn_iso_stream *cis;
	struct ll_conn_iso_group *cig;

	cig = HDR_LLL2ULL(cig_lll);
	cis = ll_conn_iso_stream_get_by_group(cig, handle_iter);
	if (!cis) {
		return NULL;
	}

	return &cis->lll;
}

struct lll_conn_iso_group *
ull_conn_iso_lll_group_get_by_stream(struct lll_conn_iso_stream *cis_lll)
{
	struct ll_conn_iso_stream *cis;
	struct ll_conn_iso_group *cig;

	cis = ll_conn_iso_stream_get(cis_lll->handle);
	cig = cis->group;

	return &cig->lll;
}

uint8_t ll_conn_iso_accept_timeout_get(uint16_t *timeout)
{
	*timeout = conn_accept_timeout;

	return 0;
}

uint8_t ll_conn_iso_accept_timeout_set(uint16_t timeout)
{
	if (!IN_RANGE(timeout, CONN_ACCEPT_TIMEOUT_MIN,
		      CONN_ACCEPT_TIMEOUT_MAX)) {
		return BT_HCI_ERR_INVALID_LL_PARAM;
	}

	conn_accept_timeout = timeout;

	return 0;
}

void ull_conn_iso_lll_cis_established(struct lll_conn_iso_stream *cis_lll)
{
	struct ll_conn_iso_stream *cis =
		ll_conn_iso_stream_get(cis_lll->handle);
	struct node_rx_pdu *node_rx;

	if (cis->established) {
		return;
	}

	node_rx = ull_pdu_rx_alloc();
	if (!node_rx) {
		/* No node available - try again later */
		return;
	}

	node_rx->hdr.type = NODE_RX_TYPE_CIS_ESTABLISHED;

	/* Send node to ULL RX demuxer for triggering LLCP state machine */
	node_rx->hdr.handle = cis->lll.acl_handle;

	ull_rx_put_sched(node_rx->hdr.link, node_rx);

	cis->established = 1;
}

void ull_conn_iso_done(struct node_rx_event_done *done)
{
	struct lll_conn_iso_group *lll;
	struct ll_conn_iso_group *cig;
	struct ll_conn_iso_stream *cis;
	uint32_t ticks_drift_minus;
	uint32_t ticks_drift_plus;
	uint16_t handle_iter;
	uint8_t cis_idx;

	/* Get reference to ULL context */
	cig = CONTAINER_OF(done->param, struct ll_conn_iso_group, ull);
	lll = &cig->lll;

	/* Skip if CIG terminated by local host */
	if (unlikely(lll->handle == 0xFFFF)) {
		return;
	}

	ticks_drift_plus = 0;
	ticks_drift_minus = 0;
	handle_iter = UINT16_MAX;
	cis = NULL;

	/* Check all CISes for supervision/establishment timeout */
	for (cis_idx = 0; cis_idx < cig->lll.num_cis; cis_idx++) {
		cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);
		LL_ASSERT(cis);

		if (cis->lll.active && cis->lll.handle != LLL_HANDLE_INVALID) {
			/* CIS was setup and is now expected to be going */
			if (done->extra.trx_performed_bitmask &
			    (1U << LL_CIS_IDX_FROM_HANDLE(cis->lll.handle))) {
				if (done->extra.mic_state == LLL_CONN_MIC_FAIL) {
					/* MIC failure - stop CIS and defer cleanup to after
					 * teardown.
					 */
					ull_conn_iso_cis_stop(cis, NULL,
							      BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL);
				} else {
					cis->event_expire = 0U;
				}
			} else {
				/* We did NOT have successful transaction on established CIS,
				 * or CIS was not yet established, so handle timeout
				 */
				if (!cis->event_expire) {
					struct ll_conn *conn = ll_conn_get(cis->lll.acl_handle);

					cis->event_expire = RADIO_CONN_EVENTS(
						conn->supervision_timeout * 10U * 1000U,
						cig->iso_interval * CONN_INT_UNIT_US);
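					/* Illustrative (assuming RADIO_CONN_EVENTS()
					 * divides the timeout by the event spacing):
					 * supervision_timeout = 100 (1 s, in 10 ms
					 * units) and iso_interval = 8 (10 ms, in
					 * 1.25 ms units) arm event_expire with
					 * 100 ISO events.
					 */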

				} else if (cis->event_expire > cig->lll.latency_event) {
					cis->event_expire -= cig->lll.latency_event;

				} else {
					cis->event_expire = 0U;

					/* Stop CIS and defer cleanup to after teardown. This will
					 * only generate a terminate event to the host if CIS has
					 * been established. If CIS was not established, the
					 * teardown will send CIS_ESTABLISHED with failure.
					 */
					ull_conn_iso_cis_stop(cis, NULL,
							      cis->established ?
							      BT_HCI_ERR_CONN_TIMEOUT :
							      BT_HCI_ERR_CONN_FAIL_TO_ESTAB);

				}
			}
		}
	}

	if (IS_PERIPHERAL(cig) && done->extra.trx_performed_bitmask) {
		ull_drift_ticks_get(done, &ticks_drift_plus,
				    &ticks_drift_minus);
	}

	/* Update CIG ticker to compensate for drift.
	 * Since all CISes in a CIG 'belong to' the same ACL,
	 * any CIS found in the above for-loop can be used to dereference the ACL.
	 */
	if (cis && (ticks_drift_plus || ticks_drift_minus)) {
		uint8_t ticker_id = TICKER_ID_CONN_ISO_BASE +
				    ll_conn_iso_group_handle_get(cig);
		struct ll_conn *conn = ll_connected_get(cis->lll.acl_handle);
		uint32_t ticker_status;

		ticker_status = ticker_update(TICKER_INSTANCE_ID_CTLR,
					      TICKER_USER_ID_ULL_HIGH,
					      ticker_id,
					      ticks_drift_plus,
					      ticks_drift_minus, 0, 0,
					      TICKER_NULL_LAZY, 0,
					      ticker_update_cig_op_cb,
					      cig);

		LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
			  (ticker_status == TICKER_STATUS_BUSY) ||
			  ((void *)conn == ull_disable_mark_get()));
	}
}

/**
 * @brief Stop and tear down a connected ISO stream
 * This function may be called to tear down a CIS. When the CIS teardown
 * has completed and the stream is released, the cis_released_cb callback
 * is invoked, if provided.
 *
 * @param cis Pointer to connected ISO stream to stop
 * @param cis_released_cb Callback to invoke when the CIS has been released.
 *                        NULL to ignore.
 * @param reason Termination reason
 */
void ull_conn_iso_cis_stop(struct ll_conn_iso_stream *cis,
			   ll_iso_stream_released_cb_t cis_released_cb,
			   uint8_t reason)
{
	struct ll_conn_iso_group *cig;
	struct ull_hdr *hdr;

	if (cis->teardown) {
		/* Teardown already started */
		LL_ASSERT(!cis->released_cb || !cis_released_cb ||
			  (cis->released_cb == cis_released_cb));

		if (cis_released_cb) {
			cis->released_cb = cis_released_cb;
		}

		return;
	}

	cis->teardown = 1;
	cis->released_cb = cis_released_cb;
	cis->terminate_reason = reason;

	/* Check ref count to determine if any pending LLL events in pipeline */
	cig = cis->group;
	hdr = &cig->ull;
	if (ull_ref_get(hdr)) {
		static memq_link_t link;
		static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
		uint32_t ret;

		mfy.param = &cig->lll;

		/* Setup disabled callback to be called when ref count
		 * returns to zero.
		 */
		/* Event is active (prepare/done ongoing) - wait for done and
		 * continue CIS teardown from there. The disabled_cb cannot be
		 * reserved for other use.
		 */
		LL_ASSERT(!hdr->disabled_cb ||
			  (hdr->disabled_cb == cis_disabled_cb));
		hdr->disabled_param = mfy.param;
		hdr->disabled_cb = cis_disabled_cb;

		/* Trigger LLL disable */
		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
				     TICKER_USER_ID_LLL, 0, &mfy);
		LL_ASSERT(!ret);
	} else {
		/* No pending LLL events */

		/* Tear down CIS now in ULL_HIGH context. Ignore enqueue
		 * error (already enqueued) as all CISes marked for teardown
		 * will be handled in cis_disabled_cb. Use mayfly chaining to
		 * prevent recursive stop calls.
		 */
		cis_disabled_cb(&cig->lll);
	}
}

void ull_conn_iso_resume_ticker_start(struct lll_event *resume_event,
				      uint16_t cis_handle,
				      uint32_t ticks_anchor,
				      uint32_t resume_timeout)
{
	struct lll_conn_iso_group *cig;
	uint32_t resume_delay_us;
	int32_t resume_offset_us;
	uint8_t ticker_id;
	uint32_t ret;

	cig = resume_event->prepare_param.param;
	ticker_id = TICKER_ID_CONN_ISO_RESUME_BASE + cig->handle;

	if (cig->resume_cis != LLL_HANDLE_INVALID) {
		/* Restarting resume ticker - must be stopped first */
		(void)ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_LLL,
				  ticker_id, NULL, NULL);
	}
	cig->resume_cis = cis_handle;

	resume_delay_us = EVENT_OVERHEAD_START_US;
	resume_delay_us += EVENT_TICKER_RES_MARGIN_US;

	if (cig->role == BT_HCI_ROLE_PERIPHERAL) {
		/* Add peripheral specific delay */
		resume_delay_us += EVENT_JITTER_US;
		if (0) {
#if defined(CONFIG_BT_CTLR_PHY)
		} else {
			struct ll_conn_iso_stream *cis;
			struct ll_conn *conn;

			cis = ll_conn_iso_stream_get(cis_handle);
			conn = ll_conn_get(cis->lll.acl_handle);

			resume_delay_us +=
				lll_radio_rx_ready_delay_get(conn->lll.phy_rx,
							     PHY_FLAGS_S8);
#else
		} else {
			resume_delay_us += lll_radio_rx_ready_delay_get(0, 0);
#endif /* CONFIG_BT_CTLR_PHY */
		}
	}

	resume_offset_us = (int32_t)(resume_timeout - resume_delay_us);
	LL_ASSERT(resume_offset_us >= 0);

	/* Setup resume timeout as single-shot */
	ret = ticker_start(TICKER_INSTANCE_ID_CTLR,
			   TICKER_USER_ID_LLL,
			   ticker_id,
			   ticks_anchor,
			   HAL_TICKER_US_TO_TICKS(resume_offset_us),
			   TICKER_NULL_PERIOD,
			   TICKER_NULL_REMAINDER,
			   TICKER_NULL_LAZY,
			   TICKER_NULL_SLOT,
			   ticker_resume_cb, resume_event,
			   ticker_resume_op_cb, NULL);

	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}

int ull_conn_iso_init(void)
{
	return init_reset();
}

int ull_conn_iso_reset(void)
{
	return init_reset();
}

static int init_reset(void)
{
	struct ll_conn_iso_stream *cis;
	struct ll_conn_iso_group *cig;
	uint16_t handle;
	int err;

	/* Disable all active CIGs (uses blocking ull_ticker_stop_with_mark) */
	for (handle = 0U; handle < CONFIG_BT_CTLR_CONN_ISO_GROUPS; handle++) {
		disable(handle);
	}

	/* Initialize CIS pool */
	mem_init(cis_pool, sizeof(struct ll_conn_iso_stream),
		 sizeof(cis_pool) / sizeof(struct ll_conn_iso_stream),
		 &cis_free);

	/* Initialize CIG pool */
	mem_init(cig_pool, sizeof(struct ll_conn_iso_group),
		 sizeof(cig_pool) / sizeof(struct ll_conn_iso_group),
		 &cig_free);

	for (handle = 0; handle < CONFIG_BT_CTLR_CONN_ISO_GROUPS; handle++) {
		cig = ll_conn_iso_group_get(handle);
		cig->cig_id = 0xFF;
		cig->state = CIG_STATE_NO_CIG;
		cig->lll.num_cis = 0;
	}

	for (handle = LL_CIS_HANDLE_BASE; handle <= LL_CIS_HANDLE_LAST;
	     handle++) {
		cis = ll_conn_iso_stream_get(handle);
		cis->cis_id = 0;
		cis->group = NULL;
		cis->lll.link_tx_free = NULL;
	}

	conn_accept_timeout = CONN_ACCEPT_TIMEOUT_DEFAULT;

	/* Initialize LLL */
	err = lll_conn_iso_init();
	if (err) {
		return err;
	}

	return 0;
}

void ull_conn_iso_ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
			    uint32_t remainder, uint16_t lazy, uint8_t force,
			    void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = { 0, 0, &link, NULL, NULL };
	static struct lll_prepare_param p;
	struct ll_conn_iso_group *cig;
	struct ll_conn_iso_stream *cis;
	uint64_t leading_event_count;
	uint16_t handle_iter;
	uint32_t err;
	uint8_t ref;

	cig = param;
	leading_event_count = 0;

	/* Check if stopping ticker (on disconnection, race with ticker expiry)
	 */
	if (unlikely(cig->lll.handle == 0xFFFF)) {
		return;
	}

	handle_iter = UINT16_MAX;

	/* Increment CIS event counters */
	for (int i = 0; i < cig->lll.num_cis; i++) {
		cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);
		LL_ASSERT(cis);

		/* A new CIS may become available by creation prior to the CIG
		 * event, in which case it has event_count == 0. Don't increment
		 * the event count until its handle is validated in
		 * ull_conn_iso_start(), which means that its ACL instant
		 * has been reached and its offset calculated.
		 */
		if (cis->lll.handle != 0xFFFF && cis->lll.active) {
			cis->lll.event_count += (lazy + 1U);

#if !defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
			cis->lll.event_count -= cis->lll.lazy_active;
			cis->lll.lazy_active = 0U;
#endif /* !CONFIG_BT_CTLR_JIT_SCHEDULING */

			leading_event_count = MAX(leading_event_count,
						  cis->lll.event_count);

			ull_iso_lll_event_prepare(cis->lll.handle, cis->lll.event_count);
		}

		/* Latch datapath validity entering event */
		cis->lll.datapath_ready_rx = cis->hdr.datapath_out != NULL;
	}

	/* Update the CIG reference point for this event. Event 0 for the
	 * leading CIS in the CIG would have had its reference point set in
	 * ull_conn_iso_start(). The reference point should only be
	 * updated from event 1 onwards. Although the CIG reference point set
	 * this way is not accurate, it is the best possible until the anchor
	 * point for the leading CIS is available for this event.
	 */
	if (leading_event_count > 0) {
		cig->cig_ref_point = isoal_get_wrapped_time_us(cig->cig_ref_point,
						cig->iso_interval * CONN_INT_UNIT_US);
	}
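	/* Illustrative: with iso_interval = 8 (8 * 1250 us) the reference
	 * point advances by 10 ms per CIG event, using wrap-around-safe
	 * microsecond arithmetic in isoal_get_wrapped_time_us().
	 */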

	/* Increment prepare reference count */
	ref = ull_ref_inc(&cig->ull);
	LL_ASSERT(ref);

	/* Append timing parameters */
	p.ticks_at_expire = ticks_at_expire;
	p.remainder = remainder;
	p.lazy = lazy;
	p.param = &cig->lll;
	mfy.param = &p;

#if defined(CONFIG_BT_CTLR_CENTRAL_ISO) && \
	defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	mfy.fp = IS_PERIPHERAL(cig) ? lll_peripheral_iso_prepare : lll_central_iso_prepare;

#elif defined(CONFIG_BT_CTLR_CENTRAL_ISO)
	mfy.fp = lll_central_iso_prepare;

#elif defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	mfy.fp = lll_peripheral_iso_prepare;

#else /* !CONFIG_BT_CTLR_CENTRAL_ISO && !CONFIG_BT_CTLR_PERIPHERAL_ISO */
	LL_ASSERT(0);

	return;
#endif /* !CONFIG_BT_CTLR_CENTRAL_ISO && !CONFIG_BT_CTLR_PERIPHERAL_ISO */

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	if (IS_PERIPHERAL(cig) && cig->sca_update) {
		/* CIG/ACL affiliation established */
		uint32_t iso_interval_us_frac =
			EVENT_US_TO_US_FRAC(cig->iso_interval * CONN_INT_UNIT_US);
		cig->lll.window_widening_periodic_us_frac =
			DIV_ROUND_UP(((lll_clock_ppm_local_get() +
				       lll_clock_ppm_get(cig->sca_update - 1)) *
				      iso_interval_us_frac),
				     1000000U);
		iso_interval_us_frac -= cig->lll.window_widening_periodic_us_frac;
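		/* Illustrative: with 50 ppm local and 50 ppm peer clock
		 * accuracy and a 10 ms ISO interval, the periodic widening
		 * is (100 * 10000) / 1000000 = 1 us per interval, rounded
		 * up; subtracting it makes the ticker drift early rather
		 * than late.
		 */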

		ull_peripheral_iso_update_ticker(cig, ticks_at_expire, iso_interval_us_frac);
		cig->sca_update = 0;
	}
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */

	/* Kick LLL prepare */
	err = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, 0, &mfy);
	LL_ASSERT(!err);

	/* Handle ISO Transmit Test for this CIG */
	ull_conn_iso_transmit_test_cig_interval(cig->lll.handle, ticks_at_expire);
}

void ull_conn_iso_start(struct ll_conn *conn, uint16_t cis_handle,
			uint32_t ticks_at_expire, uint32_t remainder,
			uint16_t instant_latency)
{
	struct ll_conn_iso_group *cig;
	struct ll_conn_iso_stream *cis;
	uint32_t acl_to_cig_ref_point;
	uint32_t cis_offs_to_cig_ref;
	uint32_t ticks_remainder;
	uint32_t ticks_periodic;
	uint32_t ticker_status;
	uint32_t remainder_us;
	int32_t cig_offset_us;
	uint32_t ticks_slot;
	uint8_t ticker_id;

	cis = ll_conn_iso_stream_get(cis_handle);
	cig = cis->group;

	cis_offs_to_cig_ref = cig->sync_delay - cis->sync_delay;

	cis->lll.offset = cis_offs_to_cig_ref;
	cis->lll.handle = cis_handle;

#if defined(CONFIG_BT_CTLR_LE_ENC)
	if (conn->lll.enc_tx) {
		/* copy the Session Key */
		memcpy(cis->lll.tx.ccm.key, conn->lll.ccm_tx.key,
		       sizeof(cis->lll.tx.ccm.key));

		/* copy the MSbits of IV Base */
		memcpy(&cis->lll.tx.ccm.iv[4], &conn->lll.ccm_tx.iv[4], 4);

		/* XOR the CIS access address to get IV */
		mem_xor_32(cis->lll.tx.ccm.iv, conn->lll.ccm_tx.iv,
			   cis->lll.access_addr);
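		/* The resulting CIS IV is the ACL IV with its least
		 * significant 4 octets XORed with the CIS access address,
		 * while the upper 4 octets (copied above) stay unchanged.
		 */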

		/* initialise counter */
		cis->lll.tx.ccm.counter = 0U;

		/* set direction: peripheral to central = 0,
		 * central to peripheral = 1
		 */
		cis->lll.tx.ccm.direction = !conn->lll.role;
	}

	if (conn->lll.enc_rx) {
		/* copy the Session Key */
		memcpy(cis->lll.rx.ccm.key, conn->lll.ccm_rx.key,
		       sizeof(cis->lll.rx.ccm.key));

		/* copy the MSbits of IV Base */
		memcpy(&cis->lll.rx.ccm.iv[4], &conn->lll.ccm_rx.iv[4], 4);

		/* XOR the CIS access address to get IV */
		mem_xor_32(cis->lll.rx.ccm.iv, conn->lll.ccm_rx.iv,
			   cis->lll.access_addr);

		/* initialise counter */
		cis->lll.rx.ccm.counter = 0U;

		/* set direction: peripheral to central = 0,
		 * central to peripheral = 1
		 */
		cis->lll.rx.ccm.direction = conn->lll.role;
	}
#endif /* CONFIG_BT_CTLR_LE_ENC */

	/* Connection establishment timeout */
	cis->event_expire = CONN_ESTAB_COUNTDOWN;

	/* Check if another CIS was already started and CIG ticker is
	 * running. If so, we just return with updated offset and
	 * validated handle.
	 */
	if (cig->state == CIG_STATE_ACTIVE) {
#if !defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
		/* Initialize CIS event lazy at CIS create */
		cis->lll.lazy_active = 0U;

		/* Deferred fill CIS event lazy value at CIS create */
		cis_lazy_fill(cis);
#else /* CONFIG_BT_CTLR_JIT_SCHEDULING */
		/* Set CIS active in already active CIG */
		cis->lll.active = 1U;
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */

		/* We're done */
		return;
	}

	ticker_id = TICKER_ID_CONN_ISO_BASE + ll_conn_iso_group_handle_get(cig);

	remainder_us = remainder;
	hal_ticker_remove_jitter(&ticks_at_expire, &remainder_us);

	/* Establish the CIG reference point by adjusting ACL-to-CIS offset
	 * (cis->offset) by the difference between CIG- and CIS sync delays.
	 */
	acl_to_cig_ref_point = cis->offset - cis_offs_to_cig_ref;

	/* Calculate initial ticker offset */
	cig_offset_us = remainder_us + acl_to_cig_ref_point;

	/* Calculate the CIG reference point of first CIG event. This
	 * calculation is inaccurate. However it is the best estimate available
	 * until the first anchor point for the leading CIS is available.
	 */
	cig->cig_ref_point = isoal_get_wrapped_time_us(HAL_TICKER_TICKS_TO_US(ticks_at_expire),
						       remainder_us +
						       EVENT_OVERHEAD_START_US +
						       acl_to_cig_ref_point);

	if (false) {

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	} else if (IS_PERIPHERAL(cig)) {
		uint32_t iso_interval_us_frac;

		/* Calculate interval in fractional microseconds for highest precision when
		 * accumulating the window widening window size. Ticker interval is set lopsided,
		 * with natural drift towards earlier timeout.
		 */
		iso_interval_us_frac = EVENT_US_TO_US_FRAC(cig->iso_interval * ISO_INT_UNIT_US) -
				       cig->lll.window_widening_periodic_us_frac;
		ticks_periodic = EVENT_US_FRAC_TO_TICKS(iso_interval_us_frac);
		ticks_remainder = EVENT_US_FRAC_TO_REMAINDER(iso_interval_us_frac);

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO_EARLY_CIG_START)
		bool early_start = (cis->offset < EVENT_OVERHEAD_START_US);

		if (early_start) {
			if (instant_latency == 0U) {
				/* Adjust CIG offset and reference point ahead one
				 * interval
				 */
				cig_offset_us += (conn->lll.interval * CONN_INT_UNIT_US);
				cig->cig_ref_point = isoal_get_wrapped_time_us(cig->cig_ref_point,
							conn->lll.interval * CONN_INT_UNIT_US);
			} else {
				LL_ASSERT(instant_latency == 1U);
			}
		} else {
			/* FIXME: Handle latency due to skipped ACL events around the
			 * instant to start CIG
			 */
			LL_ASSERT(instant_latency == 0U);
		}
#else /* CONFIG_BT_CTLR_PERIPHERAL_ISO_EARLY_CIG_START */
		/* FIXME: Handle latency due to skipped ACL events around the
		 * instant to start CIG
		 */
		LL_ASSERT(instant_latency == 0U);
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO_EARLY_CIG_START */

#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */

	} else if (IS_CENTRAL(cig)) {
		uint32_t iso_interval_us;

		iso_interval_us = cig->iso_interval * ISO_INT_UNIT_US;
		ticks_periodic = HAL_TICKER_US_TO_TICKS(iso_interval_us);
		ticks_remainder = HAL_TICKER_REMAINDER(iso_interval_us);

		/* FIXME: Handle latency due to skipped ACL events around the
		 * instant to start CIG
		 */
		LL_ASSERT(instant_latency == 0U);
	} else {
		LL_ASSERT(0);

		return;
	}

	/* Make sure we have time to service first subevent. TODO: Improve
	 * by skipping <n> interval(s) and incrementing event_count.
	 */
	LL_ASSERT(cig_offset_us > 0);

	ull_hdr_init(&cig->ull);

#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
	ticks_slot = 0U;

#else /* !CONFIG_BT_CTLR_JIT_SCHEDULING */
	uint32_t ticks_slot_overhead;
	uint32_t ticks_slot_offset;

	/* Calculate time reservations for sequential and interleaved packing as
	 * configured.
	 */
	if (IS_PERIPHERAL(cig)) {
		uint32_t slot_us;

		/* FIXME: Time reservation for interleaved packing */
		/* Below is time reservation for sequential packing */
		slot_us = cis->lll.sub_interval * cis->lll.nse;
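		/* Illustrative: nse = 3 subevents spaced sub_interval =
		 * 2500 us apart reserve 7500 us of air time per CIG event,
		 * before the optional start/end overheads below.
		 */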

		if (IS_ENABLED(CONFIG_BT_CTLR_EVENT_OVERHEAD_RESERVE_MAX)) {
			slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
		}

		/* FIXME: How to use ready_delay_us in the time reservation?
		 * i.e. when CISes use different PHYs? Is that even
		 * allowed?
		 *
		 * Missing code here, i.e. slot_us += ready_delay_us;
		 */

		/* Populate the ULL hdr with event timings overheads */
		cig->ull.ticks_active_to_start = 0U;
		cig->ull.ticks_prepare_to_start =
			HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
		cig->ull.ticks_preempt_to_start =
			HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
		cig->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);
	}

	ticks_slot_offset = MAX(cig->ull.ticks_active_to_start,
				cig->ull.ticks_prepare_to_start);

	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}

	ticks_slot = cig->ull.ticks_slot + ticks_slot_overhead;

	/* Initialize CIS event lazy at CIS create */
	cis->lll.lazy_active = 0U;
#endif /* !CONFIG_BT_CTLR_JIT_SCHEDULING */

	/* Start CIS peripheral CIG ticker */
	ticker_status = ticker_start_us(TICKER_INSTANCE_ID_CTLR,
					TICKER_USER_ID_ULL_HIGH,
					ticker_id, ticks_at_expire,
					HAL_TICKER_US_TO_TICKS(cig_offset_us),
					HAL_TICKER_REMAINDER(cig_offset_us),
					ticks_periodic, ticks_remainder,
					TICKER_NULL_LAZY, ticks_slot,
					ull_conn_iso_ticker_cb, cig,
					ticker_start_op_cb, NULL);
	LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
		  (ticker_status == TICKER_STATUS_BUSY));

	/* Set CIG and the first CIS state as active */
	cig->state = CIG_STATE_ACTIVE;
	cis->lll.active = 1U;
}

#if !defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
static void cis_lazy_fill(struct ll_conn_iso_stream *cis)
{
	static memq_link_t link;
	static struct mayfly mfy = {0U, 0U, &link, NULL, mfy_cis_lazy_fill};
	uint32_t ret;

	mfy.param = cis;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1U, &mfy);
	LL_ASSERT(!ret);
}

static void mfy_cis_lazy_fill(void *param)
{
	struct ll_conn_iso_stream *cis;
	struct ll_conn_iso_group *cig;
	uint32_t ticks_to_expire;
	uint32_t ticks_current;
	uint32_t remainder;
	uint16_t lazy = 0U;
	uint8_t ticker_id;
	uint8_t retry;
	uint8_t id;

	cis = param;
	cig = cis->group;
	ticker_id = TICKER_ID_CONN_ISO_BASE + ll_conn_iso_group_handle_get(cig);

	id = TICKER_NULL;
	ticks_to_expire = 0U;
	ticks_current = 0U;

	/* In the first iteration the actual ticks_current value is returned,
	 * which will be different from the initial value of 0 that is set.
	 * Subsequent iterations should return the same ticks_current as the
	 * reference tick.
	 * In order to avoid infinite updates of the ticker's reference due to
	 * any race condition with expiring tickers, we try up to 3 more times.
	 * Hence, first iteration to get an actual ticks_current and 3 more as
	 * retries when race conditions change the value of ticks_current.
	 *
	 * ticker_next_slot_get_ext() restarts iterating when an updated value
	 * of ticks_current is returned.
	 */
	retry = 4U;
	do {
		uint32_t volatile ret_cb;
		uint32_t ticks_previous;
		uint32_t ret;
		bool success;

		ticks_previous = ticks_current;

		ret_cb = TICKER_STATUS_BUSY;
		ret = ticker_next_slot_get_ext(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_LOW, &id,
					       &ticks_current, &ticks_to_expire, &remainder, &lazy,
					       NULL, NULL, ticker_next_slot_get_op_cb,
					       (void *)&ret_cb);
		if (ret == TICKER_STATUS_BUSY) {
			/* Busy wait until Ticker Job is enabled after any Radio
			 * event is done using the Radio hardware. Ticker Job
			 * ISR is disabled during Radio events in LOW_LAT
			 * feature to avoid Radio ISR latencies.
			 */
			while (ret_cb == TICKER_STATUS_BUSY) {
				ticker_job_sched(TICKER_INSTANCE_ID_CTLR,
						 TICKER_USER_ID_ULL_LOW);
			}
		}

		success = (ret_cb == TICKER_STATUS_SUCCESS);
		LL_ASSERT(success);

		LL_ASSERT((ticks_current == ticks_previous) || retry--);

		LL_ASSERT(id != TICKER_NULL);
	} while (id != ticker_id);

	/* Set the CIS active in the already active CIG, and latch any
	 * laziness the CIG accumulated before the CIS went active, to be
	 * decremented when event_count is incremented in
	 * ull_conn_iso_ticker_cb().
	 */
	cis->lll.active = 1U;
	cis->lll.lazy_active = lazy;
}

static void ticker_next_slot_get_op_cb(uint32_t status, void *param)
{
	*((uint32_t volatile *)param) = status;
}
#endif /* !CONFIG_BT_CTLR_JIT_SCHEDULING */

static void ticker_start_op_cb(uint32_t status, void *param)
{
	ARG_UNUSED(param);

	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
}

static void ticker_update_cig_op_cb(uint32_t status, void *param)
{
	/* CIG drift compensation succeeds, or it fails in a race condition
	 * when disconnecting (race between ticker_update and ticker_stop
	 * calls). TODO: Are the race-checks needed?
	 */
	LL_ASSERT(status == TICKER_STATUS_SUCCESS ||
		  param == ull_update_mark_get() ||
		  param == ull_disable_mark_get());
}

static void ticker_resume_op_cb(uint32_t status, void *param)
{
	ARG_UNUSED(param);

	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
}

static void ticker_resume_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
			     uint32_t remainder, uint16_t lazy, uint8_t force,
			     void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_resume};
	struct lll_conn_iso_group *cig;
	struct lll_event *resume_event;
	uint32_t ret;

	ARG_UNUSED(ticks_drift);
	LL_ASSERT(lazy == 0);

	resume_event = param;

	/* Append timing parameters */
	resume_event->prepare_param.ticks_at_expire = ticks_at_expire;
	resume_event->prepare_param.remainder = remainder;
	resume_event->prepare_param.lazy = 0;
	resume_event->prepare_param.force = force;
	mfy.param = resume_event;

	/* Mark resume as done */
	cig = resume_event->prepare_param.param;
	cig->resume_cis = LLL_HANDLE_INVALID;

	/* Kick LLL resume */
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
			     0, &mfy);

	LL_ASSERT(!ret);
}

static void cis_disabled_cb(void *param)
{
	struct ll_conn_iso_group *cig;
	struct ll_conn_iso_stream *cis;
	uint32_t ticker_status;
	struct ll_conn *conn;
	uint8_t active_cises;
	uint16_t handle_iter;
	uint8_t cis_idx;
	uint8_t num_cis;

	cig = HDR_LLL2ULL(param);
	handle_iter = UINT16_MAX;
	active_cises = 0;

	/* Remove all CISes marked for teardown */
	num_cis = cig->lll.num_cis;
	for (cis_idx = 0; cis_idx < num_cis; cis_idx++) {
		cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);
		LL_ASSERT(cis);

		if (!cis->lll.active && (cis->lll.flush != LLL_CIS_FLUSH_COMPLETE)) {
			/* CIS is not active and did not just complete LLL flush - skip it */
			continue;
		}

		active_cises++;

		if (cis->lll.flush == LLL_CIS_FLUSH_PENDING) {
			/* CIS has LLL flush pending - wait for completion */
			continue;
		} else if (cis->lll.flush == LLL_CIS_FLUSH_COMPLETE) {
			ll_iso_stream_released_cb_t cis_released_cb;

			conn = ll_conn_get(cis->lll.acl_handle);
			cis_released_cb = cis->released_cb;
			cis->released_cb = NULL;

			if (IS_PERIPHERAL(cig)) {
				/* Remove data path and ISOAL sink/source associated with this
				 * CIS for both directions. Disable them one at a time to make sure
				 * both are removed, even if only one is set.
				 */
				ll_remove_iso_path(cis->lll.handle,
						   BIT(BT_HCI_DATAPATH_DIR_HOST_TO_CTLR));
				ll_remove_iso_path(cis->lll.handle,
						   BIT(BT_HCI_DATAPATH_DIR_CTLR_TO_HOST));

				ll_conn_iso_stream_release(cis);

				cig->lll.num_cis--;

			} else if (IS_CENTRAL(cig)) {
				cis->established = 0U;
				cis->teardown = 0U;

				/* Prevent referencing inactive CIS */
				cis->lll.flush = LLL_CIS_FLUSH_NONE;
				cis->lll.acl_handle = LLL_HANDLE_INVALID;

			} else {
				LL_ASSERT(0);
			}

			/* CIS is no longer active */
			active_cises--;

			/* CIS terminated, triggers completion of CIS_TERMINATE_IND procedure */
			/* Only used by local procedure, ignored for remote procedure */
			conn->llcp.cis.terminate_ack = 1U;

			/* Check if removed CIS has an ACL disassociation callback. Invoke
			 * the callback to allow cleanup.
			 */
			if (cis_released_cb) {
				/* CIS removed - notify caller */
				cis_released_cb(conn);
			}
		} else if (cis->teardown) {
			DECLARE_MAYFLY_ARRAY(mfys, cis_tx_lll_flush,
					     CONFIG_BT_CTLR_CONN_ISO_GROUPS);
			uint32_t ret;

			if (cis->established) {
				struct node_rx_pdu *node_terminate;

				/* Create and enqueue termination node. This shall prevent
				 * further enqueuing of TX nodes for terminating CIS.
				 */
				node_terminate = ull_pdu_rx_alloc();
				LL_ASSERT(node_terminate);
				node_terminate->hdr.handle = cis->lll.handle;
				node_terminate->hdr.type = NODE_RX_TYPE_TERMINATE;
				*((uint8_t *)node_terminate->pdu) = cis->terminate_reason;

				ll_rx_put_sched(node_terminate->hdr.link, node_terminate);
			} else {
				conn = ll_conn_get(cis->lll.acl_handle);

				/* CIS was not established - complete the procedure with error */
				if (ull_cp_cc_awaiting_established(conn)) {
					ull_cp_cc_established(conn, cis->terminate_reason);
				}
			}

			if (cig->lll.resume_cis == cis->lll.handle) {
				/* Resume pending for terminating CIS - stop ticker */
				(void)ticker_stop(TICKER_INSTANCE_ID_CTLR,
						  TICKER_USER_ID_ULL_HIGH,
						  TICKER_ID_CONN_ISO_RESUME_BASE +
						  ll_conn_iso_group_handle_get(cig),
						  NULL, NULL);

				cig->lll.resume_cis = LLL_HANDLE_INVALID;
			}

			/* We need to flush TX nodes in LLL before releasing the stream.
			 * More than one CIG may be terminating at the same time, so
			 * enqueue a mayfly instance for this CIG.
			 */
			cis->lll.flush = LLL_CIS_FLUSH_PENDING;

			mfys[cig->lll.handle].param = &cis->lll;
			ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
					     TICKER_USER_ID_LLL, 1, &mfys[cig->lll.handle]);
			LL_ASSERT(!ret);

			return;
		}
	}

	if ((cig->state == CIG_STATE_ACTIVE) && !active_cises) {
		/* This was the last active CIS of the CIG. Initiate CIG teardown by
		 * stopping ticker.
		 */
		cig->state = CIG_STATE_INACTIVE;

		ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR,
					    TICKER_USER_ID_ULL_HIGH,
					    TICKER_ID_CONN_ISO_BASE +
					    ll_conn_iso_group_handle_get(cig),
					    ticker_stop_op_cb,
					    cig);

		LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
			  (ticker_status == TICKER_STATUS_BUSY));
	}
}

static void cis_tx_lll_flush(void *param)
{
	DECLARE_MAYFLY_ARRAY(mfys, cis_disabled_cb, CONFIG_BT_CTLR_CONN_ISO_GROUPS);

	struct lll_conn_iso_stream *lll;
	struct ll_conn_iso_stream *cis;
	struct ll_conn_iso_group *cig;
	struct node_tx_iso *tx;
	memq_link_t *link;

	lll = param;
	lll->active = 0U;

	cis = ll_conn_iso_stream_get(lll->handle);
	cig = cis->group;

	/* Flush in LLL - may return TX nodes to ack queue */
	lll_conn_iso_flush(lll->handle, lll);

	link = memq_dequeue(lll->memq_tx.tail, &lll->memq_tx.head, (void **)&tx);
	while (link) {
		link->next = tx->next;
		tx->next = link;
		ull_iso_lll_ack_enqueue(lll->handle, tx);

		link = memq_dequeue(lll->memq_tx.tail, &lll->memq_tx.head,
				    (void **)&tx);
	}
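	/* Note: each dequeued memq link is stashed on its TX node (via
	 * tx->next) before the node is pushed to the ISO ack queue, so the
	 * link can be recovered when the flushed node is later processed.
	 */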

	LL_ASSERT(!lll->link_tx_free);
	link = memq_deinit(&lll->memq_tx.head, &lll->memq_tx.tail);
	LL_ASSERT(link);
	lll->link_tx_free = link;

	lll->flush = LLL_CIS_FLUSH_COMPLETE;

	/* Resume CIS teardown in ULL_HIGH context */
	mfys[cig->lll.handle].param = &cig->lll;
	(void)mayfly_enqueue(TICKER_USER_ID_LLL,
			     TICKER_USER_ID_ULL_HIGH, 1, &mfys[cig->lll.handle]);
}

static void ticker_stop_op_cb(uint32_t status, void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, cig_disable};
	uint32_t ret;

	/* Assert if race between thread and ULL */
	LL_ASSERT(status == TICKER_STATUS_SUCCESS);

	/* Check if any pending LLL events that need to be aborted */
	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
			     TICKER_USER_ID_ULL_HIGH, 0, &mfy);
	LL_ASSERT(!ret);
}

static void cig_disable(void *param)
{
	struct ll_conn_iso_group *cig;
	struct ull_hdr *hdr;

	/* Check ref count to determine if any pending LLL events in pipeline */
	cig = param;
	hdr = &cig->ull;
	if (ull_ref_get(hdr)) {
		static memq_link_t link;
		static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
		uint32_t ret;

		mfy.param = &cig->lll;

		/* Setup disabled callback to be called when ref count
		 * returns to zero.
		 */
		LL_ASSERT(!hdr->disabled_cb);
		hdr->disabled_param = mfy.param;
		hdr->disabled_cb = cig_disabled_cb;

		/* Trigger LLL disable */
		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
				     TICKER_USER_ID_LLL, 0, &mfy);
		LL_ASSERT(!ret);
	} else {
		/* No pending LLL events */
		cig_disabled_cb(&cig->lll);
	}
}

static void cig_disabled_cb(void *param)
{
	struct ll_conn_iso_group *cig;

	cig = HDR_LLL2ULL(param);

	if (IS_PERIPHERAL(cig)) {
		ll_conn_iso_group_release(cig);
	}
}

static void disable(uint16_t handle)
{
	struct ll_conn_iso_group *cig;
	int err;

	cig = ll_conn_iso_group_get(handle);

	(void)ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
			  TICKER_ID_CONN_ISO_RESUME_BASE + handle, NULL,
			  NULL);

	err = ull_ticker_stop_with_mark(TICKER_ID_CONN_ISO_BASE + handle,
					cig, &cig->lll);

	LL_ASSERT(err == 0 || err == -EALREADY);

	cig->lll.handle = LLL_HANDLE_INVALID;
	cig->lll.resume_cis = LLL_HANDLE_INVALID;
}

/* An ISO interval has elapsed for a Connected Isochronous Group */
void ull_conn_iso_transmit_test_cig_interval(uint16_t handle, uint32_t ticks_at_expire)
{
	struct ll_conn_iso_stream *cis;
	struct ll_conn_iso_group *cig;
	uint32_t sdu_interval;
	uint32_t iso_interval;
	uint16_t handle_iter;
	uint64_t sdu_counter;
	uint8_t tx_sdu_count;

	cig = ll_conn_iso_group_get(handle);
	LL_ASSERT(cig);

	handle_iter = UINT16_MAX;

	if (IS_PERIPHERAL(cig)) {
		/* Peripheral */
		sdu_interval = cig->p_sdu_interval;

	} else if (IS_CENTRAL(cig)) {
		/* Central */
		sdu_interval = cig->c_sdu_interval;

	} else {
		LL_ASSERT(0);

		return;
	}

	iso_interval = cig->iso_interval * PERIODIC_INT_UNIT_US;

	/* Handle ISO Transmit Test for all active CISes in the group */
	for (uint8_t i = 0; i < cig->lll.num_cis; i++) {
		cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);
		LL_ASSERT(cis);

		if (!cis->hdr.test_mode.tx_enabled || cis->lll.handle == LLL_HANDLE_INVALID) {
			continue;
		}

		/* Calculate number of SDUs to transmit in the next ISO event. Ensure no overflow
		 * on 64-bit sdu_counter:
		 * (39 bits x 22 bits (4x10^6 us) = 61 bits / 8 bits (255 us) = 53 bits)
		 */
		sdu_counter = DIV_ROUND_UP((cis->lll.event_count + 1U) * iso_interval,
					   sdu_interval);
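		/* Illustrative: with iso_interval = 10000 us and
		 * sdu_interval = 5000 us, event_count = 4 gives
		 * sdu_counter = ceil(5 * 10000 / 5000) = 10, i.e. two new
		 * SDUs become due per ISO event once the counter is aligned.
		 */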

		if (cis->hdr.test_mode.tx_sdu_counter == 0U) {
			/* First ISO event. Align SDU counter for next event */
			cis->hdr.test_mode.tx_sdu_counter = sdu_counter;
			tx_sdu_count = 0U;
		} else {
			/* Calculate number of SDUs to produce for next ISO event */
			tx_sdu_count = sdu_counter - cis->hdr.test_mode.tx_sdu_counter;
		}

		/* Now process all SDUs due for next ISO event */
		for (uint8_t sdu = 0; sdu < tx_sdu_count; sdu++) {
			ll_iso_transmit_test_send_sdu(cis->lll.handle, ticks_at_expire);
		}
	}
}