/*
 * Copyright (c) 2021 Demant
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/util.h>
#include <zephyr/bluetooth/hci_types.h>

#include "util/util.h"
#include "util/mem.h"
#include "util/memq.h"
#include "util/mayfly.h"
#include "util/dbuf.h"

#include "ticker/ticker.h"

#include "hal/ccm.h"
#include "hal/ticker.h"

#include "pdu_df.h"
#include "lll/pdu_vendor.h"
#include "pdu.h"

#include "lll.h"
#include "lll/lll_vendor.h"
#include "lll_clock.h"
#include "lll/lll_df_types.h"
#include "lll_conn.h"
#include "lll_conn_iso.h"
#include "lll_central_iso.h"
#include "lll_peripheral_iso.h"
#include "lll_iso_tx.h"

#include "ll_sw/ull_tx_queue.h"

#include "isoal.h"

#include "ull_iso_types.h"
#include "ull_conn_types.h"
#include "ull_conn_iso_types.h"

#include "ull_llcp.h"

#include "ull_internal.h"
#include "ull_conn_internal.h"
#include "ull_iso_internal.h"
#include "ull_conn_iso_internal.h"
#include "ull_peripheral_iso_internal.h"

#include "ll.h"

#include "hal/debug.h"

/* Used by LISTIFY */
#define _INIT_MAYFLY_ARRAY(_i, _l, _fp) \
	{ ._link = &_l[_i], .fp = _fp },

/* Declare static initialized array of mayflies with associated link element */
#define DECLARE_MAYFLY_ARRAY(_name, _fp, _cnt) \
	static memq_link_t _links[_cnt]; \
	static struct mayfly _name[_cnt] = \
		{ LISTIFY(_cnt, _INIT_MAYFLY_ARRAY, (), _links, _fp) }
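/* Illustrative expansion (not generated code):
 * DECLARE_MAYFLY_ARRAY(mfys, cis_tx_lll_flush, 2) yields
 *   static memq_link_t _links[2];
 *   static struct mayfly mfys[2] =
 *       { { ._link = &_links[0], .fp = cis_tx_lll_flush },
 *         { ._link = &_links[1], .fp = cis_tx_lll_flush }, };
 */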


static int init_reset(void);
#if !defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
static void cis_lazy_fill(struct ll_conn_iso_stream *cis);
static void mfy_cis_lazy_fill(void *param);
static void ticker_next_slot_get_op_cb(uint32_t status, void *param);
#endif /* !CONFIG_BT_CTLR_JIT_SCHEDULING */
static void ticker_start_op_cb(uint32_t status, void *param);
static void ticker_update_cig_op_cb(uint32_t status, void *param);
static void cis_disabled_cb(void *param);
static void ticker_stop_op_cb(uint32_t status, void *param);
static void cig_disable(void *param);
static void cig_disabled_cb(void *param);
static void disable(uint16_t handle);
static void cis_tx_lll_flush(void *param);

static struct ll_conn_iso_stream cis_pool[CONFIG_BT_CTLR_CONN_ISO_STREAMS];
static void *cis_free;

static struct ll_conn_iso_group cig_pool[CONFIG_BT_CTLR_CONN_ISO_GROUPS];
static void *cig_free;

/* BT 5.3 Spec - Vol 4, Part E, Sect 6.7 */
#define CONN_ACCEPT_TIMEOUT_DEFAULT 0x1F40
#define CONN_ACCEPT_TIMEOUT_MAX     0xB540
#define CONN_ACCEPT_TIMEOUT_MIN     0x0001
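/* Note: per Vol 4, Part E, Sect 6.7 the timeout values above are in units of
 * 0.625 ms; the default 0x1F40 (8000) corresponds to 5 s and the maximum
 * 0xB540 (46400) to 29 s.
 */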
static uint16_t conn_accept_timeout;

struct ll_conn_iso_group *ll_conn_iso_group_acquire(void)
{
	return mem_acquire(&cig_free);
}

void ll_conn_iso_group_release(struct ll_conn_iso_group *cig)
{
	cig->cig_id = 0xFF;
	cig->state  = CIG_STATE_NO_CIG;
	cig->lll.num_cis = 0U;

	mem_release(cig, &cig_free);
}

uint16_t ll_conn_iso_group_handle_get(struct ll_conn_iso_group *cig)
{
	return mem_index_get(cig, cig_pool, sizeof(struct ll_conn_iso_group));
}

struct ll_conn_iso_group *ll_conn_iso_group_get(uint16_t handle)
{
	return mem_get(cig_pool, sizeof(struct ll_conn_iso_group), handle);
}

struct ll_conn_iso_group *ll_conn_iso_group_get_by_id(uint8_t id)
{
	struct ll_conn_iso_group *cig;

	for (int h = 0; h < CONFIG_BT_CTLR_CONN_ISO_GROUPS; h++) {
		cig = ll_conn_iso_group_get(h);
		if (id == cig->cig_id) {
			return cig;
		}
	}

	return NULL;
}

struct ll_conn_iso_stream *ll_conn_iso_stream_acquire(void)
{
	struct ll_conn_iso_stream *cis = mem_acquire(&cis_free);

	if (cis) {
		(void)memset(&cis->hdr, 0U, sizeof(cis->hdr));
	}

	return cis;
}

void ll_conn_iso_stream_release(struct ll_conn_iso_stream *cis)
{
	cis->cis_id = 0;
	cis->group = NULL;

	mem_release(cis, &cis_free);
}

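/* CIS handles occupy a fixed range: pool index 0 maps to handle
 * LL_CIS_HANDLE_BASE, index 1 to LL_CIS_HANDLE_BASE + 1, and so on up to
 * LL_CIS_HANDLE_LAST, as implemented by the two helpers below.
 */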
uint16_t ll_conn_iso_stream_handle_get(struct ll_conn_iso_stream *cis)
{
	return mem_index_get(cis, cis_pool,
			     sizeof(struct ll_conn_iso_stream)) +
			     LL_CIS_HANDLE_BASE;
}

struct ll_conn_iso_stream *ll_conn_iso_stream_get(uint16_t handle)
{
	return mem_get(cis_pool, sizeof(struct ll_conn_iso_stream), handle -
		       LL_CIS_HANDLE_BASE);
}

struct lll_conn_iso_stream *ull_conn_iso_lll_stream_get(uint16_t handle)
{
	struct ll_conn_iso_stream *cis;

	cis = ll_conn_iso_stream_get(handle);
	if (!cis) {
		return NULL;
	}

	return &cis->lll;
}

struct ll_conn_iso_stream *ll_iso_stream_connected_get(uint16_t handle)
{
	struct ll_conn_iso_stream *cis;

	if (handle >= CONFIG_BT_CTLR_CONN_ISO_STREAMS +
		      LL_CIS_HANDLE_BASE) {
		return NULL;
	}

	cis = ll_conn_iso_stream_get(handle);
	if ((cis->group == NULL) || (cis->lll.handle != handle) || !cis->established) {
		/* CIS does not belong to a group, has an inconsistent handle
		 * or is not yet established.
		 */
		return NULL;
	}

	return cis;
}

struct ll_conn_iso_stream *ll_conn_iso_stream_get_by_acl(struct ll_conn *conn, uint16_t *cis_iter)
{
	uint8_t cis_iter_start = (cis_iter == NULL) || (*cis_iter) == UINT16_MAX;
	uint8_t cig_handle;

	/* Find CIS associated with ACL conn */
	for (cig_handle = 0; cig_handle < CONFIG_BT_CTLR_CONN_ISO_GROUPS; cig_handle++) {
		struct ll_conn_iso_stream *cis;
		struct ll_conn_iso_group *cig;
		uint16_t handle_iter;
		int8_t cis_idx;

		cig = ll_conn_iso_group_get(cig_handle);
		if (!cig) {
			continue;
		}

		handle_iter = UINT16_MAX;

		/* Find next connected CIS in the group */
		for (cis_idx = 0; cis_idx < CONFIG_BT_CTLR_CONN_ISO_STREAMS_PER_GROUP; cis_idx++) {
			cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);
			if (cis) {
				uint16_t cis_handle = cis->lll.handle;

				cis = ll_iso_stream_connected_get(cis_handle);
				if (!cis) {
					/* CIS is not connected */
					continue;
				}

				if (!cis_iter_start) {
					/* Look for iterator start handle */
					cis_iter_start = cis_handle == (*cis_iter);
				} else if (cis->lll.acl_handle == conn->lll.handle) {
					if (cis_iter) {
						(*cis_iter) = cis_handle;
					}

					return cis;
				}
			}
		}
	}

	return NULL;
}

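/* Iterator pattern for the *_get_by_group() helpers below, as used at the
 * call sites in this file: seed the iterator with UINT16_MAX, then call
 * repeatedly until NULL is returned, e.g.
 *   uint16_t handle_iter = UINT16_MAX;
 *   struct ll_conn_iso_stream *cis;
 *   while ((cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter))) {
 *           ...
 *   }
 */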
struct ll_conn_iso_stream *ll_conn_iso_stream_get_by_group(struct ll_conn_iso_group *cig,
							   uint16_t *handle_iter)
{
	struct ll_conn_iso_stream *cis;
	uint16_t handle_start;
	uint16_t handle;

	handle_start = (handle_iter == NULL) || ((*handle_iter) == UINT16_MAX) ?
			LL_CIS_HANDLE_BASE : (*handle_iter) + 1;

	for (handle = handle_start; handle <= LL_CIS_HANDLE_LAST; handle++) {
		cis = ll_conn_iso_stream_get(handle);
		if (cis->group == cig) {
			if (handle_iter) {
				(*handle_iter) = handle;
			}
			return cis;
		}
	}

	return NULL;
}

struct ll_conn_iso_stream *ll_conn_iso_stream_get_by_id(uint8_t cis_id)
{
	struct ll_conn_iso_stream *cis;
	uint16_t handle;

	for (handle = LL_CIS_HANDLE_BASE; handle <= LL_CIS_HANDLE_LAST; handle++) {
		cis = ll_conn_iso_stream_get(handle);
		if (cis->group && (cis->cis_id == cis_id)) {
			return cis;
		}
	}

	return NULL;
}

struct lll_conn_iso_stream *
ull_conn_iso_lll_stream_get_by_group(struct lll_conn_iso_group *cig_lll,
				     uint16_t *handle_iter)
{
	struct ll_conn_iso_stream *cis;
	struct ll_conn_iso_group *cig;

	cig = HDR_LLL2ULL(cig_lll);
	cis = ll_conn_iso_stream_get_by_group(cig, handle_iter);
	if (!cis) {
		return NULL;
	}

	return &cis->lll;
}

/*
 * Helper function to iterate and return CIS LLL contexts sorted in ascending
 * order of CIS offset from the associated ACL and CIG. This implementation is
 * to be used by the peripheral LLL to schedule subevents, as CISes can be
 * created in any order and with ascending or descending CIS offsets when
 * created towards the peripheral.
 *
 * NOTE: This implementation assumes all CISes are created from the same ACL.
 *       CISes created from different peer centrals are not yet supported.
 */
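/* Example: with three CISes created in the order A, B, C but with offsets
 * A = 300 us, B = 100 us and C = 200 us, successive calls return B, then C,
 * then A (ascending offsets), and finally NULL.
 */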
struct lll_conn_iso_stream *
ull_conn_iso_lll_stream_sorted_get_by_group(struct lll_conn_iso_group *cig_lll,
					    uint16_t *handle_iter)
{
	struct ll_conn_iso_stream *cis_next = NULL;
	struct ll_conn_iso_group *cig;
	uint32_t cis_offset_curr;
	uint32_t cis_offset_next;
	uint16_t handle;

	cig = HDR_LLL2ULL(cig_lll);

	if ((handle_iter == NULL) || ((*handle_iter) == UINT16_MAX)) {
		/* First in the iteration, start with a minimum offset value and
		 * find the first CIS offset of the active CIS.
		 */
		cis_offset_curr = 0U;
	} else {
		/* Subsequent iteration, get reference to current CIS and use
		 * its CIS offset to find the next active CIS with offset
		 * greater than the current CIS.
		 */
		struct ll_conn_iso_stream *cis_curr;

		cis_curr = ll_conn_iso_stream_get(*handle_iter);
		cis_offset_curr = cis_curr->offset;
	}

	cis_offset_next = UINT32_MAX;

	/* Loop through all CIS contexts */
	for (handle = LL_CIS_HANDLE_BASE; handle <= LL_CIS_HANDLE_LAST;
	     handle++) {
		struct ll_conn_iso_stream *cis;

		/* Get CIS reference corresponding to loop handle */
		cis = ll_conn_iso_stream_get(handle);

		/* Match CIS contexts associated with the CIG */
		if (cis->group == cig) {
			if (cis->offset <= cis_offset_curr) {
				/* Skip CISes already returned, i.e. those with
				 * offsets less than or equal to the current
				 * offset.
				 */
				continue;
			}

			/* Track the CIS with the smallest offset greater than
			 * the current one; it is the next CIS in ascending
			 * order.
			 */
			if (cis->offset < cis_offset_next) {
				cis_next = cis;
				cis_offset_next = cis_next->offset;

				if (handle_iter) {
					(*handle_iter) = handle;
				}
			}
		}
	}

	if (cis_next) {
		/* Found the next CIS with offset in ascending order. */
		return &cis_next->lll;
	}

	return NULL;
}

struct lll_conn_iso_group *
ull_conn_iso_lll_group_get_by_stream(struct lll_conn_iso_stream *cis_lll)
{
	struct ll_conn_iso_stream *cis;
	struct ll_conn_iso_group *cig;

	cis = ll_conn_iso_stream_get(cis_lll->handle);
	cig = cis->group;

	return &cig->lll;
}

uint8_t ll_conn_iso_accept_timeout_get(uint16_t *timeout)
{
	*timeout = conn_accept_timeout;

	return 0;
}

uint8_t ll_conn_iso_accept_timeout_set(uint16_t timeout)
{
	if (!IN_RANGE(timeout, CONN_ACCEPT_TIMEOUT_MIN,
			       CONN_ACCEPT_TIMEOUT_MAX)) {
		return BT_HCI_ERR_INVALID_LL_PARAM;
	}

	conn_accept_timeout = timeout;

	return 0;
}

void ull_conn_iso_lll_cis_established(struct lll_conn_iso_stream *cis_lll)
{
	struct ll_conn_iso_stream *cis =
		ll_conn_iso_stream_get(cis_lll->handle);
	struct node_rx_pdu *node_rx;

	if (cis->established) {
		return;
	}

	node_rx = ull_pdu_rx_alloc();
	if (!node_rx) {
		/* No node available - try again later */
		return;
	}

	node_rx->hdr.type = NODE_RX_TYPE_CIS_ESTABLISHED;

	/* Send node to ULL RX demuxer for triggering LLCP state machine */
	node_rx->hdr.handle = cis->lll.acl_handle;

	ull_rx_put_sched(node_rx->hdr.link, node_rx);

	cis->established = 1;
}

void ull_conn_iso_done(struct node_rx_event_done *done)
{
	struct lll_conn_iso_group *lll;
	struct ll_conn_iso_group *cig;
	struct ll_conn_iso_stream *cis;
	uint32_t ticks_drift_minus;
	uint32_t ticks_drift_plus;
	uint16_t handle_iter;
	uint8_t cis_idx;

	/* Get reference to ULL context */
	cig = CONTAINER_OF(done->param, struct ll_conn_iso_group, ull);
	lll = &cig->lll;

	/* Skip if CIG terminated by local host */
	if (unlikely(lll->handle == 0xFFFF)) {
		return;
	}

	ticks_drift_plus  = 0;
	ticks_drift_minus = 0;
	handle_iter = UINT16_MAX;
	cis = NULL;

	/* Check all CISes for supervision/establishment timeout */
	for (cis_idx = 0; cis_idx < cig->lll.num_cis; cis_idx++) {
		cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);
		LL_ASSERT(cis);

		if (cis->lll.active && cis->lll.handle != LLL_HANDLE_INVALID) {
			/* CIS was set up and is now expected to be running */
			if (done->extra.trx_performed_bitmask &
			    (1U << LL_CIS_IDX_FROM_HANDLE(cis->lll.handle))) {
				if (false) {
#if defined(CONFIG_BT_CTLR_LE_ENC)
				} else if (done->extra.mic_state == LLL_CONN_MIC_FAIL) {
					/* MIC failure - stop CIS and defer cleanup to after
					 * teardown.
					 */
					ull_conn_iso_cis_stop(cis, NULL,
							      BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL);
#endif /* CONFIG_BT_CTLR_LE_ENC */
				} else {
					cis->event_expire = 0U;
				}
			} else {
				/* We did NOT have a successful transaction on an established
				 * CIS, or the CIS was not yet established, so handle timeout
				 */
				if (!cis->event_expire) {
					struct ll_conn *conn = ll_conn_get(cis->lll.acl_handle);

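					/* Worked example (assuming RADIO_CONN_EVENTS() performs a
					 * ceiling division of a duration into a number of events):
					 * a supervision timeout of 100 (1 s, in 10 ms units) with
					 * an ISO interval of 8 (10 ms, in 1.25 ms units) arms a
					 * countdown of 1000000 / 10000 = 100 CIG events.
					 */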
					cis->event_expire = RADIO_CONN_EVENTS(
							conn->supervision_timeout * 10U * 1000U,
							cig->iso_interval * CONN_INT_UNIT_US);

				} else {
					uint16_t event_elapsed;

					event_elapsed = cig->lll.latency_event +
							cig->lll.lazy_prepare + 1U;
					if (cis->event_expire > event_elapsed) {
						cis->event_expire -= event_elapsed;
					} else {
						cis->event_expire = 0U;

						/* Stop CIS and defer cleanup to after teardown.
						 * This will only generate a terminate event to the
						 * host if CIS has been established. If CIS was not
						 * established, the teardown will send
						 * CIS_ESTABLISHED with failure.
						 */
						ull_conn_iso_cis_stop(cis, NULL, cis->established ?
								BT_HCI_ERR_CONN_TIMEOUT :
								BT_HCI_ERR_CONN_FAIL_TO_ESTAB);
					}
				}
			}
		}
	}

	if (IS_PERIPHERAL(cig) && done->extra.trx_performed_bitmask) {
		ull_drift_ticks_get(done, &ticks_drift_plus,
				    &ticks_drift_minus);
	}

	/* Update CIG ticker to compensate for drift.
	 * Since all CISes in a CIG 'belong to' the same ACL,
	 * any CIS found in the loop above can be used to dereference the ACL.
	 */
	if (cis && (ticks_drift_plus || ticks_drift_minus)) {
		uint8_t ticker_id = TICKER_ID_CONN_ISO_BASE +
				    ll_conn_iso_group_handle_get(cig);
		struct ll_conn *conn = ll_connected_get(cis->lll.acl_handle);
		uint32_t ticker_status;

		ticker_status = ticker_update(TICKER_INSTANCE_ID_CTLR,
					      TICKER_USER_ID_ULL_HIGH,
					      ticker_id,
					      ticks_drift_plus,
					      ticks_drift_minus, 0, 0,
					      TICKER_NULL_LAZY, 0,
					      ticker_update_cig_op_cb,
					      cig);

		LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
			  (ticker_status == TICKER_STATUS_BUSY) ||
			  ((void *)conn == ull_disable_mark_get()));
	}
}

/**
 * @brief Stop and tear down a connected ISO stream
 * This function may be called to tear down a CIS. When the CIS teardown
 * has completed and the stream has been released, the cis_released_cb
 * callback is invoked, if provided.
 *
 * @param cis             Pointer to connected ISO stream to stop
 * @param cis_released_cb Callback to invoke when the CIS has been released.
 *                        NULL to ignore.
 * @param reason          Termination reason
 */
void ull_conn_iso_cis_stop(struct ll_conn_iso_stream *cis,
			   ll_iso_stream_released_cb_t cis_released_cb,
			   uint8_t reason)
{
	struct ll_conn_iso_group *cig;
	struct ull_hdr *hdr;

	if (cis->teardown) {
		/* Teardown already started */
		LL_ASSERT(!cis->released_cb || !cis_released_cb ||
			  (cis->released_cb == cis_released_cb));

		if (cis_released_cb) {
			cis->released_cb = cis_released_cb;
		}

		return;
	}

	cis->teardown = 1;
	cis->released_cb = cis_released_cb;
	cis->terminate_reason = reason;

	/* Check ref count to determine if any pending LLL events in pipeline */
	cig = cis->group;
	hdr = &cig->ull;
	if (ull_ref_get(hdr)) {
		static memq_link_t link;
		static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
		uint32_t ret;

		mfy.param = &cig->lll;

		/* Setup disabled callback to be called when ref count
		 * returns to zero.
		 */
		/* Event is active (prepare/done ongoing) - wait for done and
		 * continue CIS teardown from there. The disabled_cb cannot be
		 * reserved for other use.
		 */
		LL_ASSERT(!hdr->disabled_cb ||
			  (hdr->disabled_cb == cis_disabled_cb));
		hdr->disabled_param = mfy.param;
		hdr->disabled_cb = cis_disabled_cb;

		/* Trigger LLL disable */
		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
				     TICKER_USER_ID_LLL, 0, &mfy);
		LL_ASSERT(!ret);
	} else {
		/* No pending LLL events */

		/* Tear down CIS now in ULL_HIGH context. Ignore enqueue
		 * error (already enqueued) as all CISes marked for teardown
		 * will be handled in cis_disabled_cb. Use mayfly chaining to
		 * prevent recursive stop calls.
		 */
		cis_disabled_cb(&cig->lll);
	}
}

int ull_conn_iso_init(void)
{
	return init_reset();
}

int ull_conn_iso_reset(void)
{
	return init_reset();
}

static int init_reset(void)
{
	struct ll_conn_iso_stream *cis;
	struct ll_conn_iso_group *cig;
	uint16_t handle;
	int err;

	/* Disable all active CIGs (uses blocking ull_ticker_stop_with_mark) */
	for (handle = 0U; handle < CONFIG_BT_CTLR_CONN_ISO_GROUPS; handle++) {
		disable(handle);
	}

	/* Initialize CIS pool */
	mem_init(cis_pool, sizeof(struct ll_conn_iso_stream),
		 sizeof(cis_pool) / sizeof(struct ll_conn_iso_stream),
		 &cis_free);

	/* Initialize CIG pool */
	mem_init(cig_pool, sizeof(struct ll_conn_iso_group),
		 sizeof(cig_pool) / sizeof(struct ll_conn_iso_group),
		 &cig_free);

	for (handle = 0; handle < CONFIG_BT_CTLR_CONN_ISO_GROUPS; handle++) {
		cig = ll_conn_iso_group_get(handle);
		cig->cig_id = 0xFF;
		cig->state  = CIG_STATE_NO_CIG;
		cig->lll.num_cis = 0;
	}

	for (handle = LL_CIS_HANDLE_BASE; handle <= LL_CIS_HANDLE_LAST;
	     handle++) {
		cis = ll_conn_iso_stream_get(handle);
		cis->cis_id = 0;
		cis->group  = NULL;
		cis->lll.link_tx_free = NULL;
	}

	conn_accept_timeout = CONN_ACCEPT_TIMEOUT_DEFAULT;

	/* Initialize LLL */
	err = lll_conn_iso_init();
	if (err) {
		return err;
	}

	return 0;
}

void ull_conn_iso_ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
			    uint32_t remainder, uint16_t lazy, uint8_t force,
			    void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = { 0, 0, &link, NULL, NULL };
	static struct lll_prepare_param p;
	struct ll_conn_iso_group *cig;
	struct ll_conn_iso_stream *cis;
	uint64_t leading_event_count;
	uint16_t handle_iter;
	uint32_t err;
	uint8_t ref;

	cig = param;
	leading_event_count = 0;

	/* Check if stopping ticker (on disconnection, race with ticker expiry)
	 */
	if (unlikely(cig->lll.handle == 0xFFFF)) {
		return;
	}

	handle_iter = UINT16_MAX;

	/* Increment CIS event counters */
	for (int i = 0; i < cig->lll.num_cis; i++)  {
		cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);
		LL_ASSERT(cis);

		/* A new CIS may become available by creation prior to the CIG
		 * event in which it has event_count == 0. Don't increment the
		 * event count until its handle is validated in
		 * ull_conn_iso_start, which means that its ACL instant
		 * has been reached and its offset calculated.
		 */
		if (cis->lll.handle != 0xFFFF && cis->lll.active) {
			cis->lll.event_count += (lazy + 1U);

#if !defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
			cis->lll.event_count -= cis->lll.lazy_active;
			cis->lll.lazy_active = 0U;
#endif /* !CONFIG_BT_CTLR_JIT_SCHEDULING */

			leading_event_count = MAX(leading_event_count,
						cis->lll.event_count);

			ull_iso_lll_event_prepare(cis->lll.handle, cis->lll.event_count);
		}

		/* Latch datapath validity entering event */
		cis->lll.datapath_ready_rx = cis->hdr.datapath_out != NULL;
	}

	/* Update the CIG reference point for this event. Event 0 for the
	 * leading CIS in the CIG would have had its reference point set in
	 * ull_conn_iso_start(). The reference point should only be
	 * updated from event 1 onwards. Although the CIG reference point set
	 * this way is not accurate, it is the best possible until the anchor
	 * point for the leading CIS is available for this event.
	 */
	if (leading_event_count > 0) {
		cig->cig_ref_point = isoal_get_wrapped_time_us(cig->cig_ref_point,
						cig->iso_interval * CONN_INT_UNIT_US);
	}

	/* Increment prepare reference count */
	ref = ull_ref_inc(&cig->ull);
	LL_ASSERT(ref);

	/* Append timing parameters */
	p.ticks_at_expire = ticks_at_expire;
	p.remainder = remainder;
	p.lazy = lazy;
	p.param = &cig->lll;
	mfy.param = &p;

#if defined(CONFIG_BT_CTLR_CENTRAL_ISO) && \
	defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	mfy.fp = IS_PERIPHERAL(cig) ? lll_peripheral_iso_prepare : lll_central_iso_prepare;

#elif defined(CONFIG_BT_CTLR_CENTRAL_ISO)
	mfy.fp = lll_central_iso_prepare;

#elif defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	mfy.fp = lll_peripheral_iso_prepare;

#else /* !CONFIG_BT_CTLR_CENTRAL_ISO && !CONFIG_BT_CTLR_PERIPHERAL_ISO */
	LL_ASSERT(0);

	return;
#endif /* !CONFIG_BT_CTLR_CENTRAL_ISO && !CONFIG_BT_CTLR_PERIPHERAL_ISO */

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	if (IS_PERIPHERAL(cig) && cig->sca_update) {
		/* CIG/ACL affiliation established */
		uint32_t iso_interval_us_frac =
			EVENT_US_TO_US_FRAC(cig->iso_interval * CONN_INT_UNIT_US);
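		/* Illustrative numbers: with 50 ppm local and 50 ppm peer
		 * clock accuracy and a 100 ms ISO interval, the periodic
		 * window widening below is (50 + 50) * 100000 / 1000000
		 * = 10 us per interval.
		 */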
		cig->lll.window_widening_periodic_us_frac =
			DIV_ROUND_UP(((lll_clock_ppm_local_get() +
					   lll_clock_ppm_get(cig->sca_update - 1)) *
					  iso_interval_us_frac),
					 1000000U);
		iso_interval_us_frac -= cig->lll.window_widening_periodic_us_frac;

		ull_peripheral_iso_update_ticker(cig, ticks_at_expire, iso_interval_us_frac);
		cig->sca_update = 0;
	}
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */

	/* Kick LLL prepare */
	err = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, 0, &mfy);
	LL_ASSERT(!err);

	/* Handle ISO Transmit Test for this CIG */
	ull_conn_iso_transmit_test_cig_interval(cig->lll.handle, ticks_at_expire);
}

static uint32_t cig_offset_calc(struct ll_conn_iso_group *cig, struct ll_conn_iso_stream *cis,
				uint32_t cis_offset, uint32_t *ticks_at_expire, uint32_t remainder)
{
	uint32_t acl_to_cig_ref_point;
	uint32_t cis_offs_to_cig_ref;
	uint32_t remainder_us;

	remainder_us = remainder;
	hal_ticker_remove_jitter(ticks_at_expire, &remainder_us);

	cis_offs_to_cig_ref = cig->sync_delay - cis->sync_delay;

	/* Establish the CIG reference point by adjusting ACL-to-CIS offset
	 * (cis->offset) by the difference between CIG- and CIS sync delays.
	 */
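	/* Illustrative numbers: with cig->sync_delay = 5000 us,
	 * cis->sync_delay = 3000 us and cis_offset = 4000 us, the CIG
	 * reference point lands 4000 - 2000 = 2000 us after the ACL anchor
	 * (plus the start overhead added below).
	 */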
	acl_to_cig_ref_point = cis_offset - cis_offs_to_cig_ref;

	/* Calculate the CIG reference point of the first CIG event. This
	 * calculation is inaccurate. However, it is the best estimate
	 * available until the first anchor point for the leading CIS is
	 * available.
	 */
	cig->cig_ref_point = isoal_get_wrapped_time_us(HAL_TICKER_TICKS_TO_US(*ticks_at_expire),
						       remainder_us +
						       EVENT_OVERHEAD_START_US +
						       acl_to_cig_ref_point);
	/* Calculate initial ticker offset */
	return remainder_us + acl_to_cig_ref_point;
}

void ull_conn_iso_start(struct ll_conn *conn, uint16_t cis_handle,
			uint32_t ticks_at_expire, uint32_t remainder,
			uint16_t instant_latency)
{
	struct ll_conn_iso_group *cig;
	struct ll_conn_iso_stream *cis;
	uint32_t ticks_remainder;
	uint32_t ticks_periodic;
	uint32_t ticker_status;
	int32_t cig_offset_us;
	uint32_t ticks_slot;
	uint8_t ticker_id;

	cis = ll_conn_iso_stream_get(cis_handle);
	cig = cis->group;

	cis->lll.offset = cig->sync_delay - cis->sync_delay;
	cis->lll.handle = cis_handle;

#if defined(CONFIG_BT_CTLR_LE_ENC)
	if (conn->lll.enc_tx) {
		/* copy the Session Key */
		memcpy(cis->lll.tx.ccm.key, conn->lll.ccm_tx.key,
		       sizeof(cis->lll.tx.ccm.key));

		/* copy the MSbits of IV Base */
		memcpy(&cis->lll.tx.ccm.iv[4], &conn->lll.ccm_tx.iv[4], 4);

		/* XOR the CIS access address to get IV */
		mem_xor_32(cis->lll.tx.ccm.iv, conn->lll.ccm_tx.iv,
			   cis->lll.access_addr);

		/* initialise counter */
		cis->lll.tx.ccm.counter = 0U;

		/* set direction: peripheral to central = 0,
		 * central to peripheral = 1
		 */
		cis->lll.tx.ccm.direction = !conn->lll.role;
	}

	if (conn->lll.enc_rx) {
		/* copy the Session Key */
		memcpy(cis->lll.rx.ccm.key, conn->lll.ccm_rx.key,
		       sizeof(cis->lll.rx.ccm.key));

		/* copy the MSbits of IV Base */
		memcpy(&cis->lll.rx.ccm.iv[4], &conn->lll.ccm_rx.iv[4], 4);

		/* XOR the CIS access address to get IV */
		mem_xor_32(cis->lll.rx.ccm.iv, conn->lll.ccm_rx.iv,
			   cis->lll.access_addr);

		/* initialise counter */
		cis->lll.rx.ccm.counter = 0U;

		/* set direction: peripheral to central = 0,
		 * central to peripheral = 1
		 */
		cis->lll.rx.ccm.direction = conn->lll.role;
	}
#endif /* CONFIG_BT_CTLR_LE_ENC */

	/* Connection establishment timeout */
	cis->event_expire = CONN_ESTAB_COUNTDOWN;

	/* Check if another CIS was already started and the CIG ticker is
	 * running. If so, we just return with an updated offset and a
	 * validated handle.
	 */
	if (cig->state == CIG_STATE_ACTIVE) {
#if !defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
		/* Initialize CIS event lazy at CIS create */
		cis->lll.lazy_active = 0U;

		/* Deferred fill CIS event lazy value at CIS create */
		cis_lazy_fill(cis);
#else /* CONFIG_BT_CTLR_JIT_SCHEDULING */
		/* Set CIS active in already active CIG */
		cis->lll.active = 1U;
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */

		/* We're done */
		return;
	}

	ticker_id = TICKER_ID_CONN_ISO_BASE + ll_conn_iso_group_handle_get(cig);

	cig_offset_us = cig_offset_calc(cig, cis, cis->offset, &ticks_at_expire, remainder);

	if (false) {

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	} else if (IS_PERIPHERAL(cig)) {
		uint32_t iso_interval_us_frac;

		/* Calculate the interval in fractional microseconds for highest precision when
		 * accumulating the window widening window size. The ticker interval is set
		 * lopsided, with a natural drift towards earlier timeout.
		 */
		iso_interval_us_frac = EVENT_US_TO_US_FRAC(cig->iso_interval * ISO_INT_UNIT_US) -
				       cig->lll.window_widening_periodic_us_frac;

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO_EARLY_CIG_START)
		bool early_start = (cis->offset < EVENT_OVERHEAD_START_US);

		if (early_start) {
			if (instant_latency == 0U) {
				/* Adjust CIG offset and reference point ahead one
				 * interval
				 */
				cig_offset_us += (conn->lll.interval * CONN_INT_UNIT_US);
				cig->cig_ref_point = isoal_get_wrapped_time_us(cig->cig_ref_point,
							conn->lll.interval * CONN_INT_UNIT_US);
			} else if (instant_latency > 1U) {
				/* We have passed the last possible event for a timely start. For
				 * early_start this means the latency is actually one less.
				 */
				instant_latency--;
			}
		}
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO_EARLY_CIG_START */

		if (instant_latency > 0U) {
			/* Try to start the CIG late by finding the CIG event relative to the
			 * current ACL event, taking latency into consideration. Adjust ticker
			 * periodicity with increased window widening.
			 */
			uint32_t lost_cig_events;
			uint32_t iso_interval_us;
			uint32_t acl_latency_us;
			uint32_t lost_payloads;
			uint32_t cis_offset;

			acl_latency_us = instant_latency * conn->lll.interval * CONN_INT_UNIT_US;
			iso_interval_us = cig->iso_interval * ISO_INT_UNIT_US;

			if (acl_latency_us > iso_interval_us) {
				/* Latency is greater than the ISO interval - find the offset from
				 * this ACL event to the next active ISO event, and adjust the event
				 * counter accordingly.
				 */
				lost_cig_events = DIV_ROUND_UP(acl_latency_us - cis->offset,
							       iso_interval_us);
				cis_offset = cis->offset + (lost_cig_events * iso_interval_us) -
					     acl_latency_us;
			} else {
				/* Latency is less than or equal to one ISO interval - start at
				 * the next ISO event.
				 */
				lost_cig_events = 1U;
				cis_offset = cis->offset + iso_interval_us - acl_latency_us;
			}

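			/* Illustrative numbers: with an ACL latency of 120 ms,
			 * an ISO interval of 100 ms and cis->offset = 10 ms,
			 * the branch above gives lost_cig_events =
			 * ceil((120 - 10) / 100) = 2 and a new cis_offset of
			 * 10 + 200 - 120 = 90 ms into the next ISO event.
			 */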
			cis->lll.event_count += lost_cig_events;

			lost_payloads = (lost_cig_events - (cis->lll.rx.ft - 1)) * cis->lll.rx.bn;
			cis->lll.rx.payload_count += lost_payloads;

			lost_payloads = (lost_cig_events - (cis->lll.tx.ft - 1)) * cis->lll.tx.bn;
			cis->lll.tx.payload_count += lost_payloads;

			/* Adjust for extra window widening */
			iso_interval_us_frac = EVENT_US_TO_US_FRAC(cig->iso_interval *
								   ISO_INT_UNIT_US);
			iso_interval_us_frac -= cig->lll.window_widening_periodic_us_frac *
						instant_latency;
			/* Calculate new offset */
			cig_offset_us = cig_offset_calc(cig, cis, cis_offset, &ticks_at_expire,
							remainder);
		}

		ticks_periodic  = EVENT_US_FRAC_TO_TICKS(iso_interval_us_frac);
		ticks_remainder = EVENT_US_FRAC_TO_REMAINDER(iso_interval_us_frac);
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */

	} else if (IS_CENTRAL(cig)) {
		uint32_t iso_interval_us;

		iso_interval_us = cig->iso_interval * ISO_INT_UNIT_US;
		ticks_periodic  = HAL_TICKER_US_TO_TICKS(iso_interval_us);
		ticks_remainder = HAL_TICKER_REMAINDER(iso_interval_us);

		/* FIXME: Handle latency due to skipped ACL events around the
		 * instant to start CIG
		 */
		LL_ASSERT(instant_latency == 0U);
	} else {
		LL_ASSERT(0);

		return;
	}

	/* Make sure we have time to service the first subevent. TODO: Improve
	 * by skipping <n> interval(s) and incrementing event_count.
	 */
	LL_ASSERT(cig_offset_us > 0);

	ull_hdr_init(&cig->ull);

#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
	ticks_slot = 0U;

#else /* !CONFIG_BT_CTLR_JIT_SCHEDULING */
	uint32_t ticks_slot_overhead;
	uint32_t ticks_slot_offset;

	/* Calculate time reservations for sequential and interleaved packing as
	 * configured.
	 */
	if (IS_PERIPHERAL(cig)) {
		uint32_t slot_us;

		/* FIXME: Time reservation for interleaved packing */
		/* Below is time reservation for sequential packing */
		if (IS_ENABLED(CONFIG_BT_CTLR_PERIPHERAL_ISO_RESERVE_MAX)) {
			slot_us = cis->lll.sub_interval * cis->lll.nse;
		} else {
			slot_us = cis->lll.sub_interval * MAX(cis->lll.tx.bn, cis->lll.rx.bn);
		}

		if (IS_ENABLED(CONFIG_BT_CTLR_EVENT_OVERHEAD_RESERVE_MAX)) {
			slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
		}

		/* FIXME: How to use ready_delay_us in the time reservation?
		 *        i.e. when CISes use different PHYs? Is that even
		 *        allowed?
		 *
		 *        Missing code here, i.e. slot_us += ready_delay_us;
		 */

		/* Populate the ULL hdr with event timings overheads */
		cig->ull.ticks_active_to_start = 0U;
		cig->ull.ticks_prepare_to_start =
			HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
		cig->ull.ticks_preempt_to_start =
			HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
		cig->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);
	}

	ticks_slot_offset = MAX(cig->ull.ticks_active_to_start,
				cig->ull.ticks_prepare_to_start);

	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}

	ticks_slot = cig->ull.ticks_slot + ticks_slot_overhead;

	/* Initialize CIS event lazy at CIS create */
	cis->lll.lazy_active = 0U;
#endif /* !CONFIG_BT_CTLR_JIT_SCHEDULING */

	/* Start CIS peripheral CIG ticker */
	ticker_status = ticker_start_us(TICKER_INSTANCE_ID_CTLR,
					TICKER_USER_ID_ULL_HIGH,
					ticker_id, ticks_at_expire,
					HAL_TICKER_US_TO_TICKS(cig_offset_us),
					HAL_TICKER_REMAINDER(cig_offset_us),
					ticks_periodic,	ticks_remainder,
					TICKER_NULL_LAZY, ticks_slot,
					ull_conn_iso_ticker_cb, cig,
					ticker_start_op_cb, NULL);
	LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
		  (ticker_status == TICKER_STATUS_BUSY));

	/* Set CIG and the first CIS state as active */
	cig->state = CIG_STATE_ACTIVE;
	cis->lll.active = 1U;
}

#if !defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
static void cis_lazy_fill(struct ll_conn_iso_stream *cis)
{
	static memq_link_t link;
	static struct mayfly mfy = {0U, 0U, &link, NULL, mfy_cis_lazy_fill};
	uint32_t ret;

	mfy.param = cis;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1U, &mfy);
	LL_ASSERT(!ret);
}

static void mfy_cis_lazy_fill(void *param)
{
	struct ll_conn_iso_stream *cis;
	struct ll_conn_iso_group *cig;
	uint32_t ticks_to_expire;
	uint32_t ticks_current;
	uint32_t remainder;
	uint16_t lazy = 0U;
	uint8_t ticker_id;
	uint8_t retry;
	uint8_t id;

	cis = param;
	cig = cis->group;
	ticker_id = TICKER_ID_CONN_ISO_BASE + ll_conn_iso_group_handle_get(cig);

	id = TICKER_NULL;
	ticks_to_expire = 0U;
	ticks_current = 0U;

	/* In the first iteration the actual ticks_current value is returned,
	 * which will be different from the initial value of 0 that is set.
	 * Subsequent iterations should return the same ticks_current as the
	 * reference tick.
	 * In order to avoid infinite updates to the ticker's reference due to
	 * any race condition with expiring tickers, we try up to 3 more times.
	 * Hence, the first iteration gets an actual ticks_current, and 3 more
	 * serve as retries for when race conditions change the value of
	 * ticks_current.
	 *
	 * ticker_next_slot_get_ext() restarts iterating when an updated value
	 * of ticks_current is returned.
	 */
	retry = 4U;
	do {
		uint32_t volatile ret_cb;
		uint32_t ticks_previous;
		uint32_t ret;
		bool success;

		ticks_previous = ticks_current;

		ret_cb = TICKER_STATUS_BUSY;
		ret = ticker_next_slot_get_ext(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_LOW, &id,
					       &ticks_current, &ticks_to_expire, &remainder, &lazy,
					       NULL, NULL, ticker_next_slot_get_op_cb,
					       (void *)&ret_cb);
		if (ret == TICKER_STATUS_BUSY) {
			/* Busy wait until Ticker Job is enabled after any Radio
			 * event is done using the Radio hardware. Ticker Job
			 * ISR is disabled during Radio events in LOW_LAT
			 * feature to avoid Radio ISR latencies.
			 */
			while (ret_cb == TICKER_STATUS_BUSY) {
				ticker_job_sched(TICKER_INSTANCE_ID_CTLR,
						 TICKER_USER_ID_ULL_LOW);
			}
		}

		success = (ret_cb == TICKER_STATUS_SUCCESS);
		LL_ASSERT(success);

		LL_ASSERT((ticks_current == ticks_previous) || retry--);

		LL_ASSERT(id != TICKER_NULL);
	} while (id != ticker_id);

	/* Set the CIS active in the already-active CIG, and record any CIG
	 * laziness that occurred before the CIS went active; it is decremented
	 * when event_count is incremented in ull_conn_iso_ticker_cb().
	 */
	cis->lll.active = 1U;
	cis->lll.lazy_active = lazy;
}

static void ticker_next_slot_get_op_cb(uint32_t status, void *param)
{
	*((uint32_t volatile *)param) = status;
}
#endif /* !CONFIG_BT_CTLR_JIT_SCHEDULING */

static void ticker_start_op_cb(uint32_t status, void *param)
{
	ARG_UNUSED(param);

	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
}

static void ticker_update_cig_op_cb(uint32_t status, void *param)
{
	/* CIG drift compensation succeeds, or it fails in a race condition
	 * when disconnecting (race between ticker_update and ticker_stop
	 * calls). TODO: Are the race-checks needed?
	 */
	LL_ASSERT(status == TICKER_STATUS_SUCCESS ||
		  param == ull_update_mark_get() ||
		  param == ull_disable_mark_get());
}

static void cis_disabled_cb(void *param)
{
	struct ll_conn_iso_group *cig;
	struct ll_conn_iso_stream *cis;
	uint32_t ticker_status;
	struct ll_conn *conn;
	uint8_t active_cises;
	uint16_t handle_iter;
	uint8_t cis_idx;
	uint8_t num_cis;

	cig = HDR_LLL2ULL(param);
	handle_iter = UINT16_MAX;
	active_cises = 0;

	/* Remove all CISes marked for teardown */
	num_cis = cig->lll.num_cis;
	for (cis_idx = 0; cis_idx < num_cis; cis_idx++) {
		cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);
		LL_ASSERT(cis);

		if (!cis->lll.active && (cis->lll.flush != LLL_CIS_FLUSH_COMPLETE)) {
			/* CIS is not active and did not just complete LLL flush - skip it */
			continue;
		}

		active_cises++;

		if (cis->lll.flush == LLL_CIS_FLUSH_PENDING) {
			/* CIS has LLL flush pending - wait for completion */
			continue;
		} else if (cis->lll.flush == LLL_CIS_FLUSH_COMPLETE) {
			ll_iso_stream_released_cb_t cis_released_cb;

			conn = ll_conn_get(cis->lll.acl_handle);
			cis_released_cb = cis->released_cb;
			cis->released_cb = NULL;

			if (IS_PERIPHERAL(cig)) {
				/* Remove data path and ISOAL sink/source associated with this
				 * CIS for both directions. Disable them one at a time to make sure
				 * both are removed, even if only one is set.
				 */
				ll_remove_iso_path(cis->lll.handle,
						   BIT(BT_HCI_DATAPATH_DIR_HOST_TO_CTLR));
				ll_remove_iso_path(cis->lll.handle,
						   BIT(BT_HCI_DATAPATH_DIR_CTLR_TO_HOST));

				ll_conn_iso_stream_release(cis);

				cig->lll.num_cis--;

			} else if (IS_CENTRAL(cig)) {
				cis->established = 0U;
				cis->teardown = 0U;

				/* Prevent referencing inactive CIS */
				cis->lll.flush = LLL_CIS_FLUSH_NONE;
				cis->lll.acl_handle = LLL_HANDLE_INVALID;

			} else {
				LL_ASSERT(0);
			}

			/* CIS is no longer active */
			active_cises--;

			/* CIS terminated, triggers completion of CIS_TERMINATE_IND procedure */
			/* Only used by local procedure, ignored for remote procedure */
			conn->llcp.cis.terminate_ack = 1U;

			/* Check if removed CIS has an ACL disassociation callback. Invoke
			 * the callback to allow cleanup.
			 */
			if (cis_released_cb) {
				/* CIS removed - notify caller */
				cis_released_cb(conn);
			}
		} else if (cis->teardown) {
			DECLARE_MAYFLY_ARRAY(mfys, cis_tx_lll_flush,
				CONFIG_BT_CTLR_CONN_ISO_GROUPS);
			uint32_t ret;

			if (cis->established) {
				struct node_rx_pdu *node_terminate;

				/* Create and enqueue termination node. This shall prevent
				 * further enqueuing of TX nodes for terminating CIS.
				 */
				node_terminate = ull_pdu_rx_alloc();
				LL_ASSERT(node_terminate);
				node_terminate->hdr.handle = cis->lll.handle;
				node_terminate->hdr.type = NODE_RX_TYPE_TERMINATE;
				*((uint8_t *)node_terminate->pdu) = cis->terminate_reason;

				ll_rx_put_sched(node_terminate->hdr.link, node_terminate);
			} else {
				conn = ll_conn_get(cis->lll.acl_handle);

				/* CIS was not established - complete the procedure with error */
				if (ull_cp_cc_awaiting_established(conn)) {
					ull_cp_cc_established(conn, cis->terminate_reason);
				}
			}

			if (cig->lll.resume_cis == cis->lll.handle) {
				/* Resume pending for terminating CIS - stop ticker */
				(void)ticker_stop(TICKER_INSTANCE_ID_CTLR,
						  TICKER_USER_ID_ULL_HIGH,
						  TICKER_ID_CONN_ISO_RESUME_BASE +
						  ll_conn_iso_group_handle_get(cig),
						  NULL, NULL);

				cig->lll.resume_cis = LLL_HANDLE_INVALID;
			}

			/* We need to flush TX nodes in LLL before releasing the stream.
			 * More than one CIG may be terminating at the same time, so
			 * enqueue a mayfly instance for this CIG.
			 */
			cis->lll.flush = LLL_CIS_FLUSH_PENDING;

			mfys[cig->lll.handle].param = &cis->lll;
			ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
					     TICKER_USER_ID_LLL, 1, &mfys[cig->lll.handle]);
			LL_ASSERT(!ret);

			return;
		}
	}

	if ((cig->state == CIG_STATE_ACTIVE) && !active_cises) {
		/* This was the last active CIS of the CIG. Initiate CIG teardown by
		 * stopping ticker.
		 */
		cig->state = CIG_STATE_INACTIVE;

		ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR,
					    TICKER_USER_ID_ULL_HIGH,
					    TICKER_ID_CONN_ISO_BASE +
					    ll_conn_iso_group_handle_get(cig),
					    ticker_stop_op_cb,
					    cig);

		LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
			  (ticker_status == TICKER_STATUS_BUSY));
	}
}

static void cis_tx_lll_flush(void *param)
{
	DECLARE_MAYFLY_ARRAY(mfys, cis_disabled_cb, CONFIG_BT_CTLR_CONN_ISO_GROUPS);

	struct lll_conn_iso_stream *lll;
	struct ll_conn_iso_stream *cis;
	struct ll_conn_iso_group *cig;
	struct node_tx_iso *tx;
	memq_link_t *link;

	lll = param;
	lll->active = 0U;

	cis = ll_conn_iso_stream_get(lll->handle);
	cig = cis->group;

	/* Flush in LLL - may return TX nodes to ack queue */
	lll_conn_iso_flush(lll->handle, lll);

	link = memq_dequeue(lll->memq_tx.tail, &lll->memq_tx.head, (void **)&tx);
	while (link) {
		link->next = tx->next;
		tx->next = link;
		ull_iso_lll_ack_enqueue(lll->handle, tx);

		link = memq_dequeue(lll->memq_tx.tail, &lll->memq_tx.head,
				    (void **)&tx);
	}

	LL_ASSERT(!lll->link_tx_free);
	link = memq_deinit(&lll->memq_tx.head, &lll->memq_tx.tail);
	LL_ASSERT(link);
	lll->link_tx_free = link;

	lll->flush = LLL_CIS_FLUSH_COMPLETE;

	/* Resume CIS teardown in ULL_HIGH context */
	mfys[cig->lll.handle].param = &cig->lll;
	(void)mayfly_enqueue(TICKER_USER_ID_LLL,
			     TICKER_USER_ID_ULL_HIGH, 1, &mfys[cig->lll.handle]);
}

static void ticker_stop_op_cb(uint32_t status, void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, cig_disable};
	uint32_t ret;

	/* Assert if race between thread and ULL */
	LL_ASSERT(status == TICKER_STATUS_SUCCESS);

	/* Check if any pending LLL events that need to be aborted */
	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
			     TICKER_USER_ID_ULL_HIGH, 0, &mfy);
	LL_ASSERT(!ret);
}

static void cig_disable(void *param)
{
	struct ll_conn_iso_group *cig;
	struct ull_hdr *hdr;

	/* Check ref count to determine if any pending LLL events in pipeline */
	cig = param;
	hdr = &cig->ull;
	if (ull_ref_get(hdr)) {
		static memq_link_t link;
		static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
		uint32_t ret;

		mfy.param = &cig->lll;

		/* Setup disabled callback to be called when ref count
		 * returns to zero.
		 */
		LL_ASSERT(!hdr->disabled_cb);
		hdr->disabled_param = mfy.param;
		hdr->disabled_cb = cig_disabled_cb;

		/* Trigger LLL disable */
		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
				     TICKER_USER_ID_LLL, 0, &mfy);
		LL_ASSERT(!ret);
	} else {
		/* No pending LLL events */
		cig_disabled_cb(&cig->lll);
	}
}

static void cig_disabled_cb(void *param)
{
	struct ll_conn_iso_group *cig;

	cig = HDR_LLL2ULL(param);

	if (IS_PERIPHERAL(cig)) {
		ll_conn_iso_group_release(cig);
	}
}

static void disable(uint16_t handle)
{
	struct ll_conn_iso_group *cig;
	int err;

	cig = ll_conn_iso_group_get(handle);

	(void)ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
			  TICKER_ID_CONN_ISO_RESUME_BASE + handle, NULL,
			  NULL);

	err = ull_ticker_stop_with_mark(TICKER_ID_CONN_ISO_BASE + handle,
					cig, &cig->lll);
	LL_ASSERT_INFO2(err == 0 || err == -EALREADY, handle, err);

	cig->lll.handle = LLL_HANDLE_INVALID;
	cig->lll.resume_cis = LLL_HANDLE_INVALID;
}

/* An ISO interval has elapsed for a Connected Isochronous Group */
void ull_conn_iso_transmit_test_cig_interval(uint16_t handle, uint32_t ticks_at_expire)
{
	struct ll_conn_iso_stream *cis;
	struct ll_conn_iso_group *cig;
	uint32_t sdu_interval;
	uint32_t iso_interval;
	uint16_t handle_iter;
	uint64_t sdu_counter;
	uint8_t tx_sdu_count;

	cig = ll_conn_iso_group_get(handle);
	LL_ASSERT(cig);

	handle_iter = UINT16_MAX;

	if (IS_PERIPHERAL(cig)) {
		/* Peripheral */
		sdu_interval = cig->p_sdu_interval;

	} else if (IS_CENTRAL(cig)) {
		/* Central */
		sdu_interval = cig->c_sdu_interval;

	} else {
		LL_ASSERT(0);

		return;
	}

	iso_interval = cig->iso_interval * PERIODIC_INT_UNIT_US;

	/* Handle ISO Transmit Test for all active CISes in the group */
	for (uint8_t i = 0; i < cig->lll.num_cis; i++)  {
		cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);
		LL_ASSERT(cis);

		if (!cis->hdr.test_mode.tx.enabled || cis->lll.handle == LLL_HANDLE_INVALID) {
			continue;
		}

		/* Calculate number of SDUs to transmit in the next ISO event. Ensure no overflow
		 * on 64-bit sdu_counter:
		 *   (39 bits x 22 bits (4x10^6 us) = 61 bits / 8 bits (255 us) = 53 bits)
		 */
		sdu_counter = DIV_ROUND_UP((cis->lll.event_count + 1U) * iso_interval,
					       sdu_interval);

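		/* Example: with iso_interval = 20 ms, sdu_interval = 10 ms and
		 * event_count = 4, sdu_counter = ceil(5 * 20000 / 10000) = 10
		 * SDUs due by the next event; the difference from the stored
		 * counter below gives the number of SDUs to send now.
		 */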
		if (cis->hdr.test_mode.tx.sdu_counter == 0U) {
			/* First ISO event. Align SDU counter for next event */
			cis->hdr.test_mode.tx.sdu_counter = sdu_counter;
			tx_sdu_count = 0U;
		} else {
			/* Calculate number of SDUs to produce for next ISO event */
			tx_sdu_count = sdu_counter - cis->hdr.test_mode.tx.sdu_counter;
		}

		/* Now process all SDUs due for next ISO event */
		for (uint8_t sdu = 0; sdu < tx_sdu_count; sdu++) {
			ll_iso_transmit_test_send_sdu(cis->lll.handle, ticks_at_expire);
		}
	}
}