1 /*
2  * Copyright (c) 2020 Demant
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/kernel.h>
8 #include <zephyr/sys/byteorder.h>
9 
10 #include <zephyr/bluetooth/addr.h>
11 #include <zephyr/bluetooth/iso.h>
12 
13 #include "util/util.h"
14 #include "util/memq.h"
15 #include "util/mayfly.h"
16 #include "util/dbuf.h"
17 
18 #include "hal/ccm.h"
19 #include "hal/ticker.h"
20 
21 #include "ticker/ticker.h"
22 
23 #include "pdu_df.h"
24 #include "lll/pdu_vendor.h"
25 #include "pdu.h"
26 
27 #include "lll.h"
28 #include "lll/lll_vendor.h"
29 #include "lll_clock.h"
30 #include "lll/lll_df_types.h"
31 #include "lll_conn.h"
32 #include "lll_conn_iso.h"
33 #include "lll_central_iso.h"
34 
35 #include "isoal.h"
36 
37 #include "ull_tx_queue.h"
38 
39 #include "ull_conn_types.h"
40 #include "ull_iso_types.h"
41 #include "ull_conn_iso_types.h"
42 
43 #include "ull_llcp.h"
44 
45 #include "ull_internal.h"
46 #include "ull_sched_internal.h"
47 #include "ull_conn_internal.h"
48 #include "ull_conn_iso_internal.h"
49 
50 #include "ll.h"
51 #include "ll_feat.h"
52 
53 #include <zephyr/bluetooth/hci_types.h>
54 
55 #include "hal/debug.h"
56 
57 #define SDU_MAX_DRIFT_PPM 100
58 #define SUB_INTERVAL_MIN  400
59 
60 #define STREAMS_PER_GROUP CONFIG_BT_CTLR_CONN_ISO_STREAMS_PER_GROUP
61 
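/* PHY bits accepted for the CIS Tx/Rx PHY parameters; without Coded PHY support
 * the Coded PHY bit (BIT(2)) is masked out of the valid set.
 */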
62 #if defined(CONFIG_BT_CTLR_PHY_CODED)
63 #define PHY_VALID_MASK (BT_HCI_ISO_PHY_VALID_MASK)
64 #else
65 #define PHY_VALID_MASK (BT_HCI_ISO_PHY_VALID_MASK & ~BIT(2))
66 #endif
67 
68 #if (CONFIG_BT_CTLR_CENTRAL_SPACING == 0)
69 static void cig_offset_get(struct ll_conn_iso_stream *cis);
70 static void mfy_cig_offset_get(void *param);
71 static void cis_offset_get(struct ll_conn_iso_stream *cis);
72 static void mfy_cis_offset_get(void *param);
73 static void ticker_op_cb(uint32_t status, void *param);
74 #endif /* CONFIG_BT_CTLR_CENTRAL_SPACING  == 0 */
75 
76 static uint32_t iso_interval_adjusted_bn_max_pdu_get(bool framed, uint32_t iso_interval,
77 						     uint32_t iso_interval_cig,
78 						     uint32_t sdu_interval,
79 						     uint16_t max_sdu, uint8_t *bn,
80 						     uint8_t *max_pdu);
81 static uint8_t ll_cig_parameters_validate(void);
82 static uint8_t ll_cis_parameters_validate(uint8_t cis_idx, uint8_t cis_id,
83 					  uint16_t c_sdu, uint16_t p_sdu,
84 					  uint16_t c_phy, uint16_t p_phy);
85 
86 #if defined(CONFIG_BT_CTLR_CONN_ISO_RELIABILITY_POLICY)
87 static uint8_t ll_cis_calculate_ft(uint32_t cig_sync_delay, uint32_t iso_interval_us,
88 				   uint32_t sdu_interval, uint32_t latency, uint8_t framed);
89 #endif /* CONFIG_BT_CTLR_CONN_ISO_RELIABILITY_POLICY */
90 
91 /* Setup cache for CIG commit transaction */
92 static struct {
93 	struct ll_conn_iso_group group;
94 	uint8_t cis_count;
95 	uint8_t c_ft;
96 	uint8_t p_ft;
97 	uint8_t cis_idx;
98 	struct ll_conn_iso_stream stream[CONFIG_BT_CTLR_CONN_ISO_STREAMS_PER_GROUP];
99 } ll_iso_setup;
100 
101 uint8_t ll_cig_parameters_open(uint8_t cig_id,
102 			       uint32_t c_interval, uint32_t p_interval,
103 			       uint8_t sca, uint8_t packing, uint8_t framing,
104 			       uint16_t c_latency, uint16_t p_latency,
105 			       uint8_t num_cis)
106 {
107 	memset(&ll_iso_setup, 0, sizeof(ll_iso_setup));
108 
109 	ll_iso_setup.group.cig_id = cig_id;
110 	ll_iso_setup.group.c_sdu_interval = c_interval;
111 	ll_iso_setup.group.p_sdu_interval = p_interval;
112 	ll_iso_setup.group.c_latency = c_latency * USEC_PER_MSEC;
113 	ll_iso_setup.group.p_latency = p_latency * USEC_PER_MSEC;
114 	ll_iso_setup.group.central.sca = sca;
115 	ll_iso_setup.group.central.packing = packing;
116 	ll_iso_setup.group.central.framing = framing;
117 	ll_iso_setup.cis_count = num_cis;
118 
119 	return ll_cig_parameters_validate();
120 }
121 
122 uint8_t ll_cis_parameters_set(uint8_t cis_id,
123 			      uint16_t c_sdu, uint16_t p_sdu,
124 			      uint8_t c_phy, uint8_t p_phy,
125 			      uint8_t c_rtn, uint8_t p_rtn)
126 {
127 	uint8_t cis_idx = ll_iso_setup.cis_idx;
128 	uint8_t status;
129 
130 	status = ll_cis_parameters_validate(cis_idx, cis_id, c_sdu, p_sdu, c_phy, p_phy);
131 	if (status) {
132 		return status;
133 	}
134 
135 	memset(&ll_iso_setup.stream[cis_idx], 0, sizeof(struct ll_conn_iso_stream));
136 
137 	ll_iso_setup.stream[cis_idx].cis_id = cis_id;
138 	ll_iso_setup.stream[cis_idx].c_max_sdu = c_sdu;
139 	ll_iso_setup.stream[cis_idx].p_max_sdu = p_sdu;
140 	ll_iso_setup.stream[cis_idx].lll.tx.phy = c_phy;
141 	ll_iso_setup.stream[cis_idx].lll.tx.phy_flags = PHY_FLAGS_S8;
142 	ll_iso_setup.stream[cis_idx].lll.rx.phy = p_phy;
143 	ll_iso_setup.stream[cis_idx].lll.rx.phy_flags = PHY_FLAGS_S8;
144 	ll_iso_setup.stream[cis_idx].central.c_rtn = c_rtn;
145 	ll_iso_setup.stream[cis_idx].central.p_rtn = p_rtn;
146 	ll_iso_setup.cis_idx++;
147 
148 	return BT_HCI_ERR_SUCCESS;
149 }
150 
151 /* TODO:
152  * - Calculate ISO_Interval to allow SDU_Interval < ISO_Interval
153  */
154 uint8_t ll_cig_parameters_commit(uint8_t cig_id, uint16_t *handles)
155 {
156 	uint16_t cis_created_handles[STREAMS_PER_GROUP];
157 	struct ll_conn_iso_stream *cis;
158 	struct ll_conn_iso_group *cig;
159 	uint32_t iso_interval_cig_us;
160 	uint32_t iso_interval_us;
161 	uint32_t cig_sync_delay;
162 	uint32_t max_se_length;
163 	uint32_t c_max_latency;
164 	uint32_t p_max_latency;
165 	uint16_t handle_iter;
166 	uint32_t total_time;
167 	bool force_framed;
168 	bool cig_created;
169 	uint8_t  num_cis;
170 	uint8_t  err;
171 
172 	/* Intermediate subevent data */
173 	struct {
174 		uint32_t length;
175 		uint8_t  total_count;
176 	} se[STREAMS_PER_GROUP];
177 
178 	for (uint8_t i = 0U; i < STREAMS_PER_GROUP; i++) {
179 		cis_created_handles[i] = LLL_HANDLE_INVALID;
180 	};
181 
182 	cig_created = false;
183 
184 	/* If CIG already exists, this is a reconfigure */
185 	cig = ll_conn_iso_group_get_by_id(cig_id);
186 	if (!cig) {
187 		/* CIG does not exist - create it */
188 		cig = ll_conn_iso_group_acquire();
189 		if (!cig) {
190 			ll_iso_setup.cis_idx = 0U;
191 
192 			/* No space for new CIG */
193 			return BT_HCI_ERR_INSUFFICIENT_RESOURCES;
194 		}
195 		cig->lll.num_cis = 0U;
196 		cig_created = true;
197 
198 	} else if (cig->state != CIG_STATE_CONFIGURABLE) {
199 		/* CIG is not in configurable state */
200 		return BT_HCI_ERR_CMD_DISALLOWED;
201 	}
202 
203 	/* Store currently configured number of CISes before cache transfer */
204 	num_cis = cig->lll.num_cis;
205 
206 	/* Transfer parameters from configuration cache and clear LLL fields */
207 	memcpy(cig, &ll_iso_setup.group, sizeof(struct ll_conn_iso_group));
208 
209 	cig->state = CIG_STATE_CONFIGURABLE;
210 
211 	/* Setup LLL parameters */
212 	cig->lll.handle = ll_conn_iso_group_handle_get(cig);
213 	cig->lll.role = BT_HCI_ROLE_CENTRAL;
214 	cig->lll.resume_cis = LLL_HANDLE_INVALID;
215 	cig->lll.num_cis = num_cis;
216 	force_framed = false;
217 
218 	if (!cig->central.test) {
219 		/* TODO: Calculate ISO_Interval based on SDU_Interval and Max_SDU vs Max_PDU,
220 		 * taking the policy into consideration. It may also be interesting to select an
221 		 * ISO_Interval which is less likely to collide with other connections.
222 		 * For instance:
223 		 *
224 		 *  SDU_Interval   ISO_Interval   Max_SDU   Max_PDU   Collision risk (10 ms)
225 		 *  ------------------------------------------------------------------------
226 		 *  10 ms          10 ms          40        40        100%
227 		 *  10 ms          12.5 ms        40        50         25%
228 		 */
229 
230 		/* Set ISO_Interval to the closest lower value of SDU_Interval to be able to
231 		 * handle the throughput. For unframed operation the SDU_Interval must be a
232 		 * multiple of the 1.25 ms ISO_Interval unit; if it is not, framed mode is forced.
233 		 */
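		/* E.g. an SDU_Interval of 7500 us or 10000 us is a multiple of the 1250 us
		 * ISO_Interval unit and can remain unframed, whereas e.g. 11000 us is not
		 * and results in framed mode being forced below.
		 */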
234 		iso_interval_us = cig->c_sdu_interval;
235 
236 		if (iso_interval_us < ISO_INTERVAL_TO_US(BT_HCI_ISO_INTERVAL_MIN)) {
237 			/* ISO_Interval is below minimum (5 ms) */
238 			iso_interval_us = ISO_INTERVAL_TO_US(BT_HCI_ISO_INTERVAL_MIN);
239 		}
240 
241 #if defined(CONFIG_BT_CTLR_CONN_ISO_AVOID_SEGMENTATION)
242 		/* Check if this is a HAP use case, which requires higher link bandwidth to
243 		 * ensure segmentation is not invoked in the ISO-AL.
244 		 */
245 		if (cig->central.framing && cig->c_sdu_interval == 10000U) {
246 			iso_interval_us = 7500U; /* us */
247 		}
248 #endif
249 
250 		if (!cig->central.framing && (cig->c_sdu_interval % ISO_INT_UNIT_US)) {
251 			/* Framing not requested but requirement for unframed is not met. Force
252 			 * CIG into framed mode.
253 			 */
254 			force_framed = true;
255 		}
256 	} else {
257 		iso_interval_us = cig->iso_interval * ISO_INT_UNIT_US;
258 	}
259 
260 	iso_interval_cig_us = iso_interval_us;
261 
262 	lll_hdr_init(&cig->lll, cig);
263 	max_se_length = 0U;
264 
265 	/* Create all configurable CISes */
266 	for (uint8_t i = 0U; i < ll_iso_setup.cis_count; i++) {
267 		memq_link_t *link_tx_free;
268 		memq_link_t link_tx;
269 
270 		cis = ll_conn_iso_stream_get_by_id(ll_iso_setup.stream[i].cis_id);
271 		if (cis) {
272 			/* Check if a Max_SDU reconfiguration violates the datapath by changing a
273 			 * non-zero Max_SDU, which has an associated datapath, to zero.
274 			 */
275 			if ((cis->c_max_sdu && cis->hdr.datapath_in &&
276 			     !ll_iso_setup.stream[i].c_max_sdu) ||
277 			    (cis->p_max_sdu && cis->hdr.datapath_out &&
278 			     !ll_iso_setup.stream[i].p_max_sdu)) {
279 				/* Reconfiguring a CIS to disable a direction that has a datapath
280 				 * set up is not allowed.
281 				 */
282 				err = BT_HCI_ERR_CMD_DISALLOWED;
283 				goto ll_cig_parameters_commit_cleanup;
284 			}
285 		} else {
286 			/* Acquire new CIS */
287 			cis = ll_conn_iso_stream_acquire();
288 			if (!cis) {
289 				/* No space for new CIS */
290 				ll_iso_setup.cis_idx = 0U;
291 
292 				err = BT_HCI_ERR_CONN_LIMIT_EXCEEDED;
293 				goto ll_cig_parameters_commit_cleanup;
294 			}
295 
296 			cis_created_handles[i] = ll_conn_iso_stream_handle_get(cis);
297 			cig->lll.num_cis++;
298 		}
299 
300 		/* Store TX link and free link before transfer */
301 		link_tx_free = cis->lll.link_tx_free;
302 		link_tx = cis->lll.link_tx;
303 
304 		/* Transfer parameters from configuration cache */
305 		memcpy(cis, &ll_iso_setup.stream[i], sizeof(struct ll_conn_iso_stream));
306 
307 		cis->group  = cig;
308 		cis->framed = cig->central.framing || force_framed;
309 
310 		cis->lll.link_tx_free = link_tx_free;
311 		cis->lll.link_tx = link_tx;
312 		cis->lll.handle = ll_conn_iso_stream_handle_get(cis);
313 		handles[i] = cis->lll.handle;
314 	}
315 
316 	num_cis = cig->lll.num_cis;
317 
318 ll_cig_parameters_commit_retry:
319 	handle_iter = UINT16_MAX;
320 
321 	/* 1) Acquire CIS instances and initialize instance data.
322 	 * 2) Calculate SE_Length for each CIS and store the largest
323 	 * 3) Calculate BN
324 	 * 4) Calculate total number of subevents needed to transfer payloads
325 	 *
326 	 *                 Sequential                Interleaved
327 	 * CIS0            ___█_█_█_____________█_   ___█___█___█_________█_
328 	 * CIS1            _________█_█_█_________   _____█___█___█_________
329 	 * CIS_Sub_Interval  |.|                       |...|
330 	 * CIG_Sync_Delay    |............|            |............|
331 	 * CIS_Sync_Delay 0  |............|            |............|
332 	 * CIS_Sync_Delay 1        |......|              |..........|
333 	 * ISO_Interval      |.................|..     |.................|..
334 	 */
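	/* Each subevent carries one central-to-peripheral PDU and one peripheral-to-central
	 * PDU separated by T_IFS, followed by the minimum subevent spacing; hence
	 * SE_Length = mpt_c + EVENT_IFS_US + mpt_p + EVENT_MSS_US below.
	 */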
335 	for (uint8_t i = 0U; i < num_cis; i++) {
336 		uint32_t mpt_c;
337 		uint32_t mpt_p;
338 		bool tx;
339 		bool rx;
340 
341 		cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);
342 
343 		if (cig->central.test) {
344 			cis->lll.tx.ft = ll_iso_setup.c_ft;
345 			cis->lll.rx.ft = ll_iso_setup.p_ft;
346 
347 			tx = cis->lll.tx.bn && cis->lll.tx.max_pdu;
348 			rx = cis->lll.rx.bn && cis->lll.rx.max_pdu;
349 		} else {
350 			LL_ASSERT(cis->framed || iso_interval_us >= cig->c_sdu_interval);
351 
352 			tx = cig->c_sdu_interval && cis->c_max_sdu;
353 			rx = cig->p_sdu_interval && cis->p_max_sdu;
354 
355 			/* Use Max_PDU = MIN(<buffer_size>, Max_SDU) as default.
356 			 * May be changed by iso_interval_adjusted_bn_max_pdu_get().
357 			 */
358 			cis->lll.tx.max_pdu = MIN(LL_CIS_OCTETS_TX_MAX,
359 						  cis->c_max_sdu);
360 			cis->lll.rx.max_pdu = MIN(LL_CIS_OCTETS_RX_MAX,
361 						  cis->p_max_sdu);
362 
363 			/* Calculate BN and Max_PDU (framed) for both
364 			 * directions
365 			 */
366 			if (tx) {
367 				uint32_t iso_interval_adjust_us;
368 				uint8_t max_pdu;
369 				uint8_t bn;
370 
371 				bn = cis->lll.tx.bn;
372 				max_pdu = cis->lll.tx.max_pdu;
373 				iso_interval_adjust_us =
374 					iso_interval_adjusted_bn_max_pdu_get(cis->framed,
375 						iso_interval_us, iso_interval_cig_us,
376 						cig->c_sdu_interval, cis->c_max_sdu, &bn, &max_pdu);
377 				if (iso_interval_adjust_us != iso_interval_us) {
378 					iso_interval_us = iso_interval_adjust_us;
379 
380 					goto ll_cig_parameters_commit_retry;
381 				}
382 				cis->lll.tx.bn = bn;
383 				cis->lll.tx.max_pdu = max_pdu;
384 			} else {
385 				cis->lll.tx.bn = 0U;
386 			}
387 
388 			if (rx) {
389 				uint32_t iso_interval_adjust_us;
390 				uint8_t max_pdu;
391 				uint8_t bn;
392 
393 				bn = cis->lll.rx.bn;
394 				max_pdu = cis->lll.rx.max_pdu;
395 				iso_interval_adjust_us =
396 					iso_interval_adjusted_bn_max_pdu_get(cis->framed,
397 						iso_interval_us, iso_interval_cig_us,
398 						cig->p_sdu_interval, cis->p_max_sdu, &bn, &max_pdu);
399 				if (iso_interval_adjust_us != iso_interval_us) {
400 					iso_interval_us = iso_interval_adjust_us;
401 
402 					goto ll_cig_parameters_commit_retry;
403 				}
404 				cis->lll.rx.bn = bn;
405 				cis->lll.rx.max_pdu = max_pdu;
406 			} else {
407 				cis->lll.rx.bn = 0U;
408 			}
409 		}
410 
411 		/* Calculate SE_Length */
412 		mpt_c = PDU_CIS_MAX_US(cis->lll.tx.max_pdu, tx, cis->lll.tx.phy);
413 		mpt_p = PDU_CIS_MAX_US(cis->lll.rx.max_pdu, rx, cis->lll.rx.phy);
414 
415 		se[i].length = mpt_c + EVENT_IFS_US + mpt_p + EVENT_MSS_US;
416 		max_se_length = MAX(max_se_length, se[i].length);
417 
418 		/* Total number of subevents needed */
419 		se[i].total_count = MAX((cis->central.c_rtn + 1) * cis->lll.tx.bn,
420 					(cis->central.p_rtn + 1) * cis->lll.rx.bn);
421 	}
422 
423 	cig->lll.iso_interval_us = iso_interval_us;
424 	cig->iso_interval = iso_interval_us / ISO_INT_UNIT_US;
425 
426 	handle_iter = UINT16_MAX;
427 	total_time = 0U;
428 
429 	/* 1) Prepare calculation of the flush timeout by adding up the total time needed to
430 	 *    transfer all payloads, including retransmissions.
431 	 */
432 	if (cig->central.packing == BT_ISO_PACKING_SEQUENTIAL) {
433 		/* Sequential CISes - add up the total duration */
434 		for (uint8_t i = 0U; i < num_cis; i++) {
435 			total_time += se[i].total_count * se[i].length;
436 		}
437 	}
438 
439 	handle_iter = UINT16_MAX;
440 	cig_sync_delay = 0U;
441 
442 	/* 1) Calculate the flush timeout either by dividing the total time needed to transfer
443 	 *    all payloads, including retransmissions, by the ISO_Interval (low latency policy),
444 	 *    or by dividing the Max_Transport_Latency by the ISO_Interval (reliability policy).
445 	 * 2) Calculate the number of subevents (NSE) by distributing the total number of
446 	 *    subevents over FT ISO_Intervals.
447 	 * 3) Calculate the subinterval as either the individual CIS SE_Length (sequential), or
448 	 *    the largest SE_Length times the number of CISes (interleaved). The minimum
449 	 *    subinterval is 400 us.
450 	 * 4) Calculate CIG_Sync_Delay.
451 	 */
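	/* Illustrative example (low latency policy, sequential packing): two CISes, each
	 * needing 3 subevents of 800 us, give total_time = 2 * 3 * 800 = 4800 us; with an
	 * ISO_Interval of 7500 us this yields FT = ceil(4800 / 7500) = 1 and, per CIS,
	 * NSE = ceil(3 / 1) = 3.
	 */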
452 	for (uint8_t i = 0U; i < num_cis; i++) {
453 		cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);
454 
455 		if (!cig->central.test) {
456 #if defined(CONFIG_BT_CTLR_CONN_ISO_LOW_LATENCY_POLICY)
457 			/* TODO: Only implemented for sequential packing */
458 			LL_ASSERT(cig->central.packing == BT_ISO_PACKING_SEQUENTIAL);
459 
460 			/* Use symmetric flush timeout */
461 			cis->lll.tx.ft = DIV_ROUND_UP(total_time, iso_interval_us);
462 			cis->lll.rx.ft = cis->lll.tx.ft;
463 
464 #elif defined(CONFIG_BT_CTLR_CONN_ISO_RELIABILITY_POLICY)
465 			/* Utilize Max_Transport_latency */
466 
467 			/*
468 			 * Set CIG_Sync_Delay = ISO_Interval as largest possible CIG_Sync_Delay.
469 			 * This favors utilizing as much as possible of the Max_Transport_latency,
470 			 * and spreads out payloads over multiple CIS events (if necessary).
471 			 */
472 			uint32_t cig_sync_delay_us_max = iso_interval_us;
473 
474 			cis->lll.tx.ft = ll_cis_calculate_ft(cig_sync_delay_us_max, iso_interval_us,
475 							     cig->c_sdu_interval, cig->c_latency,
476 							     cis->framed);
477 
478 			cis->lll.rx.ft = ll_cis_calculate_ft(cig_sync_delay_us_max, iso_interval_us,
479 							     cig->p_sdu_interval, cig->p_latency,
480 							     cis->framed);
481 
482 			if ((cis->lll.tx.ft == 0U) || (cis->lll.rx.ft == 0U)) {
483 				/* Invalid FT caused by invalid combination of parameters */
484 				err = BT_HCI_ERR_INVALID_PARAM;
485 				goto ll_cig_parameters_commit_cleanup;
486 			}
487 
488 #else
489 			LL_ASSERT(0);
490 #endif
491 			cis->lll.nse = DIV_ROUND_UP(se[i].total_count, cis->lll.tx.ft);
492 		}
493 
494 		if (cig->central.packing == BT_ISO_PACKING_SEQUENTIAL) {
495 			/* Accumulate CIG sync delay for sequential CISes */
496 			cis->lll.sub_interval = MAX(SUB_INTERVAL_MIN, se[i].length);
497 			cig_sync_delay += cis->lll.nse * cis->lll.sub_interval;
498 		} else {
499 			/* For interleaved CISes, offset each CIS by a fraction of a subinterval,
500 			 * positioning them evenly within the subinterval.
501 			 */
502 			cis->lll.sub_interval = MAX(SUB_INTERVAL_MIN, num_cis * max_se_length);
503 			cig_sync_delay = MAX(cig_sync_delay,
504 					     (cis->lll.nse * cis->lll.sub_interval) +
505 					     (i * cis->lll.sub_interval / num_cis));
506 		}
507 	}
508 
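	/* E.g. two sequential CISes, each with NSE = 3 and a sub_interval of 800 us, give
	 * CIG_Sync_Delay = 2 * 3 * 800 = 4800 us.
	 */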
509 	cig->sync_delay = cig_sync_delay;
510 
511 	handle_iter = UINT16_MAX;
512 	c_max_latency = 0U;
513 	p_max_latency = 0U;
514 
515 	/* 1) Calculate transport latencies for each CIS and validate against Max_Transport_Latency.
516 	 * 2) Lay out CISes by updating CIS_Sync_Delay, distributing according to the packing.
517 	 */
518 	for (uint8_t i = 0U; i < num_cis; i++) {
519 		uint32_t c_latency;
520 		uint32_t p_latency;
521 
522 		cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);
523 
524 		if (cis->framed) {
525 			/* Transport_Latency = CIG_Sync_Delay + FT x ISO_Interval + SDU_Interval */
526 			c_latency = cig->sync_delay +
527 				    (cis->lll.tx.ft * iso_interval_us) +
528 				    cig->c_sdu_interval;
529 			p_latency = cig->sync_delay +
530 				    (cis->lll.rx.ft * iso_interval_us) +
531 				    cig->p_sdu_interval;
532 
533 		} else {
534 			/* Transport_Latency = CIG_Sync_Delay + FT x ISO_Interval - SDU_Interval */
535 			c_latency = cig->sync_delay +
536 				    (cis->lll.tx.ft * iso_interval_us) -
537 				    cig->c_sdu_interval;
538 			p_latency = cig->sync_delay +
539 				    (cis->lll.rx.ft * iso_interval_us) -
540 				    cig->p_sdu_interval;
541 		}
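		/* E.g. (illustrative, unframed): CIG_Sync_Delay = 4800 us, FT = 1,
		 * ISO_Interval = 7500 us and SDU_Interval = 7500 us give
		 * Transport_Latency = 4800 + 7500 - 7500 = 4800 us.
		 */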
542 
543 		if (!cig->central.test) {
544 			/* Make sure specified Max_Transport_Latency is not exceeded */
545 			if ((c_latency > cig->c_latency) || (p_latency > cig->p_latency)) {
546 				/* Check if we can reduce RTN to meet requested latency */
547 				if (!cis->central.c_rtn && !cis->central.p_rtn) {
548 					/* Actual latency exceeds the Max. Transport Latency */
549 					err = BT_HCI_ERR_INVALID_PARAM;
550 
551 					/* Release allocated resources  and exit */
552 					goto ll_cig_parameters_commit_cleanup;
553 				}
554 
555 				/* Reduce the RTN to meet host requested latency.
556 				 * NOTE: Both the central and peripheral retransmission counts are
557 				 * reduced for simplicity.
558 				 */
559 				if (cis->central.c_rtn) {
560 					cis->central.c_rtn--;
561 				}
562 				if (cis->central.p_rtn) {
563 					cis->central.p_rtn--;
564 				}
565 
566 				goto ll_cig_parameters_commit_retry;
567 			}
568 		}
569 
570 		c_max_latency = MAX(c_max_latency, c_latency);
571 		p_max_latency = MAX(p_max_latency, p_latency);
572 
573 		if (cig->central.packing == BT_ISO_PACKING_SEQUENTIAL) {
574 			/* Distribute CISes sequentially */
575 			cis->sync_delay = cig_sync_delay;
576 			cig_sync_delay -= cis->lll.nse * cis->lll.sub_interval;
577 		} else {
578 			/* Distribute CISes interleaved */
579 			cis->sync_delay = cig_sync_delay;
580 			cig_sync_delay -= (cis->lll.sub_interval / num_cis);
581 		}
582 
583 		if (cis->lll.nse <= 1) {
584 			cis->lll.sub_interval = 0U;
585 		}
586 	}
587 
588 	/* Update actual latency */
589 	cig->c_latency = c_max_latency;
590 	cig->p_latency = p_max_latency;
591 
592 #if !defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
593 	uint32_t slot_us;
594 
595 	/* CIG sync_delay has been calculated considering the configured
596 	 * packing.
597 	 */
598 	slot_us = cig->sync_delay;
599 
600 	slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
601 
602 	/* Populate the ULL hdr with event timings overheads */
603 	cig->ull.ticks_active_to_start = 0U;
604 	cig->ull.ticks_prepare_to_start =
605 		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
606 	cig->ull.ticks_preempt_to_start =
607 		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
608 	cig->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);
609 #endif /* !CONFIG_BT_CTLR_JIT_SCHEDULING */
610 
611 	/* Reset params cache */
612 	ll_iso_setup.cis_idx = 0U;
613 
614 	return BT_HCI_ERR_SUCCESS;
615 
616 ll_cig_parameters_commit_cleanup:
617 	/* Late configuration failure - clean up */
618 	for (uint8_t i = 0U; i < ll_iso_setup.cis_count; i++) {
619 		if (cis_created_handles[i] != LLL_HANDLE_INVALID) {
620 			/* Release CIS instance created in failing configuration */
621 			cis = ll_conn_iso_stream_get(cis_created_handles[i]);
622 			ll_conn_iso_stream_release(cis);
623 		} else {
624 			break;
625 		}
626 	}
627 
628 	/* If CIG was created in this failed configuration - release it */
629 	if (cig_created) {
630 		ll_conn_iso_group_release(cig);
631 	}
632 
633 	return err;
634 }
635 
636 uint8_t ll_cig_parameters_test_open(uint8_t cig_id, uint32_t c_interval,
637 				    uint32_t p_interval, uint8_t c_ft,
638 				    uint8_t p_ft, uint16_t iso_interval,
639 				    uint8_t sca, uint8_t packing,
640 				    uint8_t framing, uint8_t num_cis)
641 {
642 	memset(&ll_iso_setup, 0, sizeof(ll_iso_setup));
643 
644 	ll_iso_setup.group.cig_id = cig_id;
645 	ll_iso_setup.group.c_sdu_interval = c_interval;
646 	ll_iso_setup.group.p_sdu_interval = p_interval;
647 	ll_iso_setup.group.iso_interval = iso_interval;
648 	ll_iso_setup.group.central.sca = sca;
649 	ll_iso_setup.group.central.packing = packing;
650 	ll_iso_setup.group.central.framing = framing;
651 	ll_iso_setup.group.central.test = 1U;
652 	ll_iso_setup.cis_count = num_cis;
653 
654 	/* TODO: Perhaps move FT to LLL CIG */
655 	ll_iso_setup.c_ft = c_ft;
656 	ll_iso_setup.p_ft = p_ft;
657 
658 	return ll_cig_parameters_validate();
659 }
660 
661 uint8_t ll_cis_parameters_test_set(uint8_t cis_id, uint8_t nse,
662 				   uint16_t c_sdu, uint16_t p_sdu,
663 				   uint16_t c_pdu, uint16_t p_pdu,
664 				   uint8_t c_phy, uint8_t p_phy,
665 				   uint8_t c_bn, uint8_t p_bn)
666 {
667 	uint8_t cis_idx = ll_iso_setup.cis_idx;
668 	uint8_t status;
669 
670 	status = ll_cis_parameters_validate(cis_idx, cis_id, c_sdu, p_sdu, c_phy, p_phy);
671 	if (status) {
672 		return status;
673 	}
674 
675 	memset(&ll_iso_setup.stream[cis_idx], 0, sizeof(struct ll_conn_iso_stream));
676 
677 	ll_iso_setup.stream[cis_idx].cis_id = cis_id;
678 	ll_iso_setup.stream[cis_idx].c_max_sdu = c_sdu;
679 	ll_iso_setup.stream[cis_idx].p_max_sdu = p_sdu;
680 	ll_iso_setup.stream[cis_idx].lll.nse = nse;
681 	ll_iso_setup.stream[cis_idx].lll.tx.max_pdu = c_bn ? c_pdu : 0U;
682 	ll_iso_setup.stream[cis_idx].lll.rx.max_pdu = p_bn ? p_pdu : 0U;
683 	ll_iso_setup.stream[cis_idx].lll.tx.phy = c_phy;
684 	ll_iso_setup.stream[cis_idx].lll.tx.phy_flags = PHY_FLAGS_S8;
685 	ll_iso_setup.stream[cis_idx].lll.rx.phy = p_phy;
686 	ll_iso_setup.stream[cis_idx].lll.rx.phy_flags = PHY_FLAGS_S8;
687 	ll_iso_setup.stream[cis_idx].lll.tx.bn = c_bn;
688 	ll_iso_setup.stream[cis_idx].lll.rx.bn = p_bn;
689 	ll_iso_setup.cis_idx++;
690 
691 	return BT_HCI_ERR_SUCCESS;
692 }
693 
694 uint8_t ll_cis_create_check(uint16_t cis_handle, uint16_t acl_handle)
695 {
696 	struct ll_conn *conn;
697 
698 	conn = ll_connected_get(acl_handle);
699 	if (conn) {
700 		struct ll_conn_iso_stream *cis;
701 
702 		/* Verify conn refers to a device acting as central */
703 		if (conn->lll.role != BT_HCI_ROLE_CENTRAL) {
704 			return BT_HCI_ERR_CMD_DISALLOWED;
705 		}
706 
707 		/* Verify handle validity and association */
708 		cis = ll_conn_iso_stream_get(cis_handle);
709 
710 		if (cis->group && (cis->lll.handle == cis_handle)) {
711 			if (cis->established) {
712 				/* CIS is already created */
713 				return BT_HCI_ERR_CONN_ALREADY_EXISTS;
714 			}
715 
716 			return BT_HCI_ERR_SUCCESS;
717 		}
718 	}
719 
720 	return BT_HCI_ERR_UNKNOWN_CONN_ID;
721 }
722 
723 void ll_cis_create(uint16_t cis_handle, uint16_t acl_handle)
724 {
725 	struct ll_conn_iso_stream *cis;
726 	struct ll_conn *conn;
727 	int err;
728 
729 	/* Handles have been verified prior to calling this function */
730 	conn = ll_connected_get(acl_handle);
731 	cis = ll_conn_iso_stream_get(cis_handle);
732 	cis->lll.acl_handle = acl_handle;
733 
734 	/* Create access address */
735 	err = util_aa_le32(cis->lll.access_addr);
736 	LL_ASSERT(!err);
737 
738 	/* Initialize stream states */
739 	cis->established = 0;
740 	cis->teardown = 0;
741 
742 	(void)memset(&cis->hdr, 0U, sizeof(cis->hdr));
743 
744 	/* Initialize TX link */
745 	if (!cis->lll.link_tx_free) {
746 		cis->lll.link_tx_free = &cis->lll.link_tx;
747 	}
748 
749 	memq_init(cis->lll.link_tx_free, &cis->lll.memq_tx.head, &cis->lll.memq_tx.tail);
750 	cis->lll.link_tx_free = NULL;
751 
752 	/* Initiate CIS Request Control Procedure */
753 	if (ull_cp_cis_create(conn, cis) == BT_HCI_ERR_SUCCESS) {
754 		LL_ASSERT(cis->group);
755 
756 		if (cis->group->state == CIG_STATE_CONFIGURABLE) {
757 			/* This CIG is now initiating an ISO connection */
758 			cis->group->state = CIG_STATE_INITIATING;
759 		}
760 	}
761 }
762 
763 /* Core 5.3 Vol 6, Part B section 7.8.100:
764  * The HCI_LE_Remove_CIG command is used by the Central’s Host to remove the CIG
765  * identified by CIG_ID.
766  * This command shall delete the CIG_ID and also delete the Connection_Handles
767  * of the CIS configurations stored in the CIG.
768  * This command shall also remove the isochronous data paths that are associated
769  * with the Connection_Handles of the CIS configurations.
770  */
771 uint8_t ll_cig_remove(uint8_t cig_id)
772 {
773 	struct ll_conn_iso_stream *cis;
774 	struct ll_conn_iso_group *cig;
775 	uint16_t handle_iter;
776 
777 	cig = ll_conn_iso_group_get_by_id(cig_id);
778 	if (!cig) {
779 		/* Unknown CIG id */
780 		return BT_HCI_ERR_UNKNOWN_CONN_ID;
781 	}
782 
783 	if ((cig->state == CIG_STATE_INITIATING) || (cig->state == CIG_STATE_ACTIVE)) {
784 		/* CIG is in initiating- or active state */
785 		return BT_HCI_ERR_CMD_DISALLOWED;
786 	}
787 
788 	handle_iter = UINT16_MAX;
789 	for (uint8_t i = 0U; i < cig->lll.num_cis; i++)  {
790 		struct ll_conn *conn;
791 
792 		cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);
793 		if (!cis) {
794 			break;
795 		}
796 
797 		conn = ll_connected_get(cis->lll.acl_handle);
798 
799 		if (conn) {
800 			if (ull_lp_cc_is_active(conn)) {
801 				/* CIG creation is ongoing */
802 				return BT_HCI_ERR_CMD_DISALLOWED;
803 			}
804 		}
805 	}
806 
807 	/* CIG exists and is not active */
808 	handle_iter = UINT16_MAX;
809 
810 	for (uint8_t i = 0U; i < cig->lll.num_cis; i++)  {
811 		cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);
812 		if (cis) {
813 			/* Release CIS instance */
814 			ll_conn_iso_stream_release(cis);
815 		}
816 	}
817 
818 	/* Release the CIG instance */
819 	ll_conn_iso_group_release(cig);
820 
821 	return BT_HCI_ERR_SUCCESS;
822 }
823 
824 int ull_central_iso_init(void)
825 {
826 	return 0;
827 }
828 
829 int ull_central_iso_reset(void)
830 {
831 	return 0;
832 }
833 
834 uint8_t ull_central_iso_setup(uint16_t cis_handle,
835 			      uint32_t *cig_sync_delay,
836 			      uint32_t *cis_sync_delay,
837 			      uint32_t *cis_offset_min,
838 			      uint32_t *cis_offset_max,
839 			      uint16_t *conn_event_count,
840 			      uint8_t  *access_addr)
841 {
842 	struct ll_conn_iso_stream *cis;
843 	struct ll_conn_iso_group *cig;
844 	uint16_t event_counter;
845 	struct ll_conn *conn;
846 	uint16_t instant;
847 
848 	cis = ll_conn_iso_stream_get(cis_handle);
849 	if (!cis) {
850 		return BT_HCI_ERR_UNSPECIFIED;
851 	}
852 
853 	cig = cis->group;
854 	if (!cig) {
855 		return BT_HCI_ERR_UNSPECIFIED;
856 	}
857 
858 	/* ACL connection of the new CIS */
859 	conn = ll_conn_get(cis->lll.acl_handle);
860 	event_counter = ull_conn_event_counter(conn);
861 	instant = MAX(*conn_event_count, event_counter + 1);
862 
863 #if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
864 	uint32_t cis_offset;
865 
866 	cis_offset = *cis_offset_min;
867 
868 	/* Calculate offset for CIS */
869 	if (cig->state == CIG_STATE_ACTIVE) {
870 		uint32_t time_of_instant;
871 		uint32_t cig_ref_point;
872 
873 		/* CIG is started. Use the CIG reference point and latest ticks_at_expire
874 		 * for associated ACL, to calculate the offset.
875 		 * NOTE: The following calculations are done in a 32-bit time
876 		 * range with full consideration and expectation that the
877 		 * controller clock does not support the full 32-bit range in
878 		 * microseconds. However it is valid as the purpose is to
879 		 * calculate the difference and the spare higher order bits will
880 		 * ensure that no wrapping can occur before the termination
881 		 * condition of the while loop is met. Using time wrapping will
882 		 * complicate this.
883 		 */
884 		time_of_instant = HAL_TICKER_TICKS_TO_US(conn->llcp.prep.ticks_at_expire) +
885 				EVENT_OVERHEAD_START_US +
886 				((instant - event_counter) * conn->lll.interval * CONN_INT_UNIT_US);
887 
888 		cig_ref_point = cig->cig_ref_point;
889 		while (cig_ref_point < time_of_instant) {
890 			cig_ref_point += cig->iso_interval * ISO_INT_UNIT_US;
891 		}
892 
893 		cis_offset = (cig_ref_point - time_of_instant) +
894 			     (cig->sync_delay - cis->sync_delay);
895 
896 		/* We have to narrow down the min/max offset to the calculated value */
897 		*cis_offset_min = cis_offset;
898 		*cis_offset_max = cis_offset;
899 	}
900 
901 	cis->offset = cis_offset;
902 
903 #else /* !CONFIG_BT_CTLR_JIT_SCHEDULING */
904 
905 	if (false) {
906 
907 #if defined(CONFIG_BT_CTLR_CENTRAL_SPACING)
908 	} else if (CONFIG_BT_CTLR_CENTRAL_SPACING > 0) {
909 		uint32_t cis_offset;
910 
911 		cis_offset = HAL_TICKER_TICKS_TO_US(conn->ull.ticks_slot) +
912 			     (EVENT_TICKER_RES_MARGIN_US << 1U);
913 
914 		cis_offset += cig->sync_delay - cis->sync_delay;
915 
916 		if (cis_offset < *cis_offset_min) {
917 			cis_offset = *cis_offset_min;
918 		}
919 
920 		cis->offset = cis_offset;
921 #endif /* CONFIG_BT_CTLR_CENTRAL_SPACING */
922 
923 	} else {
924 		cis->offset = *cis_offset_min;
925 	}
926 #endif /* !CONFIG_BT_CTLR_JIT_SCHEDULING */
927 
928 	cis->central.instant = instant;
929 #if defined(CONFIG_BT_CTLR_ISOAL_PSN_IGNORE)
930 	cis->pkt_seq_num = 0U;
931 #endif /* CONFIG_BT_CTLR_ISOAL_PSN_IGNORE */
932 	cis->lll.event_count = LLL_CONN_ISO_EVENT_COUNT_MAX;
933 	cis->lll.next_subevent = 0U;
934 	cis->lll.tifs_us = conn->lll.tifs_cis_us;
935 	cis->lll.sn = 0U;
936 	cis->lll.nesn = 0U;
937 	cis->lll.cie = 0U;
938 	cis->lll.npi = 0U;
939 	cis->lll.flush = LLL_CIS_FLUSH_NONE;
940 	cis->lll.active = 0U;
941 	cis->lll.datapath_ready_rx = 0U;
942 	cis->lll.tx.payload_count = 0U;
943 	cis->lll.rx.payload_count = 0U;
944 
945 	cis->lll.tx.bn_curr = 1U;
946 	cis->lll.rx.bn_curr = 1U;
947 
948 	/* Transfer to caller */
949 	*cig_sync_delay = cig->sync_delay;
950 	*cis_sync_delay = cis->sync_delay;
951 	*cis_offset_min = cis->offset;
952 	memcpy(access_addr, cis->lll.access_addr, sizeof(cis->lll.access_addr));
953 
954 	*conn_event_count = instant;
955 
956 	return 0U;
957 }
958 
959 int ull_central_iso_cis_offset_get(uint16_t cis_handle,
960 				   uint32_t *cis_offset_min,
961 				   uint32_t *cis_offset_max,
962 				   uint16_t *conn_event_count)
963 {
964 	struct ll_conn_iso_stream *cis;
965 	struct ll_conn_iso_group *cig;
966 	struct ll_conn *conn;
967 
968 	cis = ll_conn_iso_stream_get(cis_handle);
969 	LL_ASSERT(cis);
970 
971 	conn = ll_conn_get(cis->lll.acl_handle);
972 
973 	cis->central.instant = ull_conn_event_counter(conn) + 3U;
974 	*conn_event_count = cis->central.instant;
975 
976 	/* Provide CIS offset range
977 	 * CIS_Offset_Max < (connInterval - (CIG_Sync_Delay + T_MSS))
978 	 */
979 	cig = cis->group;
980 	*cis_offset_max = (conn->lll.interval * CONN_INT_UNIT_US) -
981 			  cig->sync_delay;
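	/* E.g. (illustrative) a 50 ms ACL interval (40 * 1.25 ms) and a CIG_Sync_Delay of
	 * 4800 us give CIS_Offset_Max = 50000 - 4800 = 45200 us.
	 */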
982 
983 	if (IS_ENABLED(CONFIG_BT_CTLR_JIT_SCHEDULING)) {
984 		*cis_offset_min = MAX(CIS_MIN_OFFSET_MIN, EVENT_OVERHEAD_CIS_SETUP_US);
985 		return 0;
986 	}
987 
988 #if (CONFIG_BT_CTLR_CENTRAL_SPACING == 0)
989 	if (cig->state == CIG_STATE_ACTIVE) {
990 		cis_offset_get(cis);
991 	} else {
992 		cig_offset_get(cis);
993 	}
994 
995 	return -EBUSY;
996 #else /* CONFIG_BT_CTLR_CENTRAL_SPACING != 0 */
997 
998 	*cis_offset_min = HAL_TICKER_TICKS_TO_US(conn->ull.ticks_slot) +
999 			  (EVENT_TICKER_RES_MARGIN_US << 1U);
1000 
1001 	*cis_offset_min += cig->sync_delay - cis->sync_delay;
1002 
1003 	return 0;
1004 #endif /* CONFIG_BT_CTLR_CENTRAL_SPACING != 0 */
1005 }
1006 
1007 #if (CONFIG_BT_CTLR_CENTRAL_SPACING == 0)
1008 static void cig_offset_get(struct ll_conn_iso_stream *cis)
1009 {
1010 	static memq_link_t link;
1011 	static struct mayfly mfy = {0, 0, &link, NULL, mfy_cig_offset_get};
1012 	uint32_t ret;
1013 
1014 	mfy.param = cis;
1015 	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1,
1016 			     &mfy);
1017 	LL_ASSERT(!ret);
1018 }
1019 
1020 static void mfy_cig_offset_get(void *param)
1021 {
1022 	struct ll_conn_iso_stream *cis;
1023 	struct ll_conn_iso_group *cig;
1024 	uint32_t conn_interval_us;
1025 	uint32_t offset_limit_us;
1026 	uint32_t ticks_to_expire;
1027 	uint32_t offset_max_us;
1028 	uint32_t offset_min_us;
1029 	struct ll_conn *conn;
1030 	int err;
1031 
1032 	cis = param;
1033 	cig = cis->group;
1034 
1035 	/* Find a free offset that does not overlap other periodically scheduled
1036 	 * states/roles.
1037 	 */
1038 	err = ull_sched_conn_iso_free_offset_get(cig->ull.ticks_slot,
1039 						 &ticks_to_expire);
1040 	LL_ASSERT(!err);
1041 
1042 	/* Calculate the offset for the selected CIS in the CIG */
1043 	offset_min_us = HAL_TICKER_TICKS_TO_US(ticks_to_expire) +
1044 			(EVENT_TICKER_RES_MARGIN_US << 2U);
1045 	offset_min_us += cig->sync_delay - cis->sync_delay;
1046 
1047 	/* Ensure the offset is not greater than the ACL interval, considering
1048 	 * the minimum CIS offset requirement.
1049 	 */
1050 	conn = ll_conn_get(cis->lll.acl_handle);
1051 	conn_interval_us = (uint32_t)conn->lll.interval * CONN_INT_UNIT_US;
1052 	offset_limit_us = conn_interval_us + PDU_CIS_OFFSET_MIN_US;
1053 	while (offset_min_us >= offset_limit_us) {
1054 		offset_min_us -= conn_interval_us;
1055 	}
1056 
1057 	offset_max_us = conn_interval_us - cig->sync_delay;
1058 
1059 	ull_cp_cc_offset_calc_reply(conn, offset_min_us, offset_max_us);
1060 }
1061 
1062 static void cis_offset_get(struct ll_conn_iso_stream *cis)
1063 {
1064 	static memq_link_t link;
1065 	static struct mayfly mfy = {0, 0, &link, NULL, mfy_cis_offset_get};
1066 	uint32_t ret;
1067 
1068 	mfy.param = cis;
1069 	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1,
1070 			     &mfy);
1071 	LL_ASSERT(!ret);
1072 }
1073 
1074 static void mfy_cis_offset_get(void *param)
1075 {
1076 	uint32_t elapsed_acl_us, elapsed_cig_us;
1077 	uint16_t latency_acl, latency_cig;
1078 	struct ll_conn_iso_stream *cis;
1079 	struct ll_conn_iso_group *cig;
1080 	uint32_t cig_remainder_us;
1081 	uint32_t acl_remainder_us;
1082 	uint32_t cig_interval_us;
1083 	uint32_t offset_limit_us;
1084 	uint32_t ticks_to_expire;
1085 	uint32_t ticks_current;
1086 	uint32_t offset_min_us;
1087 	struct ll_conn *conn;
1088 	uint32_t remainder;
1089 	uint8_t ticker_id;
1090 	uint16_t lazy;
1091 	uint8_t retry;
1092 	uint8_t id;
1093 
1094 	cis = param;
1095 	cig = cis->group;
1096 	ticker_id = TICKER_ID_CONN_ISO_BASE + ll_conn_iso_group_handle_get(cig);
1097 
1098 	id = TICKER_NULL;
1099 	ticks_to_expire = 0U;
1100 	ticks_current = 0U;
1101 
1102 	/* In the first iteration the actual ticks_current value is returned
1103 	 * which will be different from the initial value of 0 that is set.
1104 	 * Subsequent iterations should return the same ticks_current as the
1105 	 * reference tick.
1106 	 * In order to avoid infinite updates to the ticker's reference caused by
1107 	 * race conditions with expiring tickers, we try up to 3 more times.
1108 	 * Hence, the first iteration gets an actual ticks_current and 3 more serve
1109 	 * as retries in case a race condition changes the value of
1110 	 * ticks_current.
1111 	 *
1112 	 * ticker_next_slot_get_ext() restarts iterating when updated value of
1113 	 * ticks_current is returned.
1114 	 */
1115 	retry = 4U;
1116 	do {
1117 		uint32_t volatile ret_cb;
1118 		uint32_t ticks_previous;
1119 		uint32_t ret;
1120 		bool success;
1121 
1122 		ticks_previous = ticks_current;
1123 
1124 		ret_cb = TICKER_STATUS_BUSY;
1125 		ret = ticker_next_slot_get_ext(TICKER_INSTANCE_ID_CTLR,
1126 					       TICKER_USER_ID_ULL_LOW,
1127 					       &id, &ticks_current,
1128 					       &ticks_to_expire, &remainder,
1129 					       &lazy, NULL, NULL,
1130 					       ticker_op_cb, (void *)&ret_cb);
1131 		if (ret == TICKER_STATUS_BUSY) {
1132 			/* Busy wait until Ticker Job is enabled after any Radio
1133 			 * event is done using the Radio hardware. Ticker Job
1134 			 * ISR is disabled during Radio events in LOW_LAT
1135 			 * feature to avoid Radio ISR latencies.
1136 			 */
1137 			while (ret_cb == TICKER_STATUS_BUSY) {
1138 				ticker_job_sched(TICKER_INSTANCE_ID_CTLR,
1139 						 TICKER_USER_ID_ULL_LOW);
1140 			}
1141 		}
1142 
1143 		success = (ret_cb == TICKER_STATUS_SUCCESS);
1144 		LL_ASSERT(success);
1145 
1146 		LL_ASSERT((ticks_current == ticks_previous) || retry--);
1147 
1148 		LL_ASSERT(id != TICKER_NULL);
1149 	} while (id != ticker_id);
1150 
1151 	/* Reduce a tick for negative remainder and return positive remainder
1152 	 * value.
1153 	 */
1154 	hal_ticker_remove_jitter(&ticks_to_expire, &remainder);
1155 	cig_remainder_us = remainder;
1156 
1157 	/* Add a tick for negative remainder and return positive remainder
1158 	 * value.
1159 	 */
1160 	conn = ll_conn_get(cis->lll.acl_handle);
1161 	remainder = conn->llcp.prep.remainder;
1162 	hal_ticker_add_jitter(&ticks_to_expire, &remainder);
1163 	acl_remainder_us = remainder;
1164 
1165 	/* Calculate the CIS offset in the CIG */
1166 	offset_min_us = HAL_TICKER_TICKS_TO_US(ticks_to_expire) +
1167 			cig_remainder_us + cig->sync_delay -
1168 			acl_remainder_us - cis->sync_delay;
1169 
1170 	/* Calculate instant latency */
1171 	/* 32-bits are sufficient as maximum connection interval is 4 seconds,
1172 	 * and latency counts (typically 3) is low enough to avoid 32-bit
1173 	 * overflow. Refer to ull_central_iso_cis_offset_get().
1174 	 */
1175 	latency_acl = cis->central.instant - ull_conn_event_counter(conn);
1176 	elapsed_acl_us = latency_acl * conn->lll.interval * CONN_INT_UNIT_US;
1177 
1178 	/* Calculate elapsed CIG intervals until the instant */
1179 	cig_interval_us = cig->iso_interval * ISO_INT_UNIT_US;
1180 	latency_cig = DIV_ROUND_UP(elapsed_acl_us, cig_interval_us);
1181 	elapsed_cig_us = latency_cig * cig_interval_us;
1182 
1183 	/* Compensate for the difference between ACL elapsed vs CIG elapsed */
1184 	offset_min_us += elapsed_cig_us - elapsed_acl_us;
1185 
1186 	/* Ensure that the minimum offset is not greater than ISO interval
1187 	 * considering the selected CIS in the CIG meets the minimum CIS offset
1188 	 * requirement.
1189 	 */
1190 	offset_limit_us = cig_interval_us + cig->sync_delay - cis->sync_delay;
1191 	while (offset_min_us >= offset_limit_us) {
1192 		offset_min_us -= cig_interval_us;
1193 	}
1194 
1195 	/* Decrement event_count to compensate for offset_min_us greater than
1196 	 * CIG interval.
1197 	 */
1198 	if (offset_min_us > cig_interval_us) {
1199 		cis->lll.event_count--;
1200 	}
1201 
1202 	ull_cp_cc_offset_calc_reply(conn, offset_min_us, offset_min_us);
1203 }
1204 
1205 static void ticker_op_cb(uint32_t status, void *param)
1206 {
1207 	*((uint32_t volatile *)param) = status;
1208 }
1209 #endif /* CONFIG_BT_CTLR_CENTRAL_SPACING  == 0 */
1210 
1211 static uint32_t iso_interval_adjusted_bn_max_pdu_get(bool framed, uint32_t iso_interval,
1212 						     uint32_t iso_interval_cig,
1213 						     uint32_t sdu_interval,
1214 						     uint16_t max_sdu, uint8_t *bn,
1215 						     uint8_t *max_pdu)
1216 {
1217 	if (framed) {
1218 		uint32_t max_drift_us;
1219 		uint32_t ceil_f;
1220 
1221 		/* BT Core 5.4 Vol 6, Part G, Section 2.2:
1222 		 *   Max_PDU >= ((ceil(F) x 5 + ceil(F x Max_SDU)) / BN) + 2
1223 		 *   F = (1 + MaxDrift) x ISO_Interval / SDU_Interval
1224 		 *   SegmentationHeader + TimeOffset = 5 bytes
1225 		 *   Continuation header = 2 bytes
1226 		 *   MaxDrift (Max. allowed SDU delivery timing drift) = 100 ppm
1227 		 */
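		/* Illustrative example: ISO_Interval = 7500 us, SDU_Interval = 10000 us,
		 * Max_SDU = 40 and BN = 1, neglecting the small 100 ppm drift term:
		 * F = 0.75, so Max_PDU >= ((ceil(0.75) x 5 + ceil(0.75 x 40)) / 1) + 2
		 *                       = (5 + 30) + 2 = 37 octets.
		 */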
1228 		max_drift_us = DIV_ROUND_UP(SDU_MAX_DRIFT_PPM * sdu_interval, USEC_PER_SEC);
1229 		ceil_f = DIV_ROUND_UP((USEC_PER_SEC + max_drift_us) * (uint64_t)iso_interval,
1230 				       USEC_PER_SEC * (uint64_t)sdu_interval);
1231 		if (false) {
1232 #if defined(CONFIG_BT_CTLR_CONN_ISO_AVOID_SEGMENTATION)
1233 		/* To avoid segmentation according to HAP, if the ISO_Interval is less than
1234 		 * the SDU_Interval, we assume BN=1 and calculate the Max_PDU as:
1235 		 *     Max_PDU = ceil(F / BN) x (5 + Max_SDU)
1236 		 *
1237 		 * This is in accordance with the "Core enhancement for ISOAL CR".
1238 		 *
1239 		 * This ensures that the drift can be contained in the difference between
1240 		 * SDU_Interval and link bandwidth. For BN=1, ceil(F) == ceil(F/BN).
1241 		 */
1242 		} else if (iso_interval < sdu_interval) {
1243 			*bn = 1;
1244 			*max_pdu = ceil_f * (PDU_ISO_SEG_HDR_SIZE + PDU_ISO_SEG_TIMEOFFSET_SIZE +
1245 					     max_sdu);
1246 #endif
1247 		} else {
1248 			uint32_t ceil_f_x_max_sdu;
1249 			uint16_t max_pdu_bn1;
1250 
1251 			ceil_f_x_max_sdu = DIV_ROUND_UP(max_sdu * ((USEC_PER_SEC + max_drift_us) *
1252 								   (uint64_t)iso_interval),
1253 							USEC_PER_SEC * (uint64_t)sdu_interval);
1254 
1255 			/* Strategy: Keep lowest possible BN.
1256 			 * TODO: Implement other strategies, possibly as policies.
1257 			 */
1258 			max_pdu_bn1 = ceil_f * (PDU_ISO_SEG_HDR_SIZE +
1259 						PDU_ISO_SEG_TIMEOFFSET_SIZE) + ceil_f_x_max_sdu;
1260 			*bn = DIV_ROUND_UP(max_pdu_bn1, LL_CIS_OCTETS_TX_MAX);
1261 			*max_pdu = DIV_ROUND_UP(max_pdu_bn1, *bn) + PDU_ISO_SEG_HDR_SIZE;
1262 		}
1263 	} else {
1264 		/* For unframed, ISO_Interval must be N x SDU_Interval */
1265 		if ((iso_interval % sdu_interval) != 0) {
1266 			/* The requested ISO_Interval is increased by the CIG's base ISO_Interval
1267 			 * until it is a multiple of the SDU_Interval.
1268 			 * For example, with an ISO_Interval of 7.5 ms and an SDU_Interval of 10 ms,
1269 			 * 7.5 is added in iterations to reach a 30 ms ISO interval; with 10 and
1270 			 * 7.5 ms, 10 is added in iterations to reach the same 30 ms ISO interval.
1271 			 */
1272 			iso_interval += iso_interval_cig;
1273 		}
1274 
1275 		/* Core 5.3 Vol 6, Part G section 2.1:
1276 		 * BN >= ceil(Max_SDU/Max_PDU * ISO_Interval/SDU_Interval)
1277 		 */
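		/* E.g. Max_SDU = 400, Max_PDU = 251 and ISO_Interval = SDU_Interval give
		 * BN = ceil(400 / 251) = 2.
		 */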
1278 		*bn = DIV_ROUND_UP(max_sdu * iso_interval, (*max_pdu) * sdu_interval);
1279 	}
1280 
1281 	return iso_interval;
1282 }
1283 
1284 static uint8_t ll_cig_parameters_validate(void)
1285 {
1286 	if (ll_iso_setup.cis_count > BT_HCI_ISO_CIS_COUNT_MAX) {
1287 		/* Invalid CIS_Count */
1288 		return BT_HCI_ERR_INVALID_PARAM;
1289 	}
1290 
1291 	if (ll_iso_setup.group.cig_id > BT_HCI_ISO_CIG_ID_MAX) {
1292 		/* Invalid CIG_ID */
1293 		return BT_HCI_ERR_INVALID_PARAM;
1294 	}
1295 
1296 	if (!IN_RANGE(ll_iso_setup.group.c_sdu_interval, BT_HCI_ISO_SDU_INTERVAL_MIN,
1297 		      BT_HCI_ISO_SDU_INTERVAL_MAX) ||
1298 	    !IN_RANGE(ll_iso_setup.group.p_sdu_interval, BT_HCI_ISO_SDU_INTERVAL_MIN,
1299 		      BT_HCI_ISO_SDU_INTERVAL_MAX)) {
1300 		/* Parameter out of range */
1301 		return BT_HCI_ERR_INVALID_PARAM;
1302 	}
1303 
1304 	if (ll_iso_setup.group.central.test) {
1305 		if (!IN_RANGE(ll_iso_setup.group.iso_interval,
1306 			      BT_HCI_ISO_INTERVAL_MIN, BT_HCI_ISO_INTERVAL_MAX)) {
1307 			/* Parameter out of range */
1308 			return BT_HCI_ERR_INVALID_PARAM;
1309 		}
1310 	} else {
1311 		if (!IN_RANGE(ll_iso_setup.group.c_latency,
1312 			      BT_HCI_ISO_MAX_TRANSPORT_LATENCY_MIN * USEC_PER_MSEC,
1313 			      BT_HCI_ISO_MAX_TRANSPORT_LATENCY_MAX * USEC_PER_MSEC) ||
1314 		    !IN_RANGE(ll_iso_setup.group.p_latency,
1315 			      BT_HCI_ISO_MAX_TRANSPORT_LATENCY_MIN * USEC_PER_MSEC,
1316 			      BT_HCI_ISO_MAX_TRANSPORT_LATENCY_MAX * USEC_PER_MSEC)) {
1317 			/* Parameter out of range */
1318 			return BT_HCI_ERR_INVALID_PARAM;
1319 		}
1320 	}
1321 
1322 	if (((ll_iso_setup.group.central.sca & ~BT_HCI_ISO_WORST_CASE_SCA_VALID_MASK) != 0U) ||
1323 	    ((ll_iso_setup.group.central.packing & ~BT_HCI_ISO_PACKING_VALID_MASK) != 0U) ||
1324 	    ((ll_iso_setup.group.central.framing & ~BT_HCI_ISO_FRAMING_VALID_MASK) != 0U)) {
1325 		/* Worst_Case_SCA, Packing or Framing sets RFU value */
1326 		return BT_HCI_ERR_INVALID_PARAM;
1327 	}
1328 
1329 	if (ll_iso_setup.cis_count > STREAMS_PER_GROUP) {
1330 		/* Requested number of CISes is not available in this configuration. This is
1331 		 * checked last to avoid interfering with qualification parameter checks.
1332 		 */
1333 		return BT_HCI_ERR_CONN_LIMIT_EXCEEDED;
1334 	}
1335 
1336 	return BT_HCI_ERR_SUCCESS;
1337 }
1338 
1339 static uint8_t ll_cis_parameters_validate(uint8_t cis_idx, uint8_t cis_id,
1340 					  uint16_t c_sdu, uint16_t p_sdu,
1341 					  uint16_t c_phy, uint16_t p_phy)
1342 {
1343 	if ((cis_id > BT_HCI_ISO_CIS_ID_VALID_MAX) ||
1344 	    ((c_sdu & ~BT_HCI_ISO_MAX_SDU_VALID_MASK) != 0U) ||
1345 	    ((p_sdu & ~BT_HCI_ISO_MAX_SDU_VALID_MASK) != 0U)) {
1346 		return BT_HCI_ERR_INVALID_PARAM;
1347 	}
1348 
1349 	if (!c_phy || ((c_phy & ~PHY_VALID_MASK) != 0U) ||
1350 	    !p_phy || ((p_phy & ~PHY_VALID_MASK) != 0U)) {
1351 		return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
1352 	}
1353 
1354 	if (cis_idx >= STREAMS_PER_GROUP) {
1355 		return BT_HCI_ERR_CONN_LIMIT_EXCEEDED;
1356 	}
1357 
1358 	return BT_HCI_ERR_SUCCESS;
1359 }
1360 
1361 #if defined(CONFIG_BT_CTLR_CONN_ISO_RELIABILITY_POLICY)
1362 static uint8_t ll_cis_calculate_ft(uint32_t cig_sync_delay, uint32_t iso_interval_us,
1363 				   uint32_t sdu_interval, uint32_t latency, uint8_t framed)
1364 {
1365 	uint32_t tl;
1366 
1367 	/* Framed:
1368 	 *   TL = CIG_Sync_Delay + FT x ISO_Interval + SDU_Interval
1369 	 *
1370 	 * Unframed:
1371 	 *   TL = CIG_Sync_Delay + FT x ISO_Interval - SDU_Interval
1372 	 */
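	/* E.g. (illustrative, framed): CIG_Sync_Delay = 7500 us, ISO_Interval = 7500 us,
	 * SDU_Interval = 10000 us and Max_Transport_Latency = 40000 us:
	 * FT = 3 gives TL = 7500 + 22500 + 10000 = 40000 us (within the latency), while
	 * FT = 4 gives TL = 47500 us, so FT = 3 is selected.
	 */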
1373 	for (uint16_t ft = 1U; ft <= CONFIG_BT_CTLR_CONN_ISO_STREAMS_MAX_FT; ft++) {
1374 		if (framed) {
1375 			tl = cig_sync_delay + ft * iso_interval_us + sdu_interval;
1376 		} else {
1377 			tl = cig_sync_delay + ft * iso_interval_us - sdu_interval;
1378 		}
1379 
1380 		if (tl > latency) {
1381 			/* Latency exceeded - use one less */
1382 			return ft - 1U;
1383 		}
1384 	}
1385 
1386 	return 0;
1387 }
1388 #endif /* CONFIG_BT_CTLR_CONN_ISO_RELIABILITY_POLICY */
1389