1 /*
2 * Copyright (c) 2020 Demant
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/kernel.h>
8 #include <zephyr/sys/byteorder.h>
9
10 #include <zephyr/bluetooth/addr.h>
11 #include <zephyr/bluetooth/iso.h>
12
13 #include "util/util.h"
14 #include "util/memq.h"
15 #include "util/mayfly.h"
16 #include "util/dbuf.h"
17
18 #include "hal/ccm.h"
19 #include "hal/ticker.h"
20
21 #include "ticker/ticker.h"
22
23 #include "pdu_df.h"
24 #include "lll/pdu_vendor.h"
25 #include "pdu.h"
26
27 #include "lll.h"
28 #include "lll/lll_vendor.h"
29 #include "lll_clock.h"
30 #include "lll/lll_df_types.h"
31 #include "lll_conn.h"
32 #include "lll_conn_iso.h"
33 #include "lll_central_iso.h"
34
35 #include "isoal.h"
36
37 #include "ull_tx_queue.h"
38
39 #include "ull_conn_types.h"
40 #include "ull_iso_types.h"
41 #include "ull_conn_iso_types.h"
42
43 #include "ull_llcp.h"
44
45 #include "ull_internal.h"
46 #include "ull_sched_internal.h"
47 #include "ull_conn_internal.h"
48 #include "ull_conn_iso_internal.h"
49
50 #include "ll.h"
51 #include "ll_feat.h"
52
53 #include <zephyr/bluetooth/hci_types.h>
54
55 #include "hal/debug.h"
56
57 #define SDU_MAX_DRIFT_PPM 100
58 #define SUB_INTERVAL_MIN 400
59
60 #define STREAMS_PER_GROUP CONFIG_BT_CTLR_CONN_ISO_STREAMS_PER_GROUP
61
62 #if defined(CONFIG_BT_CTLR_PHY_CODED)
63 #define PHY_VALID_MASK (BT_HCI_ISO_PHY_VALID_MASK)
64 #else
65 #define PHY_VALID_MASK (BT_HCI_ISO_PHY_VALID_MASK & ~BIT(2))
66 #endif
67
68 #if (CONFIG_BT_CTLR_CENTRAL_SPACING == 0)
69 static void cig_offset_get(struct ll_conn_iso_stream *cis);
70 static void mfy_cig_offset_get(void *param);
71 static void cis_offset_get(struct ll_conn_iso_stream *cis);
72 static void mfy_cis_offset_get(void *param);
73 static void ticker_op_cb(uint32_t status, void *param);
74 #endif /* CONFIG_BT_CTLR_CENTRAL_SPACING == 0 */
75
76 static void set_bn_max_pdu(bool framed, uint32_t iso_interval,
77 uint32_t sdu_interval, uint16_t max_sdu, uint8_t *bn,
78 uint8_t *max_pdu);
79 static uint8_t ll_cig_parameters_validate(void);
80 static uint8_t ll_cis_parameters_validate(uint8_t cis_idx, uint8_t cis_id,
81 uint16_t c_sdu, uint16_t p_sdu,
82 uint16_t c_phy, uint16_t p_phy);
83
84 #if defined(CONFIG_BT_CTLR_CONN_ISO_RELIABILITY_POLICY)
85 static uint8_t ll_cis_calculate_ft(uint32_t cig_sync_delay, uint32_t iso_interval_us,
86 uint32_t sdu_interval, uint32_t latency, uint8_t framed);
87 #endif /* CONFIG_BT_CTLR_CONN_ISO_RELIABILITY_POLICY */
88
/* Setup cache for CIG commit transaction.
 * Parameters from the HCI LE Set CIG Parameters (Test) command sequence are
 * staged here by the open/set calls and transferred to the real CIG/CIS
 * instances by ll_cig_parameters_commit().
 */
static struct {
	struct ll_conn_iso_group group; /* Cached CIG-level parameters */
	uint8_t cis_count; /* Number of CISes requested in this transaction */
	uint8_t c_ft; /* Central->Peripheral flush timeout (test command only) */
	uint8_t p_ft; /* Peripheral->Central flush timeout (test command only) */
	uint8_t cis_idx; /* Next free slot in stream[] */
	struct ll_conn_iso_stream stream[CONFIG_BT_CTLR_CONN_ISO_STREAMS_PER_GROUP];
} ll_iso_setup;
98
ll_cig_parameters_open(uint8_t cig_id,uint32_t c_interval,uint32_t p_interval,uint8_t sca,uint8_t packing,uint8_t framing,uint16_t c_latency,uint16_t p_latency,uint8_t num_cis)99 uint8_t ll_cig_parameters_open(uint8_t cig_id,
100 uint32_t c_interval, uint32_t p_interval,
101 uint8_t sca, uint8_t packing, uint8_t framing,
102 uint16_t c_latency, uint16_t p_latency,
103 uint8_t num_cis)
104 {
105 memset(&ll_iso_setup, 0, sizeof(ll_iso_setup));
106
107 ll_iso_setup.group.cig_id = cig_id;
108 ll_iso_setup.group.c_sdu_interval = c_interval;
109 ll_iso_setup.group.p_sdu_interval = p_interval;
110 ll_iso_setup.group.c_latency = c_latency * USEC_PER_MSEC;
111 ll_iso_setup.group.p_latency = p_latency * USEC_PER_MSEC;
112 ll_iso_setup.group.central.sca = sca;
113 ll_iso_setup.group.central.packing = packing;
114 ll_iso_setup.group.central.framing = framing;
115 ll_iso_setup.cis_count = num_cis;
116
117 return ll_cig_parameters_validate();
118 }
119
ll_cis_parameters_set(uint8_t cis_id,uint16_t c_sdu,uint16_t p_sdu,uint8_t c_phy,uint8_t p_phy,uint8_t c_rtn,uint8_t p_rtn)120 uint8_t ll_cis_parameters_set(uint8_t cis_id,
121 uint16_t c_sdu, uint16_t p_sdu,
122 uint8_t c_phy, uint8_t p_phy,
123 uint8_t c_rtn, uint8_t p_rtn)
124 {
125 uint8_t cis_idx = ll_iso_setup.cis_idx;
126 uint8_t status;
127
128 status = ll_cis_parameters_validate(cis_idx, cis_id, c_sdu, p_sdu, c_phy, p_phy);
129 if (status) {
130 return status;
131 }
132
133 memset(&ll_iso_setup.stream[cis_idx], 0, sizeof(struct ll_conn_iso_stream));
134
135 ll_iso_setup.stream[cis_idx].cis_id = cis_id;
136 ll_iso_setup.stream[cis_idx].c_max_sdu = c_sdu;
137 ll_iso_setup.stream[cis_idx].p_max_sdu = p_sdu;
138 ll_iso_setup.stream[cis_idx].lll.tx.phy = c_phy;
139 ll_iso_setup.stream[cis_idx].lll.tx.phy_flags = PHY_FLAGS_S8;
140 ll_iso_setup.stream[cis_idx].lll.rx.phy = p_phy;
141 ll_iso_setup.stream[cis_idx].lll.rx.phy_flags = PHY_FLAGS_S8;
142 ll_iso_setup.stream[cis_idx].central.c_rtn = c_rtn;
143 ll_iso_setup.stream[cis_idx].central.p_rtn = p_rtn;
144 ll_iso_setup.cis_idx++;
145
146 return BT_HCI_ERR_SUCCESS;
147 }
148
/* TODO:
 * - Calculate ISO_Interval to allow SDU_Interval < ISO_Interval
 */
/* Commit the cached CIG/CIS parameter transaction.
 *
 * Creates (or reconfigures) the CIG identified by cig_id, acquires/updates
 * its CIS instances, then iteratively derives ISO_Interval, BN, Max_PDU,
 * FT, NSE, subintervals and sync delays. If the resulting transport latency
 * exceeds the host's Max_Transport_Latency, RTN is reduced and the
 * calculation is retried (ll_cig_parameters_commit_retry). On late failure
 * the CIS/CIG instances acquired by this call are released again.
 *
 * @param cig_id  CIG identifier from the host.
 * @param handles Out: one CIS connection handle per configured CIS.
 * @return BT_HCI_ERR_SUCCESS or an HCI error code.
 */
uint8_t ll_cig_parameters_commit(uint8_t cig_id, uint16_t *handles)
{
	uint16_t cis_created_handles[STREAMS_PER_GROUP];
	struct ll_conn_iso_stream *cis;
	struct ll_conn_iso_group *cig;
	uint32_t iso_interval_us;
	uint32_t cig_sync_delay;
	uint32_t max_se_length;
	uint32_t c_max_latency;
	uint32_t p_max_latency;
	uint16_t handle_iter;
	uint32_t total_time;
	bool force_framed;
	bool cig_created;
	uint8_t num_cis;
	uint8_t err;

	/* Intermediate subevent data */
	struct {
		uint32_t length;
		uint8_t total_count;
	} se[STREAMS_PER_GROUP];

	/* Track CIS instances acquired by THIS call, for cleanup on failure */
	for (uint8_t i = 0U; i < STREAMS_PER_GROUP; i++) {
		cis_created_handles[i] = LLL_HANDLE_INVALID;
	};

	cig_created = false;

	/* If CIG already exists, this is a reconfigure */
	cig = ll_conn_iso_group_get_by_id(cig_id);
	if (!cig) {
		/* CIG does not exist - create it */
		cig = ll_conn_iso_group_acquire();
		if (!cig) {
			ll_iso_setup.cis_idx = 0U;

			/* No space for new CIG */
			return BT_HCI_ERR_INSUFFICIENT_RESOURCES;
		}
		cig->lll.num_cis = 0U;
		cig_created = true;

	} else if (cig->state != CIG_STATE_CONFIGURABLE) {
		/* CIG is not in configurable state */
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Store currently configured number of CISes before cache transfer */
	num_cis = cig->lll.num_cis;

	/* Transfer parameters from configuration cache and clear LLL fields */
	memcpy(cig, &ll_iso_setup.group, sizeof(struct ll_conn_iso_group));

	cig->state = CIG_STATE_CONFIGURABLE;

	/* Setup LLL parameters */
	cig->lll.handle = ll_conn_iso_group_handle_get(cig);
	cig->lll.role = BT_HCI_ROLE_CENTRAL;
	cig->lll.resume_cis = LLL_HANDLE_INVALID;
	cig->lll.num_cis = num_cis;
	force_framed = false;

	if (!cig->central.test) {
		/* TODO: Calculate ISO_Interval based on SDU_Interval and Max_SDU vs Max_PDU,
		 * taking the policy into consideration. It may also be interesting to select an
		 * ISO_Interval which is less likely to collide with other connections.
		 * For instance:
		 *
		 *  SDU_Interval   ISO_Interval   Max_SDU   Max_SDU   Collision risk (10 ms)
		 *  ------------------------------------------------------------------------
		 *  10 ms          10 ms          40        40        100%
		 *  10 ms          12.5 ms        40        50         25%
		 */

		/* Set ISO_Interval to the closest lower value of SDU_Interval to be able to
		 * handle the throughput. For unframed these must be divisible, if they're not,
		 * framed mode must be forced.
		 */
		cig->iso_interval = cig->c_sdu_interval / ISO_INT_UNIT_US;

		if (cig->iso_interval < BT_HCI_ISO_INTERVAL_MIN) {
			/* ISO_Interval is below minimum (5 ms) */
			cig->iso_interval = BT_HCI_ISO_INTERVAL_MIN;
		}

#if defined(CONFIG_BT_CTLR_CONN_ISO_AVOID_SEGMENTATION)
		/* Check if this is a HAP usecase which requires higher link bandwidth to ensure
		 * segmentation is not invoked in ISO-AL.
		 */
		if (cig->central.framing && cig->c_sdu_interval == 10000U) {
			cig->iso_interval = 6; /* 7500 us */
		}
#endif

		if (!cig->central.framing && (cig->c_sdu_interval % ISO_INT_UNIT_US)) {
			/* Framing not requested but requirement for unframed is not met. Force
			 * CIG into framed mode.
			 */
			force_framed = true;
		}
	}

	iso_interval_us = cig->iso_interval * ISO_INT_UNIT_US;
	cig->lll.iso_interval_us = iso_interval_us;

	lll_hdr_init(&cig->lll, cig);
	max_se_length = 0U;

	/* Create all configurable CISes */
	for (uint8_t i = 0U; i < ll_iso_setup.cis_count; i++) {
		memq_link_t *link_tx_free;
		memq_link_t link_tx;

		cis = ll_conn_iso_stream_get_by_id(ll_iso_setup.stream[i].cis_id);
		if (cis) {
			/* Check if Max_SDU reconfigure violates datapath by changing
			 * non-zero Max_SDU with associated datapath, to zero.
			 */
			if ((cis->c_max_sdu && cis->hdr.datapath_in &&
			     !ll_iso_setup.stream[i].c_max_sdu) ||
			    (cis->p_max_sdu && cis->hdr.datapath_out &&
			     !ll_iso_setup.stream[i].p_max_sdu)) {
				/* Reconfiguring CIS with datapath to wrong direction is
				 * not allowed.
				 */
				err = BT_HCI_ERR_CMD_DISALLOWED;
				goto ll_cig_parameters_commit_cleanup;
			}
		} else {
			/* Acquire new CIS */
			cis = ll_conn_iso_stream_acquire();
			if (!cis) {
				/* No space for new CIS */
				ll_iso_setup.cis_idx = 0U;

				err = BT_HCI_ERR_CONN_LIMIT_EXCEEDED;
				goto ll_cig_parameters_commit_cleanup;
			}

			cis_created_handles[i] = ll_conn_iso_stream_handle_get(cis);
			cig->lll.num_cis++;
		}

		/* Store TX link and free link before transfer */
		link_tx_free = cis->lll.link_tx_free;
		link_tx = cis->lll.link_tx;

		/* Transfer parameters from configuration cache */
		memcpy(cis, &ll_iso_setup.stream[i], sizeof(struct ll_conn_iso_stream));

		cis->group = cig;
		cis->framed = cig->central.framing || force_framed;

		/* Restore the TX links that the memcpy above overwrote */
		cis->lll.link_tx_free = link_tx_free;
		cis->lll.link_tx = link_tx;
		cis->lll.handle = ll_conn_iso_stream_handle_get(cis);
		handles[i] = cis->lll.handle;
	}

	num_cis = cig->lll.num_cis;

ll_cig_parameters_commit_retry:
	handle_iter = UINT16_MAX;

	/* 1) Acquire CIS instances and initialize instance data.
	 * 2) Calculate SE_Length for each CIS and store the largest
	 * 3) Calculate BN
	 * 4) Calculate total number of subevents needed to transfer payloads
	 *
	 *                 Sequential                Interleaved
	 * CIS0            ___█_█_█_____________█_   ___█___█___█_________█_
	 * CIS1            _________█_█_█_________   _____█___█___█_________
	 * CIS_Sub_Interval  |.|                       |...|
	 * CIG_Sync_Delay  |............|            |............|
	 * CIS_Sync_Delay 0 |............|            |............|
	 * CIS_Sync_Delay 1         |......|            |..........|
	 * ISO_Interval    |.................|..     |.................|..
	 */
	for (uint8_t i = 0U; i < num_cis; i++) {
		uint32_t mpt_c;
		uint32_t mpt_p;
		bool tx;
		bool rx;

		cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);

		if (cig->central.test) {
			cis->lll.tx.ft = ll_iso_setup.c_ft;
			cis->lll.rx.ft = ll_iso_setup.p_ft;

			tx = cis->lll.tx.bn && cis->lll.tx.max_pdu;
			rx = cis->lll.rx.bn && cis->lll.rx.max_pdu;
		} else {
			LL_ASSERT(cis->framed || iso_interval_us >= cig->c_sdu_interval);

			tx = cig->c_sdu_interval && cis->c_max_sdu;
			rx = cig->p_sdu_interval && cis->p_max_sdu;

			/* Use Max_PDU = MIN(<buffer_size>, Max_SDU) as default.
			 * May be changed by set_bn_max_pdu.
			 */
			cis->lll.tx.max_pdu = MIN(LL_CIS_OCTETS_TX_MAX,
						  cis->c_max_sdu);
			cis->lll.rx.max_pdu = MIN(LL_CIS_OCTETS_RX_MAX,
						  cis->p_max_sdu);

			/* Calculate BN and Max_PDU (framed) for both
			 * directions
			 */
			if (tx) {
				uint8_t max_pdu;
				uint8_t bn;

				bn = cis->lll.tx.bn;
				max_pdu = cis->lll.tx.max_pdu;
				set_bn_max_pdu(cis->framed, iso_interval_us,
					       cig->c_sdu_interval,
					       cis->c_max_sdu, &bn, &max_pdu);
				cis->lll.tx.bn = bn;
				cis->lll.tx.max_pdu = max_pdu;
			} else {
				cis->lll.tx.bn = 0U;
			}

			if (rx) {
				uint8_t max_pdu;
				uint8_t bn;

				bn = cis->lll.rx.bn;
				max_pdu = cis->lll.rx.max_pdu;
				set_bn_max_pdu(cis->framed, iso_interval_us,
					       cig->p_sdu_interval,
					       cis->p_max_sdu, &bn, &max_pdu);
				cis->lll.rx.bn = bn;
				cis->lll.rx.max_pdu = max_pdu;
			} else {
				cis->lll.rx.bn = 0U;
			}
		}

		/* Calculate SE_Length */
		mpt_c = PDU_CIS_MAX_US(cis->lll.tx.max_pdu, tx, cis->lll.tx.phy);
		mpt_p = PDU_CIS_MAX_US(cis->lll.rx.max_pdu, rx, cis->lll.rx.phy);

		se[i].length = mpt_c + EVENT_IFS_US + mpt_p + EVENT_MSS_US;
		max_se_length = MAX(max_se_length, se[i].length);

		/* Total number of subevents needed */
		se[i].total_count = MAX((cis->central.c_rtn + 1) * cis->lll.tx.bn,
					(cis->central.p_rtn + 1) * cis->lll.rx.bn);
	}

	handle_iter = UINT16_MAX;
	total_time = 0U;

	/* 1) Prepare calculation of the flush timeout by adding up the total time needed to
	 *    transfer all payloads, including retransmissions.
	 */
	if (cig->central.packing == BT_ISO_PACKING_SEQUENTIAL) {
		/* Sequential CISes - add up the total duration */
		for (uint8_t i = 0U; i < num_cis; i++) {
			total_time += se[i].total_count * se[i].length;
		}
	}

	handle_iter = UINT16_MAX;
	cig_sync_delay = 0U;

	/* 1) Calculate the flush timeout either by dividing the total time needed to transfer all,
	 *    payloads including retransmissions, and divide by the ISO_Interval (low latency
	 *    policy), or by dividing the Max_Transmission_Latency by the ISO_Interval (reliability
	 *    policy).
	 * 2) Calculate the number of subevents (NSE) by distributing total number of subevents into
	 *    FT ISO_intervals.
	 * 3) Calculate subinterval as either individual CIS subinterval (sequential), or the
	 *    largest SE_Length times number of CISes (interleaved). Min. subinterval is 400 us.
	 * 4) Calculate CIG_Sync_Delay
	 */
	for (uint8_t i = 0U; i < num_cis; i++) {
		cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);

		if (!cig->central.test) {
#if defined(CONFIG_BT_CTLR_CONN_ISO_LOW_LATENCY_POLICY)
			/* TODO: Only implemented for sequential packing */
			LL_ASSERT(cig->central.packing == BT_ISO_PACKING_SEQUENTIAL);

			/* Use symmetric flush timeout */
			cis->lll.tx.ft = DIV_ROUND_UP(total_time, iso_interval_us);
			cis->lll.rx.ft = cis->lll.tx.ft;

#elif defined(CONFIG_BT_CTLR_CONN_ISO_RELIABILITY_POLICY)
			/* Utilize Max_Transport_latency */

			/*
			 * Set CIG_Sync_Delay = ISO_Interval as largest possible CIG_Sync_Delay.
			 * This favors utilizing as much as possible of the Max_Transport_latency,
			 * and spreads out payloads over multiple CIS events (if necessary).
			 */
			uint32_t cig_sync_delay_us_max = iso_interval_us;

			cis->lll.tx.ft = ll_cis_calculate_ft(cig_sync_delay_us_max, iso_interval_us,
							     cig->c_sdu_interval, cig->c_latency,
							     cis->framed);

			cis->lll.rx.ft = ll_cis_calculate_ft(cig_sync_delay_us_max, iso_interval_us,
							     cig->p_sdu_interval, cig->p_latency,
							     cis->framed);

			if ((cis->lll.tx.ft == 0U) || (cis->lll.rx.ft == 0U)) {
				/* Invalid FT caused by invalid combination of parameters */
				err = BT_HCI_ERR_INVALID_PARAM;
				goto ll_cig_parameters_commit_cleanup;
			}

#else
			LL_ASSERT(0);
#endif
			cis->lll.nse = DIV_ROUND_UP(se[i].total_count, cis->lll.tx.ft);
		}

		if (cig->central.packing == BT_ISO_PACKING_SEQUENTIAL) {
			/* Accumulate CIG sync delay for sequential CISes */
			cis->lll.sub_interval = MAX(SUB_INTERVAL_MIN, se[i].length);
			cig_sync_delay += cis->lll.nse * cis->lll.sub_interval;
		} else {
			/* For interleaved CISes, offset each CIS by a fraction of a subinterval,
			 * positioning them evenly within the subinterval.
			 */
			cis->lll.sub_interval = MAX(SUB_INTERVAL_MIN, num_cis * max_se_length);
			cig_sync_delay = MAX(cig_sync_delay,
					     (cis->lll.nse * cis->lll.sub_interval) +
					     (i * cis->lll.sub_interval / num_cis));
		}
	}

	cig->sync_delay = cig_sync_delay;

	handle_iter = UINT16_MAX;
	c_max_latency = 0U;
	p_max_latency = 0U;

	/* 1) Calculate transport latencies for each CIS and validate against Max_Transport_Latency.
	 * 2) Lay out CISes by updating CIS_Sync_Delay, distributing according to the packing.
	 */
	for (uint8_t i = 0U; i < num_cis; i++) {
		uint32_t c_latency;
		uint32_t p_latency;

		cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);

		if (cis->framed) {
			/* Transport_Latency = CIG_Sync_Delay + FT x ISO_Interval + SDU_Interval */
			c_latency = cig->sync_delay +
				    (cis->lll.tx.ft * iso_interval_us) +
				    cig->c_sdu_interval;
			p_latency = cig->sync_delay +
				    (cis->lll.rx.ft * iso_interval_us) +
				    cig->p_sdu_interval;

		} else {
			/* Transport_Latency = CIG_Sync_Delay + FT x ISO_Interval - SDU_Interval */
			c_latency = cig->sync_delay +
				    (cis->lll.tx.ft * iso_interval_us) -
				    cig->c_sdu_interval;
			p_latency = cig->sync_delay +
				    (cis->lll.rx.ft * iso_interval_us) -
				    cig->p_sdu_interval;
		}

		if (!cig->central.test) {
			/* Make sure specified Max_Transport_Latency is not exceeded */
			if ((c_latency > cig->c_latency) || (p_latency > cig->p_latency)) {
				/* Check if we can reduce RTN to meet requested latency */
				if (!cis->central.c_rtn && !cis->central.p_rtn) {
					/* Actual latency exceeds the Max. Transport Latency */
					err = BT_HCI_ERR_INVALID_PARAM;

					/* Release allocated resources  and exit */
					goto ll_cig_parameters_commit_cleanup;
				}

				/* Reduce the RTN to meet host requested latency.
				 * NOTE: Both central and peripheral retransmission is reduced for
				 * simplicity.
				 */
				if (cis->central.c_rtn) {
					cis->central.c_rtn--;
				}
				if (cis->central.p_rtn) {
					cis->central.p_rtn--;
				}

				/* Re-run the whole FT/NSE/sync-delay calculation */
				goto ll_cig_parameters_commit_retry;
			}
		}

		c_max_latency = MAX(c_max_latency, c_latency);
		p_max_latency = MAX(p_max_latency, p_latency);

		if (cig->central.packing == BT_ISO_PACKING_SEQUENTIAL) {
			/* Distribute CISes sequentially */
			cis->sync_delay = cig_sync_delay;
			cig_sync_delay -= cis->lll.nse * cis->lll.sub_interval;
		} else {
			/* Distribute CISes interleaved */
			cis->sync_delay = cig_sync_delay;
			cig_sync_delay -= (cis->lll.sub_interval / num_cis);
		}

		if (cis->lll.nse <= 1) {
			cis->lll.sub_interval = 0U;
		}
	}

	/* Update actual latency */
	cig->c_latency = c_max_latency;
	cig->p_latency = p_max_latency;

#if !defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
	uint32_t slot_us;

	/* CIG sync_delay has been calculated considering the configured
	 * packing.
	 */
	slot_us = cig->sync_delay;

	slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;

	/* Populate the ULL hdr with event timings overheads */
	cig->ull.ticks_active_to_start = 0U;
	cig->ull.ticks_prepare_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	cig->ull.ticks_preempt_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
	cig->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);
#endif /* !CONFIG_BT_CTLR_JIT_SCHEDULING */

	/* Reset params cache */
	ll_iso_setup.cis_idx = 0U;

	return BT_HCI_ERR_SUCCESS;

ll_cig_parameters_commit_cleanup:
	/* Late configuration failure - clean up */
	for (uint8_t i = 0U; i < ll_iso_setup.cis_count; i++) {
		if (cis_created_handles[i] != LLL_HANDLE_INVALID) {
			/* Release CIS instance created in failing configuration */
			cis = ll_conn_iso_stream_get(cis_created_handles[i]);
			ll_conn_iso_stream_release(cis);
		} else {
			break;
		}
	}

	/* If CIG was created in this failed configuration - release it */
	if (cig_created) {
		ll_conn_iso_group_release(cig);
	}

	return err;
}
614
ll_cig_parameters_test_open(uint8_t cig_id,uint32_t c_interval,uint32_t p_interval,uint8_t c_ft,uint8_t p_ft,uint16_t iso_interval,uint8_t sca,uint8_t packing,uint8_t framing,uint8_t num_cis)615 uint8_t ll_cig_parameters_test_open(uint8_t cig_id, uint32_t c_interval,
616 uint32_t p_interval, uint8_t c_ft,
617 uint8_t p_ft, uint16_t iso_interval,
618 uint8_t sca, uint8_t packing,
619 uint8_t framing, uint8_t num_cis)
620 {
621 memset(&ll_iso_setup, 0, sizeof(ll_iso_setup));
622
623 ll_iso_setup.group.cig_id = cig_id;
624 ll_iso_setup.group.c_sdu_interval = c_interval;
625 ll_iso_setup.group.p_sdu_interval = p_interval;
626 ll_iso_setup.group.iso_interval = iso_interval;
627 ll_iso_setup.group.central.sca = sca;
628 ll_iso_setup.group.central.packing = packing;
629 ll_iso_setup.group.central.framing = framing;
630 ll_iso_setup.group.central.test = 1U;
631 ll_iso_setup.cis_count = num_cis;
632
633 /* TODO: Perhaps move FT to LLL CIG */
634 ll_iso_setup.c_ft = c_ft;
635 ll_iso_setup.p_ft = p_ft;
636
637 return ll_cig_parameters_validate();
638 }
639
ll_cis_parameters_test_set(uint8_t cis_id,uint8_t nse,uint16_t c_sdu,uint16_t p_sdu,uint16_t c_pdu,uint16_t p_pdu,uint8_t c_phy,uint8_t p_phy,uint8_t c_bn,uint8_t p_bn)640 uint8_t ll_cis_parameters_test_set(uint8_t cis_id, uint8_t nse,
641 uint16_t c_sdu, uint16_t p_sdu,
642 uint16_t c_pdu, uint16_t p_pdu,
643 uint8_t c_phy, uint8_t p_phy,
644 uint8_t c_bn, uint8_t p_bn)
645 {
646 uint8_t cis_idx = ll_iso_setup.cis_idx;
647 uint8_t status;
648
649 status = ll_cis_parameters_validate(cis_idx, cis_id, c_sdu, p_sdu, c_phy, p_phy);
650 if (status) {
651 return status;
652 }
653
654 memset(&ll_iso_setup.stream[cis_idx], 0, sizeof(struct ll_conn_iso_stream));
655
656 ll_iso_setup.stream[cis_idx].cis_id = cis_id;
657 ll_iso_setup.stream[cis_idx].c_max_sdu = c_sdu;
658 ll_iso_setup.stream[cis_idx].p_max_sdu = p_sdu;
659 ll_iso_setup.stream[cis_idx].lll.nse = nse;
660 ll_iso_setup.stream[cis_idx].lll.tx.max_pdu = c_bn ? c_pdu : 0U;
661 ll_iso_setup.stream[cis_idx].lll.rx.max_pdu = p_bn ? p_pdu : 0U;
662 ll_iso_setup.stream[cis_idx].lll.tx.phy = c_phy;
663 ll_iso_setup.stream[cis_idx].lll.tx.phy_flags = PHY_FLAGS_S8;
664 ll_iso_setup.stream[cis_idx].lll.rx.phy = p_phy;
665 ll_iso_setup.stream[cis_idx].lll.rx.phy_flags = PHY_FLAGS_S8;
666 ll_iso_setup.stream[cis_idx].lll.tx.bn = c_bn;
667 ll_iso_setup.stream[cis_idx].lll.rx.bn = p_bn;
668 ll_iso_setup.cis_idx++;
669
670 return BT_HCI_ERR_SUCCESS;
671 }
672
ll_cis_create_check(uint16_t cis_handle,uint16_t acl_handle)673 uint8_t ll_cis_create_check(uint16_t cis_handle, uint16_t acl_handle)
674 {
675 struct ll_conn *conn;
676
677 conn = ll_connected_get(acl_handle);
678 if (conn) {
679 struct ll_conn_iso_stream *cis;
680
681 /* Verify conn refers to a device acting as central */
682 if (conn->lll.role != BT_HCI_ROLE_CENTRAL) {
683 return BT_HCI_ERR_CMD_DISALLOWED;
684 }
685
686 /* Verify handle validity and association */
687 cis = ll_conn_iso_stream_get(cis_handle);
688
689 if (cis->group && (cis->lll.handle == cis_handle)) {
690 if (cis->established) {
691 /* CIS is already created */
692 return BT_HCI_ERR_CONN_ALREADY_EXISTS;
693 }
694
695 return BT_HCI_ERR_SUCCESS;
696 }
697 }
698
699 return BT_HCI_ERR_UNKNOWN_CONN_ID;
700 }
701
/* Start creation of a CIS on an already-validated CIS/ACL handle pair.
 *
 * Generates the CIS access address, resets the stream state, initializes the
 * TX memq and kicks off the LLCP CIS Request Control Procedure. Handle
 * validity must have been checked by ll_cis_create_check() beforehand.
 */
void ll_cis_create(uint16_t cis_handle, uint16_t acl_handle)
{
	struct ll_conn_iso_stream *cis;
	struct ll_conn *conn;
	int err;

	/* Handles have been verified prior to calling this function */
	conn = ll_connected_get(acl_handle);
	cis = ll_conn_iso_stream_get(cis_handle);
	cis->lll.acl_handle = acl_handle;

	/* Create access address */
	err = util_aa_le32(cis->lll.access_addr);
	LL_ASSERT(!err);

	/* Initialize stream states */
	cis->established = 0;
	cis->teardown = 0;

	(void)memset(&cis->hdr, 0U, sizeof(cis->hdr));

	/* Initialize TX link; fall back to the embedded link element when no
	 * free link is available from a previous teardown.
	 */
	if (!cis->lll.link_tx_free) {
		cis->lll.link_tx_free = &cis->lll.link_tx;
	}

	memq_init(cis->lll.link_tx_free, &cis->lll.memq_tx.head, &cis->lll.memq_tx.tail);
	/* Link is now owned by the memq */
	cis->lll.link_tx_free = NULL;

	/* Initiate CIS Request Control Procedure */
	if (ull_cp_cis_create(conn, cis) == BT_HCI_ERR_SUCCESS) {
		LL_ASSERT(cis->group);

		if (cis->group->state == CIG_STATE_CONFIGURABLE) {
			/* This CIG is now initiating an ISO connection */
			cis->group->state = CIG_STATE_INITIATING;
		}
	}
}
741
742 /* Core 5.3 Vol 6, Part B section 7.8.100:
743 * The HCI_LE_Remove_CIG command is used by the Central’s Host to remove the CIG
744 * identified by CIG_ID.
745 * This command shall delete the CIG_ID and also delete the Connection_Handles
746 * of the CIS configurations stored in the CIG.
747 * This command shall also remove the isochronous data paths that are associated
748 * with the Connection_Handles of the CIS configurations.
749 */
ll_cig_remove(uint8_t cig_id)750 uint8_t ll_cig_remove(uint8_t cig_id)
751 {
752 struct ll_conn_iso_stream *cis;
753 struct ll_conn_iso_group *cig;
754 uint16_t handle_iter;
755
756 cig = ll_conn_iso_group_get_by_id(cig_id);
757 if (!cig) {
758 /* Unknown CIG id */
759 return BT_HCI_ERR_UNKNOWN_CONN_ID;
760 }
761
762 if ((cig->state == CIG_STATE_INITIATING) || (cig->state == CIG_STATE_ACTIVE)) {
763 /* CIG is in initiating- or active state */
764 return BT_HCI_ERR_CMD_DISALLOWED;
765 }
766
767 handle_iter = UINT16_MAX;
768 for (uint8_t i = 0U; i < cig->lll.num_cis; i++) {
769 struct ll_conn *conn;
770
771 cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);
772 if (!cis) {
773 break;
774 }
775
776 conn = ll_connected_get(cis->lll.acl_handle);
777
778 if (conn) {
779 if (ull_lp_cc_is_active(conn)) {
780 /* CIG creation is ongoing */
781 return BT_HCI_ERR_CMD_DISALLOWED;
782 }
783 }
784 }
785
786 /* CIG exists and is not active */
787 handle_iter = UINT16_MAX;
788
789 for (uint8_t i = 0U; i < cig->lll.num_cis; i++) {
790 cis = ll_conn_iso_stream_get_by_group(cig, &handle_iter);
791 if (cis) {
792 /* Release CIS instance */
793 ll_conn_iso_stream_release(cis);
794 }
795 }
796
797 /* Release the CIG instance */
798 ll_conn_iso_group_release(cig);
799
800 return BT_HCI_ERR_SUCCESS;
801 }
802
/* One-time initialization hook for the central ISO module; nothing to set
 * up at present, always succeeds.
 */
int ull_central_iso_init(void)
{
	return 0;
}
807
/* Reset hook for the central ISO module (HCI Reset); no module state to
 * clear at present, always succeeds.
 */
int ull_central_iso_reset(void)
{
	return 0;
}
812
/* Finalize the CIS setup before the LL_CIS_REQ/IND exchange.
 *
 * Picks the CIS anchor instant and offset (scheduling-mode dependent),
 * initializes the CIS LLL runtime state, and returns the timing values the
 * caller places in the CIS Request PDU.
 *
 * @param cig_sync_delay   Out: CIG synchronization delay in us.
 * @param cis_sync_delay   Out: CIS synchronization delay in us.
 * @param cis_offset_min   In/out: offset window start; narrowed/updated to
 *                         the chosen CIS offset.
 * @param cis_offset_max   In/out: offset window end; may be narrowed.
 * @param conn_event_count In/out: requested instant in; chosen instant out.
 * @param access_addr      Out: CIS access address (4 octets).
 * @return 0 on success, BT_HCI_ERR_UNSPECIFIED on invalid handle/group.
 */
uint8_t ull_central_iso_setup(uint16_t cis_handle,
			      uint32_t *cig_sync_delay,
			      uint32_t *cis_sync_delay,
			      uint32_t *cis_offset_min,
			      uint32_t *cis_offset_max,
			      uint16_t *conn_event_count,
			      uint8_t *access_addr)
{
	struct ll_conn_iso_stream *cis;
	struct ll_conn_iso_group *cig;
	uint16_t event_counter;
	struct ll_conn *conn;
	uint16_t instant;

	cis = ll_conn_iso_stream_get(cis_handle);
	if (!cis) {
		return BT_HCI_ERR_UNSPECIFIED;
	}

	cig = cis->group;
	if (!cig) {
		return BT_HCI_ERR_UNSPECIFIED;
	}

	/* ACL connection of the new CIS */
	conn = ll_conn_get(cis->lll.acl_handle);
	event_counter = ull_conn_event_counter(conn);
	/* Instant must lie in the future of the current ACL event counter */
	instant = MAX(*conn_event_count, event_counter + 1);

#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
	uint32_t cis_offset;

	cis_offset = *cis_offset_min;

	/* Calculate offset for CIS */
	if (cig->state == CIG_STATE_ACTIVE) {
		uint32_t time_of_intant;
		uint32_t cig_ref_point;

		/* CIG is started. Use the CIG reference point and latest ticks_at_expire
		 * for associated ACL, to calculate the offset.
		 * NOTE: The following calculations are done in a 32-bit time
		 * range with full consideration and expectation that the
		 * controller clock does not support the full 32-bit range in
		 * microseconds. However it is valid as the purpose is to
		 * calculate the difference and the spare higher order bits will
		 * ensure that no wrapping can occur before the termination
		 * condition of the while loop is met. Using time wrapping will
		 * complicate this.
		 */
		time_of_intant = HAL_TICKER_TICKS_TO_US(conn->llcp.prep.ticks_at_expire) +
				 EVENT_OVERHEAD_START_US +
				 ((instant - event_counter) * conn->lll.interval * CONN_INT_UNIT_US);

		/* Advance the CIG reference point past the instant time */
		cig_ref_point = cig->cig_ref_point;
		while (cig_ref_point < time_of_intant) {
			cig_ref_point += cig->iso_interval * ISO_INT_UNIT_US;
		}

		cis_offset = (cig_ref_point - time_of_intant) +
			     (cig->sync_delay - cis->sync_delay);

		/* We have to narrow down the min/max offset to the calculated value */
		*cis_offset_min = cis_offset;
		*cis_offset_max = cis_offset;
	}

	cis->offset = cis_offset;

#else /* !CONFIG_BT_CTLR_JIT_SCHEDULING */

	if (false) {

#if defined(CONFIG_BT_CTLR_CENTRAL_SPACING)
	} else if (CONFIG_BT_CTLR_CENTRAL_SPACING > 0) {
		uint32_t cis_offset;

		/* Place the CIS after the ACL slot plus ticker resolution
		 * margins.
		 */
		cis_offset = HAL_TICKER_TICKS_TO_US(conn->ull.ticks_slot) +
			     (EVENT_TICKER_RES_MARGIN_US << 1U);

		cis_offset += cig->sync_delay - cis->sync_delay;

		if (cis_offset < *cis_offset_min) {
			cis_offset = *cis_offset_min;
		}

		cis->offset = cis_offset;
#endif /* CONFIG_BT_CTLR_CENTRAL_SPACING */

	} else {
		cis->offset = *cis_offset_min;
	}
#endif /* !CONFIG_BT_CTLR_JIT_SCHEDULING */

	cis->central.instant = instant;
#if defined(CONFIG_BT_CTLR_ISOAL_PSN_IGNORE)
	cis->pkt_seq_num = 0U;
#endif /* CONFIG_BT_CTLR_ISOAL_PSN_IGNORE */
	/* Reset the CIS LLL runtime state for a fresh connection */
	cis->lll.event_count = LLL_CONN_ISO_EVENT_COUNT_MAX;
	cis->lll.next_subevent = 0U;
	cis->lll.sn = 0U;
	cis->lll.nesn = 0U;
	cis->lll.cie = 0U;
	cis->lll.npi = 0U;
	cis->lll.flush = LLL_CIS_FLUSH_NONE;
	cis->lll.active = 0U;
	cis->lll.datapath_ready_rx = 0U;
	cis->lll.tx.payload_count = 0U;
	cis->lll.rx.payload_count = 0U;

	cis->lll.tx.bn_curr = 1U;
	cis->lll.rx.bn_curr = 1U;

	/* Transfer to caller */
	*cig_sync_delay = cig->sync_delay;
	*cis_sync_delay = cis->sync_delay;
	*cis_offset_min = cis->offset;
	memcpy(access_addr, cis->lll.access_addr, sizeof(cis->lll.access_addr));

	*conn_event_count = instant;

	return 0U;
}
936
/* Provide the CIS offset range for the LLCP offset calculation.
 *
 * Picks the instant (current ACL event counter + 3) and, depending on the
 * scheduling configuration, either returns the offset range synchronously
 * (JIT scheduling, or CONFIG_BT_CTLR_CENTRAL_SPACING != 0) or defers the
 * calculation to a mayfly in ULL_LOW context and returns -EBUSY; the reply
 * then arrives via ull_cp_cc_offset_calc_reply().
 *
 * @return 0 when offsets were written synchronously, -EBUSY when the
 *         calculation was deferred.
 */
int ull_central_iso_cis_offset_get(uint16_t cis_handle,
				   uint32_t *cis_offset_min,
				   uint32_t *cis_offset_max,
				   uint16_t *conn_event_count)
{
	struct ll_conn_iso_stream *cis;
	struct ll_conn_iso_group *cig;
	struct ll_conn *conn;

	cis = ll_conn_iso_stream_get(cis_handle);
	LL_ASSERT(cis);

	conn = ll_conn_get(cis->lll.acl_handle);

	cis->central.instant = ull_conn_event_counter(conn) + 3U;
	*conn_event_count = cis->central.instant;

	/* Provide CIS offset range
	 * CIS_Offset_Max < (connInterval - (CIG_Sync_Delay + T_MSS))
	 */
	cig = cis->group;
	*cis_offset_max = (conn->lll.interval * CONN_INT_UNIT_US) -
			  cig->sync_delay;

	if (IS_ENABLED(CONFIG_BT_CTLR_JIT_SCHEDULING)) {
		*cis_offset_min = MAX(CIS_MIN_OFFSET_MIN, EVENT_OVERHEAD_CIS_SETUP_US);
		return 0;
	}

#if (CONFIG_BT_CTLR_CENTRAL_SPACING == 0)
	/* Defer offset calculation to ULL_LOW context; choose CIS- or
	 * CIG-level calculation depending on whether the CIG is running.
	 */
	if (cig->state == CIG_STATE_ACTIVE) {
		cis_offset_get(cis);
	} else {
		cig_offset_get(cis);
	}

	return -EBUSY;
#else /* CONFIG_BT_CTLR_CENTRAL_SPACING != 0 */

	/* Fixed spacing: place the CIS after the ACL slot plus ticker
	 * resolution margins.
	 */
	*cis_offset_min = HAL_TICKER_TICKS_TO_US(conn->ull.ticks_slot) +
			  (EVENT_TICKER_RES_MARGIN_US << 1U);

	*cis_offset_min += cig->sync_delay - cis->sync_delay;

	return 0;
#endif /* CONFIG_BT_CTLR_CENTRAL_SPACING != 0 */
}
984
985 #if (CONFIG_BT_CTLR_CENTRAL_SPACING == 0)
cig_offset_get(struct ll_conn_iso_stream * cis)986 static void cig_offset_get(struct ll_conn_iso_stream *cis)
987 {
988 static memq_link_t link;
989 static struct mayfly mfy = {0, 0, &link, NULL, mfy_cig_offset_get};
990 uint32_t ret;
991
992 mfy.param = cis;
993 ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1,
994 &mfy);
995 LL_ASSERT(!ret);
996 }
997
mfy_cig_offset_get(void * param)998 static void mfy_cig_offset_get(void *param)
999 {
1000 struct ll_conn_iso_stream *cis;
1001 struct ll_conn_iso_group *cig;
1002 uint32_t conn_interval_us;
1003 uint32_t ticks_to_expire;
1004 uint32_t offset_max_us;
1005 uint32_t offset_min_us;
1006 struct ll_conn *conn;
1007 int err;
1008
1009 cis = param;
1010 cig = cis->group;
1011
1012 err = ull_sched_conn_iso_free_offset_get(cig->ull.ticks_slot,
1013 &ticks_to_expire);
1014 LL_ASSERT(!err);
1015
1016 offset_min_us = HAL_TICKER_TICKS_TO_US(ticks_to_expire) +
1017 (EVENT_TICKER_RES_MARGIN_US << 2U);
1018 offset_min_us += cig->sync_delay - cis->sync_delay;
1019
1020 conn = ll_conn_get(cis->lll.acl_handle);
1021 conn_interval_us = (uint32_t)conn->lll.interval * CONN_INT_UNIT_US;
1022 while (offset_min_us >= (conn_interval_us + PDU_CIS_OFFSET_MIN_US)) {
1023 offset_min_us -= conn_interval_us;
1024 }
1025
1026 offset_max_us = conn_interval_us - cig->sync_delay;
1027
1028 ull_cp_cc_offset_calc_reply(conn, offset_min_us, offset_max_us);
1029 }
1030
cis_offset_get(struct ll_conn_iso_stream * cis)1031 static void cis_offset_get(struct ll_conn_iso_stream *cis)
1032 {
1033 static memq_link_t link;
1034 static struct mayfly mfy = {0, 0, &link, NULL, mfy_cis_offset_get};
1035 uint32_t ret;
1036
1037 mfy.param = cis;
1038 ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1,
1039 &mfy);
1040 LL_ASSERT(!ret);
1041 }
1042
/* Mayfly (ULL_LOW context) handler: compute the exact CIS offset for a CIS
 * joining an already-active CIG, by locating the CIG's ticker expiry and
 * aligning it against the ACL timing at the chosen instant. The single
 * computed value is replied as both the minimum and maximum offset.
 */
static void mfy_cis_offset_get(void *param)
{
	uint32_t elapsed_acl_us, elapsed_cig_us;
	uint16_t latency_acl, latency_cig;
	struct ll_conn_iso_stream *cis;
	struct ll_conn_iso_group *cig;
	uint32_t cig_remainder_us;
	uint32_t acl_remainder_us;
	uint32_t cig_interval_us;
	uint32_t ticks_to_expire;
	uint32_t ticks_current;
	uint32_t offset_min_us;
	struct ll_conn *conn;
	uint32_t remainder;
	uint8_t ticker_id;
	uint16_t lazy;
	uint8_t retry;
	uint8_t id;

	cis = param;
	cig = cis->group;
	/* The running CIG's ticker id; the loop below iterates tickers until
	 * this one is found.
	 */
	ticker_id = TICKER_ID_CONN_ISO_BASE + ll_conn_iso_group_handle_get(cig);

	id = TICKER_NULL;
	ticks_to_expire = 0U;
	ticks_current = 0U;

	/* In the first iteration the actual ticks_current value is returned
	 * which will be different from the initial value of 0 that is set.
	 * Subsequent iterations should return the same ticks_current as the
	 * reference tick.
	 * In order to avoid infinite updates to ticker's reference due to any
	 * race condition due to expiring tickers, we try upto 3 more times.
	 * Hence, first iteration to get an actual ticks_current and 3 more as
	 * retries when there could be race conditions that changes the value
	 * of ticks_current.
	 *
	 * ticker_next_slot_get_ext() restarts iterating when updated value of
	 * ticks_current is returned.
	 */
	retry = 4U;
	do {
		uint32_t volatile ret_cb;
		uint32_t ticks_previous;
		uint32_t ret;
		bool success;

		ticks_previous = ticks_current;

		ret_cb = TICKER_STATUS_BUSY;
		ret = ticker_next_slot_get_ext(TICKER_INSTANCE_ID_CTLR,
					       TICKER_USER_ID_ULL_LOW,
					       &id, &ticks_current,
					       &ticks_to_expire, &remainder,
					       &lazy, NULL, NULL,
					       ticker_op_cb, (void *)&ret_cb);
		if (ret == TICKER_STATUS_BUSY) {
			/* Busy wait until Ticker Job is enabled after any Radio
			 * event is done using the Radio hardware. Ticker Job
			 * ISR is disabled during Radio events in LOW_LAT
			 * feature to avoid Radio ISR latencies.
			 */
			while (ret_cb == TICKER_STATUS_BUSY) {
				ticker_job_sched(TICKER_INSTANCE_ID_CTLR,
						 TICKER_USER_ID_ULL_LOW);
			}
		}

		success = (ret_cb == TICKER_STATUS_SUCCESS);
		LL_ASSERT(success);

		/* Retry budget exhausted if ticks_current keeps changing */
		LL_ASSERT((ticks_current == ticks_previous) || retry--);

		LL_ASSERT(id != TICKER_NULL);
	} while (id != ticker_id);

	/* Reduced a tick for negative remainder and return positive remainder
	 * value.
	 */
	hal_ticker_remove_jitter(&ticks_to_expire, &remainder);
	cig_remainder_us = remainder;

	/* Add a tick for negative remainder and return positive remainder
	 * value.
	 */
	conn = ll_conn_get(cis->lll.acl_handle);
	remainder = conn->llcp.prep.remainder;
	hal_ticker_add_jitter(&ticks_to_expire, &remainder);
	acl_remainder_us = remainder;

	/* Calculate the CIS offset in the CIG */
	offset_min_us = HAL_TICKER_TICKS_TO_US(ticks_to_expire) +
			cig_remainder_us + cig->sync_delay -
			acl_remainder_us - cis->sync_delay;

	/* Calculate instant latency */
	/* 32-bits are sufficient as maximum connection interval is 4 seconds,
	 * and latency counts (typically 3) is low enough to avoid 32-bit
	 * overflow. Refer to ull_central_iso_cis_offset_get().
	 */
	latency_acl = cis->central.instant - ull_conn_event_counter(conn);
	elapsed_acl_us = latency_acl * conn->lll.interval * CONN_INT_UNIT_US;

	/* Calculate elapsed CIG intervals until the instant */
	cig_interval_us = cig->iso_interval * ISO_INT_UNIT_US;
	latency_cig = DIV_ROUND_UP(elapsed_acl_us, cig_interval_us);
	elapsed_cig_us = latency_cig * cig_interval_us;

	/* Compensate for the difference between ACL elapsed vs CIG elapsed */
	offset_min_us += elapsed_cig_us - elapsed_acl_us;
	/* Normalize into one CIG interval, keeping at least the minimum
	 * CIS offset.
	 */
	while (offset_min_us >= (cig_interval_us + PDU_CIS_OFFSET_MIN_US)) {
		offset_min_us -= cig_interval_us;
	}

	/* Decrement event_count to compensate for offset_min_us greater than
	 * CIG interval due to offset being at least PDU_CIS_OFFSET_MIN_US.
	 */
	if (offset_min_us > cig_interval_us) {
		cis->lll.event_count--;
	}

	/* Exact offset: reply the same value as both min and max */
	ull_cp_cc_offset_calc_reply(conn, offset_min_us, offset_min_us);
}
1166
/* Ticker operation completion callback: publish the operation status through
 * the caller-provided volatile location polled by the busy-wait loop.
 */
static void ticker_op_cb(uint32_t status, void *param)
{
	uint32_t volatile *ret_cb = param;

	*ret_cb = status;
}
1171 #endif /* CONFIG_BT_CTLR_CENTRAL_SPACING == 0 */
1172
/* Derive the burst number (BN) and Max_PDU for a CIS from the SDU size and
 * the ISO/SDU interval ratio.
 *
 * For framed PDUs, *bn and *max_pdu are both outputs, computed from the
 * segmentation overhead and allowed SDU drift. For unframed PDUs, *max_pdu
 * is an input (the already-chosen PDU size) and only *bn is computed.
 */
static void set_bn_max_pdu(bool framed, uint32_t iso_interval,
			   uint32_t sdu_interval, uint16_t max_sdu, uint8_t *bn,
			   uint8_t *max_pdu)
{
	if (framed) {
		uint32_t max_drift_us;
		uint32_t ceil_f;

		/* BT Core 5.4 Vol 6, Part G, Section 2.2:
		 * Max_PDU >= ((ceil(F) x 5 + ceil(F x Max_SDU)) / BN) + 2
		 * F = (1 + MaxDrift) x ISO_Interval / SDU_Interval
		 * SegmentationHeader + TimeOffset = 5 bytes
		 * Continuation header = 2 bytes
		 * MaxDrift (Max. allowed SDU delivery timing drift) = 100 ppm
		 */
		max_drift_us = DIV_ROUND_UP(SDU_MAX_DRIFT_PPM * sdu_interval, USEC_PER_SEC);
		ceil_f = DIV_ROUND_UP((USEC_PER_SEC + max_drift_us) * (uint64_t)iso_interval,
				      USEC_PER_SEC * (uint64_t)sdu_interval);
		/* if (false) keeps the preprocessor-conditional branch below
		 * syntactically valid when the Kconfig option is disabled.
		 */
		if (false) {
#if defined(CONFIG_BT_CTLR_CONN_ISO_AVOID_SEGMENTATION)
		/* To avoid segmentation according to HAP, if the ISO_Interval is less than
		 * the SDU_Interval, we assume BN=1 and calculate the Max_PDU as:
		 * Max_PDU = ceil(F / BN) x (5 + Max_SDU)
		 *
		 * This is in accordance with the "Core enhancement for ISOAL CR".
		 *
		 * This ensures that the drift can be contained in the difference between
		 * SDU_Interval and link bandwidth. For BN=1, ceil(F) == ceil(F/BN).
		 */
		} else if (iso_interval < sdu_interval) {
			*bn = 1;
			*max_pdu = ceil_f * (PDU_ISO_SEG_HDR_SIZE + PDU_ISO_SEG_TIMEOFFSET_SIZE +
					     max_sdu);
#endif
		} else {
			uint32_t ceil_f_x_max_sdu;
			uint16_t max_pdu_bn1;

			/* ceil(F x Max_SDU) computed without materializing F */
			ceil_f_x_max_sdu = DIV_ROUND_UP(max_sdu * ((USEC_PER_SEC + max_drift_us) *
								   (uint64_t)iso_interval),
							USEC_PER_SEC * (uint64_t)sdu_interval);

			/* Strategy: Keep lowest possible BN.
			 * TODO: Implement other strategies, possibly as policies.
			 */
			max_pdu_bn1 = ceil_f * (PDU_ISO_SEG_HDR_SIZE +
						PDU_ISO_SEG_TIMEOFFSET_SIZE) + ceil_f_x_max_sdu;
			*bn = DIV_ROUND_UP(max_pdu_bn1, LL_CIS_OCTETS_TX_MAX);
			*max_pdu = DIV_ROUND_UP(max_pdu_bn1, *bn) + PDU_ISO_SEG_HDR_SIZE;
		}
	} else {
		/* For unframed, ISO_Interval must be N x SDU_Interval */
		LL_ASSERT(iso_interval % sdu_interval == 0);

		/* Core 5.3 Vol 6, Part G section 2.1:
		 * BN >= ceil(Max_SDU/Max_PDU * ISO_Interval/SDU_Interval)
		 */
		*bn = DIV_ROUND_UP(max_sdu * iso_interval, (*max_pdu) * sdu_interval);
	}
}
1233
ll_cig_parameters_validate(void)1234 static uint8_t ll_cig_parameters_validate(void)
1235 {
1236 if (ll_iso_setup.cis_count > BT_HCI_ISO_CIS_COUNT_MAX) {
1237 /* Invalid CIS_Count */
1238 return BT_HCI_ERR_INVALID_PARAM;
1239 }
1240
1241 if (ll_iso_setup.group.cig_id > BT_HCI_ISO_CIG_ID_MAX) {
1242 /* Invalid CIG_ID */
1243 return BT_HCI_ERR_INVALID_PARAM;
1244 }
1245
1246 if (!IN_RANGE(ll_iso_setup.group.c_sdu_interval, BT_HCI_ISO_SDU_INTERVAL_MIN,
1247 BT_HCI_ISO_SDU_INTERVAL_MAX) ||
1248 !IN_RANGE(ll_iso_setup.group.p_sdu_interval, BT_HCI_ISO_SDU_INTERVAL_MIN,
1249 BT_HCI_ISO_SDU_INTERVAL_MAX)) {
1250 /* Parameter out of range */
1251 return BT_HCI_ERR_INVALID_PARAM;
1252 }
1253
1254 if (ll_iso_setup.group.central.test) {
1255 if (!IN_RANGE(ll_iso_setup.group.iso_interval,
1256 BT_HCI_ISO_INTERVAL_MIN, BT_HCI_ISO_INTERVAL_MAX)) {
1257 /* Parameter out of range */
1258 return BT_HCI_ERR_INVALID_PARAM;
1259 }
1260 } else {
1261 if (!IN_RANGE(ll_iso_setup.group.c_latency,
1262 BT_HCI_ISO_MAX_TRANSPORT_LATENCY_MIN * USEC_PER_MSEC,
1263 BT_HCI_ISO_MAX_TRANSPORT_LATENCY_MAX * USEC_PER_MSEC) ||
1264 !IN_RANGE(ll_iso_setup.group.p_latency,
1265 BT_HCI_ISO_MAX_TRANSPORT_LATENCY_MIN * USEC_PER_MSEC,
1266 BT_HCI_ISO_MAX_TRANSPORT_LATENCY_MAX * USEC_PER_MSEC)) {
1267 /* Parameter out of range */
1268 return BT_HCI_ERR_INVALID_PARAM;
1269 }
1270 }
1271
1272 if (((ll_iso_setup.group.central.sca & ~BT_HCI_ISO_WORST_CASE_SCA_VALID_MASK) != 0U) ||
1273 ((ll_iso_setup.group.central.packing & ~BT_HCI_ISO_PACKING_VALID_MASK) != 0U) ||
1274 ((ll_iso_setup.group.central.framing & ~BT_HCI_ISO_FRAMING_VALID_MASK) != 0U)) {
1275 /* Worst_Case_SCA, Packing or Framing sets RFU value */
1276 return BT_HCI_ERR_INVALID_PARAM;
1277 }
1278
1279 if (ll_iso_setup.cis_count > STREAMS_PER_GROUP) {
1280 /* Requested number of CISes not available by configuration. Check as last
1281 * to avoid interfering with qualification parameter checks.
1282 */
1283 return BT_HCI_ERR_CONN_LIMIT_EXCEEDED;
1284 }
1285
1286 return BT_HCI_ERR_SUCCESS;
1287 }
1288
/* Validate per-CIS HCI parameters.
 *
 * Returns BT_HCI_ERR_SUCCESS, or an HCI error code: invalid-param for
 * out-of-range CIS_ID/Max_SDU, unsupported-feature for invalid PHY masks,
 * and connection-limit-exceeded when the CIS index is beyond the configured
 * number of streams per group.
 */
static uint8_t ll_cis_parameters_validate(uint8_t cis_idx, uint8_t cis_id,
					  uint16_t c_sdu, uint16_t p_sdu,
					  uint16_t c_phy, uint16_t p_phy)
{
	/* CIS_ID range and Max_SDU RFU bits, both directions */
	if ((cis_id > BT_HCI_ISO_CIS_ID_VALID_MAX) ||
	    ((c_sdu & ~BT_HCI_ISO_MAX_SDU_VALID_MASK) != 0U) ||
	    ((p_sdu & ~BT_HCI_ISO_MAX_SDU_VALID_MASK) != 0U)) {
		return BT_HCI_ERR_INVALID_PARAM;
	}

	/* PHY must be non-zero and contain only supported PHY bits */
	if ((c_phy == 0U) || ((c_phy & ~PHY_VALID_MASK) != 0U) ||
	    (p_phy == 0U) || ((p_phy & ~PHY_VALID_MASK) != 0U)) {
		return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
	}

	/* More CISes requested than configured per group */
	if (cis_idx >= STREAMS_PER_GROUP) {
		return BT_HCI_ERR_CONN_LIMIT_EXCEEDED;
	}

	return BT_HCI_ERR_SUCCESS;
}
1310
1311 #if defined(CONFIG_BT_CTLR_CONN_ISO_RELIABILITY_POLICY)
ll_cis_calculate_ft(uint32_t cig_sync_delay,uint32_t iso_interval_us,uint32_t sdu_interval,uint32_t latency,uint8_t framed)1312 static uint8_t ll_cis_calculate_ft(uint32_t cig_sync_delay, uint32_t iso_interval_us,
1313 uint32_t sdu_interval, uint32_t latency, uint8_t framed)
1314 {
1315 uint32_t tl;
1316
1317 /* Framed:
1318 * TL = CIG_Sync_Delay + FT x ISO_Interval + SDU_Interval
1319 *
1320 * Unframed:
1321 * TL = CIG_Sync_Delay + FT x ISO_Interval - SDU_Interval
1322 */
1323 for (uint16_t ft = 1U; ft <= CONFIG_BT_CTLR_CONN_ISO_STREAMS_MAX_FT; ft++) {
1324 if (framed) {
1325 tl = cig_sync_delay + ft * iso_interval_us + sdu_interval;
1326 } else {
1327 tl = cig_sync_delay + ft * iso_interval_us - sdu_interval;
1328 }
1329
1330 if (tl > latency) {
1331 /* Latency exceeded - use one less */
1332 return ft - 1U;
1333 }
1334 }
1335
1336 return 0;
1337 }
1338 #endif /* CONFIG_BT_CTLR_CONN_ISO_RELIABILITY_POLICY */
1339