1 /*
2 * Copyright (c) 2021 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <soc.h>
8 #include <zephyr/kernel.h>
9 #include <zephyr/sys/byteorder.h>
10 #include <zephyr/bluetooth/hci_types.h>
11
12 #include "hal/cpu.h"
13 #include "hal/ccm.h"
14 #include "hal/ticker.h"
15
16 #include "util/util.h"
17 #include "util/mem.h"
18 #include "util/memq.h"
19 #include "util/mfifo.h"
20 #include "util/mayfly.h"
21
22 #include "ticker/ticker.h"
23
24 #include "pdu_df.h"
25 #include "lll/pdu_vendor.h"
26 #include "pdu.h"
27
28 #include "lll.h"
29 #include "lll/lll_vendor.h"
30 #include "lll/lll_adv_types.h"
31 #include "lll_adv.h"
32 #include "lll/lll_adv_pdu.h"
33 #include "lll_adv_iso.h"
34 #include "lll_iso_tx.h"
35
36 #include "isoal.h"
37
38 #include "ull_adv_types.h"
39 #include "ull_iso_types.h"
40
41 #include "ull_internal.h"
42 #include "ull_adv_internal.h"
43 #include "ull_chan_internal.h"
44 #include "ull_sched_internal.h"
45 #include "ull_iso_internal.h"
46
47 #include "ll.h"
48 #include "ll_feat.h"
49
50 #include "bt_crypto.h"
51
52 #include "hal/debug.h"
53
54 static int init_reset(void);
55 static struct ll_adv_iso_set *adv_iso_get(uint8_t handle);
56 static struct stream *adv_iso_stream_acquire(void);
57 static uint16_t adv_iso_stream_handle_get(struct lll_adv_iso_stream *stream);
58 static uint8_t ptc_calc(const struct lll_adv_iso *lll, uint32_t event_spacing,
59 uint32_t event_spacing_max);
60 static uint32_t adv_iso_start(struct ll_adv_iso_set *adv_iso,
61 uint32_t iso_interval_us);
62 static uint8_t adv_iso_chm_update(uint8_t big_handle);
63 static void adv_iso_chm_complete_commit(struct lll_adv_iso *lll_iso);
64 static void mfy_iso_offset_get(void *param);
65 static void pdu_big_info_chan_map_phy_set(uint8_t *chm_phy, uint8_t *chan_map,
66 uint8_t phy);
67 static inline struct pdu_big_info *big_info_get(struct pdu_adv *pdu);
68 static inline void big_info_offset_fill(struct pdu_big_info *bi,
69 uint32_t ticks_offset,
70 uint32_t start_us);
71 static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
72 uint32_t remainder, uint16_t lazy, uint8_t force,
73 void *param);
74 static void ticker_op_cb(uint32_t status, void *param);
75 static void ticker_stop_op_cb(uint32_t status, void *param);
76 static void adv_iso_disable(void *param);
77 static void disabled_cb(void *param);
78 static void tx_lll_flush(void *param);
79
80 static memq_link_t link_lll_prepare;
81 static struct mayfly mfy_lll_prepare = {0U, 0U, &link_lll_prepare, NULL, NULL};
82
83 static struct ll_adv_iso_set ll_adv_iso[CONFIG_BT_CTLR_ADV_ISO_SET];
84 static struct lll_adv_iso_stream
85 stream_pool[CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT];
86 static void *stream_free;
87
/* Create a Broadcast Isochronous Group (BIG).
 *
 * HCI LE Create BIG implementation: validates parameters, allocates BIS
 * stream contexts and event link buffers, derives the BIG timing (BN, NSE,
 * IRC, PTC, PTO, ISO interval, sub event and BIS spacing), appends the
 * BIGInfo ACAD to the AUX_SYNC_IND PDU of the associated periodic
 * advertising train, and schedules the first BIG event.
 *
 * @param big_handle   BIG identifier chosen by the Host.
 * @param adv_handle   Advertising set whose periodic train carries BIGInfo.
 * @param encryption   Non-zero to derive a GSK from @p bcode and encrypt.
 * @param bcode        16-byte Broadcast Code (used only when encrypting).
 *
 * @return BT_HCI_ERR_SUCCESS or an HCI error code.
 */
uint8_t ll_big_create(uint8_t big_handle, uint8_t adv_handle, uint8_t num_bis,
		      uint32_t sdu_interval, uint16_t max_sdu,
		      uint16_t max_latency, uint8_t rtn, uint8_t phy,
		      uint8_t packing, uint8_t framing, uint8_t encryption,
		      uint8_t *bcode)
{
	uint8_t hdr_data[1 + sizeof(uint8_t *)];
	struct lll_adv_sync *lll_adv_sync;
	struct lll_adv_iso *lll_adv_iso;
	struct ll_adv_iso_set *adv_iso;
	struct pdu_adv *pdu_prev, *pdu;
	struct pdu_big_info *big_info;
	uint32_t event_spacing_max;
	uint8_t pdu_big_info_size;
	uint32_t iso_interval_us;
	uint32_t latency_packing;
	memq_link_t *link_cmplt;
	memq_link_t *link_term;
	struct ll_adv_set *adv;
	uint32_t event_spacing;
	uint16_t ctrl_spacing;
	uint8_t sdu_per_event;
	uint8_t ter_idx;
	uint8_t *acad;
	uint32_t ret;
	uint8_t err;
	uint8_t bn;
	int res;

	adv_iso = adv_iso_get(big_handle);

	/* Already created */
	if (!adv_iso || adv_iso->lll.adv) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* No advertising set created */
	adv = ull_adv_is_created_get(adv_handle);
	if (!adv) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	/* Does not identify a periodic advertising train or
	 * the periodic advertising trains is already associated
	 * with another BIG.
	 */
	lll_adv_sync = adv->lll.sync;
	if (!lll_adv_sync || lll_adv_sync->iso) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	/* Optional HCI parameter range validation, per the valid ranges in
	 * the LE Create BIG command definition.
	 */
	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
		if (num_bis == 0U || num_bis > 0x1F) {
			return BT_HCI_ERR_INVALID_PARAM;
		}

		if (sdu_interval < 0x000100 || sdu_interval > 0x0FFFFF) {
			return BT_HCI_ERR_INVALID_PARAM;
		}

		if (max_sdu < 0x0001 || max_sdu > 0x0FFF) {
			return BT_HCI_ERR_INVALID_PARAM;
		}

		if (max_latency > 0x0FA0) {
			return BT_HCI_ERR_INVALID_PARAM;
		}

		if (rtn > 0x0F) {
			return BT_HCI_ERR_INVALID_PARAM;
		}

		if (phy > (BT_HCI_LE_EXT_SCAN_PHY_1M |
			   BT_HCI_LE_EXT_SCAN_PHY_2M |
			   BT_HCI_LE_EXT_SCAN_PHY_CODED)) {
			return BT_HCI_ERR_INVALID_PARAM;
		}

		if (packing > 1U) {
			return BT_HCI_ERR_INVALID_PARAM;
		}

		if (framing > 1U) {
			return BT_HCI_ERR_INVALID_PARAM;
		}

		if (encryption > 1U) {
			return BT_HCI_ERR_INVALID_PARAM;
		}
	}

	/* Check if free BISes available */
	if (mem_free_count_get(stream_free) < num_bis) {
		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	/* Allocate link buffer for created event */
	link_cmplt = ll_rx_link_alloc();
	if (!link_cmplt) {
		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	/* Allocate link buffer for sync lost event */
	link_term = ll_rx_link_alloc();
	if (!link_term) {
		ll_rx_link_release(link_cmplt);

		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	/* Store parameters in LLL context */
	/* TODO: parameters to ULL if only accessed by ULL */
	lll_adv_iso = &adv_iso->lll;
	lll_adv_iso->handle = big_handle;
	/* PDU payload cannot exceed the controller's BIS Tx octet limit */
	lll_adv_iso->max_pdu = MIN(LL_BIS_OCTETS_TX_MAX, max_sdu);
	lll_adv_iso->phy = phy;
	lll_adv_iso->phy_flags = PHY_FLAGS_S8;

	/* Mandatory Num_BIS = 1 */
	lll_adv_iso->num_bis = num_bis;

	/* Allocate streams: free count was checked above, so acquire cannot
	 * fail here.
	 */
	for (uint8_t i = 0U; i < num_bis; i++) {
		struct lll_adv_iso_stream *stream;

		stream = (void *)adv_iso_stream_acquire();
		stream->big_handle = big_handle;
		stream->dp = NULL;

		/* (Re)initialize the per-stream Tx memq; link_tx_free holds
		 * the spare link left over from a previous memq_deinit.
		 */
		if (!stream->link_tx_free) {
			stream->link_tx_free = &stream->link_tx;
		}
		memq_init(stream->link_tx_free, &stream->memq_tx.head,
			  &stream->memq_tx.tail);
		stream->link_tx_free = NULL;

		stream->pkt_seq_num = 0U;

		lll_adv_iso->stream_handle[i] =
			adv_iso_stream_handle_get(stream);
	}

	/* FIXME: SDU per max latency */
	sdu_per_event = MAX((max_latency * USEC_PER_MSEC / sdu_interval), 2U) -
			1U;

	/* BN (Burst Count), Mandatory BN = 1 */
	bn = DIV_ROUND_UP(max_sdu, lll_adv_iso->max_pdu) * sdu_per_event;
	if (bn > PDU_BIG_BN_MAX) {
		/* Restrict each BIG event to maximum burst per BIG event */
		lll_adv_iso->bn = PDU_BIG_BN_MAX;

		/* Ceil the required burst count per SDU to next maximum burst
		 * per BIG event.
		 */
		bn = DIV_ROUND_UP(bn, PDU_BIG_BN_MAX) * PDU_BIG_BN_MAX;
	} else {
		lll_adv_iso->bn = bn;
	}

	/* Calculate ISO interval */
	/* iso_interval shall be at least SDU interval,
	 * or integer multiple of SDU interval for unframed PDUs
	 */
	iso_interval_us = ((sdu_interval * lll_adv_iso->bn * sdu_per_event) /
			   (bn * PERIODIC_INT_UNIT_US)) * PERIODIC_INT_UNIT_US;
	lll_adv_iso->iso_interval = iso_interval_us / PERIODIC_INT_UNIT_US;

	/* Immediate Repetition Count (IRC), Mandatory IRC = 1 */
	lll_adv_iso->irc = rtn + 1U;

	/* Calculate NSE (No. of Sub Events), Mandatory NSE = 1,
	 * without PTO added.
	 */
	lll_adv_iso->nse = lll_adv_iso->bn * lll_adv_iso->irc;

	/* NOTE: Calculate sub_interval, if interleaved then it is Num_BIS x
	 *       BIS_Spacing (by BT Spec.)
	 *       else if sequential, then by our implementation, lets keep it
	 *       max_tx_time for Max_PDU + tMSS.
	 */
	lll_adv_iso->sub_interval = PDU_BIS_US(lll_adv_iso->max_pdu, encryption,
					       phy, lll_adv_iso->phy_flags) +
				    EVENT_MSS_US;
	ctrl_spacing = PDU_BIS_US(sizeof(struct pdu_big_ctrl), encryption, phy,
				  lll_adv_iso->phy_flags);
	latency_packing = lll_adv_iso->sub_interval * lll_adv_iso->nse *
			  lll_adv_iso->num_bis;
	event_spacing = latency_packing + ctrl_spacing +
			EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
	/* FIXME: calculate overheads due to extended and periodic advertising.
	 */
	event_spacing_max = iso_interval_us - 2000U;
	if (event_spacing > event_spacing_max) {
		/* ISO interval too small to fit the calculated BIG event
		 * timing required for the supplied BIG create parameters.
		 */

		/* NOTE(review): the BIS stream contexts acquired above do not
		 * appear to be released on this (and later) error paths —
		 * possible stream pool leak; confirm against upstream.
		 */

		/* Release allocated link buffers */
		ll_rx_link_release(link_cmplt);
		ll_rx_link_release(link_term);

		return BT_HCI_ERR_INVALID_PARAM;
	}

	/* Based on packing requested, sequential or interleaved */
	if (packing) {
		/* Interleaved Packing */
		lll_adv_iso->bis_spacing = lll_adv_iso->sub_interval;
		lll_adv_iso->ptc = ptc_calc(lll_adv_iso, event_spacing,
					    event_spacing_max);
		lll_adv_iso->nse += lll_adv_iso->ptc;
		lll_adv_iso->sub_interval = lll_adv_iso->bis_spacing *
					    lll_adv_iso->nse;
	} else {
		/* Sequential Packing */
		lll_adv_iso->ptc = ptc_calc(lll_adv_iso, event_spacing,
					    event_spacing_max);
		lll_adv_iso->nse += lll_adv_iso->ptc;
		lll_adv_iso->bis_spacing = lll_adv_iso->sub_interval *
					   lll_adv_iso->nse;
	}

	/* Pre-Transmission Offset (PTO) */
	if (lll_adv_iso->ptc) {
		lll_adv_iso->pto = bn / lll_adv_iso->bn;
	} else {
		lll_adv_iso->pto = 0U;
	}

	/* TODO: Group count, GC = NSE / BN; PTO = GC - IRC;
	 *       Is this required?
	 */

	lll_adv_iso->sdu_interval = sdu_interval;
	lll_adv_iso->max_sdu = max_sdu;

	/* Seed Access Address is derived from the BIG handle */
	res = util_saa_le32(lll_adv_iso->seed_access_addr, big_handle);
	LL_ASSERT(!res);

	(void)lll_csrand_get(lll_adv_iso->base_crc_init,
			     sizeof(lll_adv_iso->base_crc_init));
	lll_adv_iso->data_chan_count =
		ull_chan_map_get(lll_adv_iso->data_chan_map);
	lll_adv_iso->payload_count = 0U;
	lll_adv_iso->latency_prepare = 0U;
	lll_adv_iso->latency_event = 0U;
	lll_adv_iso->term_req = 0U;
	lll_adv_iso->term_ack = 0U;
	lll_adv_iso->chm_req = 0U;
	lll_adv_iso->chm_ack = 0U;
	lll_adv_iso->ctrl_expire = 0U;

	/* TODO: framing support */
	lll_adv_iso->framing = framing;

	/* Allocate next PDU */
	err = ull_adv_sync_pdu_alloc(adv, ULL_ADV_PDU_EXTRA_DATA_ALLOC_IF_EXIST,
				     &pdu_prev, &pdu, NULL, NULL, &ter_idx);
	if (err) {
		/* Insufficient Advertising PDU buffers to allocate new PDU
		 * to add BIGInfo into the ACAD of the Periodic Advertising.
		 */

		/* Release allocated link buffers */
		ll_rx_link_release(link_cmplt);
		ll_rx_link_release(link_term);

		return err;
	}

	/* Add ACAD to AUX_SYNC_IND */
	if (encryption) {
		pdu_big_info_size = PDU_BIG_INFO_ENCRYPTED_SIZE;
	} else {
		pdu_big_info_size = PDU_BIG_INFO_CLEARTEXT_SIZE;
	}
	hdr_data[0] = pdu_big_info_size + PDU_ADV_DATA_HEADER_SIZE;
	err = ull_adv_sync_pdu_set_clear(lll_adv_sync, pdu_prev, pdu,
					 ULL_ADV_PDU_HDR_FIELD_ACAD, 0U,
					 &hdr_data);
	if (err) {
		/* Failed to add BIGInfo into the ACAD of the Periodic
		 * Advertising.
		 */

		/* Release allocated link buffers */
		ll_rx_link_release(link_cmplt);
		ll_rx_link_release(link_term);

		return err;
	}

	/* hdr_data[1..] holds the pointer to the ACAD region in the new PDU */
	(void)memcpy(&acad, &hdr_data[1], sizeof(acad));
	acad[PDU_ADV_DATA_HEADER_LEN_OFFSET] =
		pdu_big_info_size + (PDU_ADV_DATA_HEADER_SIZE -
				     PDU_ADV_DATA_HEADER_LEN_SIZE);
	acad[PDU_ADV_DATA_HEADER_TYPE_OFFSET] = BT_DATA_BIG_INFO;
	big_info = (void *)&acad[PDU_ADV_DATA_HEADER_DATA_OFFSET];

	/* big_info->offset, big_info->offset_units and
	 * big_info->payload_count_framing[] will be filled by periodic
	 * advertising event.
	 */

	big_info->iso_interval =
		sys_cpu_to_le16(iso_interval_us / PERIODIC_INT_UNIT_US);
	big_info->num_bis = lll_adv_iso->num_bis;
	big_info->nse = lll_adv_iso->nse;
	big_info->bn = lll_adv_iso->bn;
	big_info->sub_interval = sys_cpu_to_le24(lll_adv_iso->sub_interval);
	big_info->pto = lll_adv_iso->pto;
	big_info->spacing = sys_cpu_to_le24(lll_adv_iso->bis_spacing);
	big_info->irc = lll_adv_iso->irc;
	big_info->max_pdu = lll_adv_iso->max_pdu;
	(void)memcpy(&big_info->seed_access_addr, lll_adv_iso->seed_access_addr,
		     sizeof(big_info->seed_access_addr));
	big_info->sdu_interval = sys_cpu_to_le24(sdu_interval);
	big_info->max_sdu = max_sdu;
	(void)memcpy(&big_info->base_crc_init, lll_adv_iso->base_crc_init,
		     sizeof(big_info->base_crc_init));
	pdu_big_info_chan_map_phy_set(big_info->chm_phy,
				      lll_adv_iso->data_chan_map,
				      phy);
	/* Assign the 39-bit payload count, and 1-bit framing */
	big_info->payload_count_framing[0] = lll_adv_iso->payload_count;
	big_info->payload_count_framing[1] = lll_adv_iso->payload_count >> 8;
	big_info->payload_count_framing[2] = lll_adv_iso->payload_count >> 16;
	big_info->payload_count_framing[3] = lll_adv_iso->payload_count >> 24;
	big_info->payload_count_framing[4] = lll_adv_iso->payload_count >> 32;
	big_info->payload_count_framing[4] &= ~BIT(7);
	big_info->payload_count_framing[4] |= ((framing & 0x01) << 7);

	if (encryption) {
		/* "BIG1"/"BIG2"/"BIG3" key-diversification labels (little
		 * endian ASCII), per the BIG encryption key derivation.
		 */
		const uint8_t BIG1[16] = {0x31, 0x47, 0x49, 0x42, };
		const uint8_t BIG2[4]  = {0x32, 0x47, 0x49, 0x42};
		const uint8_t BIG3[4]  = {0x33, 0x47, 0x49, 0x42};
		struct ccm *ccm_tx;
		uint8_t igltk[16];
		uint8_t gltk[16];
		uint8_t gsk[16];

		/* Fill GIV and GSKD */
		(void)lll_csrand_get(lll_adv_iso->giv,
				     sizeof(lll_adv_iso->giv));
		(void)memcpy(big_info->giv, lll_adv_iso->giv,
			     sizeof(big_info->giv));
		(void)lll_csrand_get(big_info->gskd, sizeof(big_info->gskd));

		/* Calculate GSK: Broadcast_Code -> IGLTK -> GLTK -> GSK */
		err = bt_crypto_h7(BIG1, bcode, igltk);
		LL_ASSERT(!err);
		err = bt_crypto_h6(igltk, BIG2, gltk);
		LL_ASSERT(!err);
		err = bt_crypto_h8(gltk, big_info->gskd, BIG3, gsk);
		LL_ASSERT(!err);

		/* Prepare the CCM parameters */
		ccm_tx = &lll_adv_iso->ccm_tx;
		ccm_tx->direction = 1U;
		(void)memcpy(&ccm_tx->iv[4], &lll_adv_iso->giv[4], 4U);
		(void)mem_rcopy(ccm_tx->key, gsk, sizeof(ccm_tx->key));

		/* NOTE: counter is filled in LLL */

		lll_adv_iso->enc = 1U;
	} else {
		lll_adv_iso->enc = 0U;
	}

	/* Associate the ISO instance with an Extended Advertising instance */
	lll_adv_iso->adv = &adv->lll;

	/* Store the link buffer for ISO create and terminate complete event */
	adv_iso->node_rx_complete.hdr.link = link_cmplt;
	adv_iso->node_rx_terminate.hdr.link = link_term;

	/* Initialise LLL header members */
	lll_hdr_init(lll_adv_iso, adv_iso);

	/* Start sending BIS empty data packet for each BIS */
	ret = adv_iso_start(adv_iso, iso_interval_us);
	if (ret) {
		/* Failed to schedule BIG events */

		/* Reset the association of ISO instance with the Extended
		 * Advertising Instance
		 */
		lll_adv_iso->adv = NULL;

		/* Release allocated link buffers */
		ll_rx_link_release(link_cmplt);
		ll_rx_link_release(link_term);

		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Associate the ISO instance with a Periodic Advertising */
	lll_adv_sync->iso = lll_adv_iso;

#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
	/* Notify the sync instance */
	ull_adv_iso_created(HDR_LLL2ULL(lll_adv_sync));
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */

	/* Commit the BIGInfo in the ACAD field of Periodic Advertising */
	lll_adv_sync_data_enqueue(lll_adv_sync, ter_idx);

	return BT_HCI_ERR_SUCCESS;
}
498
/* HCI LE Create BIG Test: not implemented, always disallowed. */
uint8_t ll_big_test_create(uint8_t big_handle, uint8_t adv_handle,
			   uint8_t num_bis, uint32_t sdu_interval,
			   uint16_t iso_interval, uint8_t nse, uint16_t max_sdu,
			   uint16_t max_pdu, uint8_t phy, uint8_t packing,
			   uint8_t framing, uint8_t bn, uint8_t irc,
			   uint8_t pto, uint8_t encryption, uint8_t *bcode)
{
	/* TODO: Implement */
	(void)big_handle;
	(void)adv_handle;
	(void)num_bis;
	(void)sdu_interval;
	(void)iso_interval;
	(void)nse;
	(void)max_sdu;
	(void)max_pdu;
	(void)phy;
	(void)packing;
	(void)framing;
	(void)bn;
	(void)irc;
	(void)pto;
	(void)encryption;
	(void)bcode;

	return BT_HCI_ERR_CMD_DISALLOWED;
}
526
/* Terminate a Broadcast Isochronous Group (BIG).
 *
 * Removes the Host-to-Controller ISO data paths of every BIS, strips the
 * BIGInfo ACAD from the periodic advertising PDU, prepares the BIG
 * Terminate event node (enqueued later after the Tx flush), and requests
 * the LLL terminate procedure.
 *
 * @param big_handle BIG identifier.
 * @param reason     HCI reason code supplied by the Host.
 *
 * @return BT_HCI_ERR_SUCCESS or an HCI error code.
 */
uint8_t ll_big_terminate(uint8_t big_handle, uint8_t reason)
{
	struct lll_adv_sync *lll_adv_sync;
	struct lll_adv_iso *lll_adv_iso;
	struct ll_adv_iso_set *adv_iso;
	struct pdu_adv *pdu_prev, *pdu;
	struct node_rx_pdu *node_rx;
	struct lll_adv *lll_adv;
	struct ll_adv_set *adv;
	uint16_t stream_handle;
	uint16_t handle;
	uint8_t num_bis;
	uint8_t ter_idx;
	uint8_t err;

	adv_iso = adv_iso_get(big_handle);
	if (!adv_iso) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	lll_adv_iso = &adv_iso->lll;
	lll_adv = lll_adv_iso->adv;
	if (!lll_adv) {
		/* BIG not created (no advertising association) */
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	if (lll_adv_iso->term_req) {
		/* Terminate already in progress */
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Remove ISO data path, keeping data from entering Tx pipeline */
	num_bis = lll_adv_iso->num_bis;
	while (num_bis--) {
		stream_handle = lll_adv_iso->stream_handle[num_bis];
		handle = LL_BIS_ADV_HANDLE_FROM_IDX(stream_handle);
		err = ll_remove_iso_path(handle,
					 BIT(BT_HCI_DATAPATH_DIR_HOST_TO_CTLR));
		if (err) {
			return err;
		}
	}

	lll_adv_sync = lll_adv->sync;
	adv = HDR_LLL2ULL(lll_adv);

	/* Allocate next PDU */
	err = ull_adv_sync_pdu_alloc(adv, ULL_ADV_PDU_EXTRA_DATA_ALLOC_IF_EXIST,
				     &pdu_prev, &pdu, NULL, NULL, &ter_idx);
	if (err) {
		return err;
	}

	/* Remove ACAD to AUX_SYNC_IND */
	err = ull_adv_sync_pdu_set_clear(lll_adv_sync, pdu_prev, pdu,
					 0U, ULL_ADV_PDU_HDR_FIELD_ACAD, NULL);
	if (err) {
		return err;
	}

	lll_adv_sync_data_enqueue(lll_adv_sync, ter_idx);

	/* Prepare BIG terminate event, will be enqueued after tx flush */
	node_rx = (void *)&adv_iso->node_rx_terminate;
	node_rx->hdr.type = NODE_RX_TYPE_BIG_TERMINATE;
	node_rx->hdr.handle = big_handle;
	node_rx->hdr.rx_ftr.param = adv_iso;

	/* Host-requested remote-termination reason is reported to the Host
	 * as localhost terminated, per HCI event semantics.
	 */
	if (reason == BT_HCI_ERR_REMOTE_USER_TERM_CONN) {
		*((uint8_t *)node_rx->pdu) = BT_HCI_ERR_LOCALHOST_TERM_CONN;
	} else {
		*((uint8_t *)node_rx->pdu) = reason;
	}

	/* Request terminate procedure */
	lll_adv_iso->term_reason = reason;
	lll_adv_iso->term_req = 1U;

	return BT_HCI_ERR_SUCCESS;
}
606
/* Power-up initialization of the advertising ISO module.
 *
 * Returns 0 on success, else the error from the common reset routine.
 */
int ull_adv_iso_init(void)
{
	return init_reset();
}
618
/* HCI reset handling for the advertising ISO module.
 *
 * Returns 0 on success, else the error from the common reset routine.
 */
int ull_adv_iso_reset(void)
{
	return init_reset();
}
630
/* Public lookup of an advertising ISO set context by handle.
 *
 * Returns NULL for an out-of-range handle.
 */
struct ll_adv_iso_set *ull_adv_iso_get(uint8_t handle)
{
	return adv_iso_get(handle);
}
635
ull_adv_iso_chm_update(void)636 uint8_t ull_adv_iso_chm_update(void)
637 {
638 uint8_t handle;
639
640 handle = CONFIG_BT_CTLR_ADV_ISO_SET;
641 while (handle--) {
642 (void)adv_iso_chm_update(handle);
643 }
644
645 /* TODO: Should failure due to Channel Map Update being already in
646 * progress be returned to caller?
647 */
648 return 0;
649 }
650
/* Handle completion of a BIG channel map update.
 *
 * Called in Thread context with the rx node whose footer carries the ISO
 * LLL context; commits the new channel map into the BIGInfo ACAD of the
 * periodic advertising PDU.
 *
 * @param rx Rx node with rx_ftr.param pointing at the lll_adv_iso context.
 */
void ull_adv_iso_chm_complete(struct node_rx_hdr *rx)
{
	struct lll_adv_sync *sync_lll;
	struct lll_adv_iso *iso_lll;
	struct lll_adv *adv_lll;

	iso_lll = rx->rx_ftr.param;
	adv_lll = iso_lll->adv;
	sync_lll = adv_lll->sync;

	/* Update Channel Map in BIGInfo in the Periodic Advertising PDU.
	 * Loop until the req/ack snapshot is stable: LLL may raise another
	 * done request while we are committing.
	 */
	while (sync_lll->iso_chm_done_req != sync_lll->iso_chm_done_ack) {
		sync_lll->iso_chm_done_ack = sync_lll->iso_chm_done_req;

		adv_iso_chm_complete_commit(iso_lll);
	}
}
668
669 #if defined(CONFIG_BT_CTLR_HCI_ADV_HANDLE_MAPPING)
/* Map an HCI BIG handle to the controller's internal handle.
 *
 * Only instances already associated with an advertising set are matched.
 *
 * @param hci_handle HCI-facing BIG handle.
 * @param handle     Out: internal index on success.
 *
 * @return 0 on success, else BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER.
 */
uint8_t ll_adv_iso_by_hci_handle_get(uint8_t hci_handle, uint8_t *handle)
{
	for (uint8_t idx = 0U; idx < CONFIG_BT_CTLR_ADV_ISO_SET; idx++) {
		const struct ll_adv_iso_set *adv_iso = &ll_adv_iso[idx];

		if (adv_iso->lll.adv && (adv_iso->hci_handle == hci_handle)) {
			*handle = idx;
			return 0U;
		}
	}

	return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
}
687
ll_adv_iso_by_hci_handle_new(uint8_t hci_handle,uint8_t * handle)688 uint8_t ll_adv_iso_by_hci_handle_new(uint8_t hci_handle, uint8_t *handle)
689 {
690 struct ll_adv_iso_set *adv_iso, *adv_iso_empty;
691 uint8_t idx;
692
693 adv_iso = &ll_adv_iso[0];
694 adv_iso_empty = NULL;
695
696 for (idx = 0U; idx < CONFIG_BT_CTLR_ADV_ISO_SET; idx++, adv_iso++) {
697 if (adv_iso->lll.adv) {
698 if (adv_iso->hci_handle == hci_handle) {
699 return BT_HCI_ERR_CMD_DISALLOWED;
700 }
701 } else if (!adv_iso_empty) {
702 adv_iso_empty = adv_iso;
703 *handle = idx;
704 }
705 }
706
707 if (adv_iso_empty) {
708 memset(adv_iso_empty, 0U, sizeof(*adv_iso_empty));
709 adv_iso_empty->hci_handle = hci_handle;
710 return 0U;
711 }
712
713 return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
714 }
715 #endif /* CONFIG_BT_CTLR_HCI_ADV_HANDLE_MAPPING */
716
/* Schedule computation of the BIGInfo offset fields.
 *
 * Enqueues a mayfly from ULL_HIGH to ULL_LOW context that fills the BIG
 * offset into the periodic advertising PDU (see mfy_iso_offset_get).
 *
 * @param sync Periodic advertising set the BIG rides on.
 */
void ull_adv_iso_offset_get(struct ll_adv_sync_set *sync)
{
	/* Single static mayfly: only one offset-get may be in flight */
	static memq_link_t link;
	static struct mayfly mfy = {0U, 0U, &link, NULL, mfy_iso_offset_get};
	uint32_t ret;

	mfy.param = sync;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1,
			     &mfy);
	LL_ASSERT(!ret);
}
728
729 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
/* Fill the volatile BIGInfo fields in the periodic advertising PDU.
 *
 * Runs in LLL context for each AUX_SYNC_IND transmission: writes the
 * current BIG offset and the 39-bit payload count (preserving the framing
 * bit), and refreshes the channel map while a map update commit is still
 * pending in Thread context.
 */
void ull_adv_iso_lll_biginfo_fill(struct pdu_adv *pdu, struct lll_adv_sync *lll_sync)
{
	struct lll_adv_iso *lll_iso;
	uint16_t latency_prepare;
	struct pdu_big_info *bi;
	uint64_t payload_count;

	lll_iso = lll_sync->iso;

	/* Calculate current payload count. If refcount is non-zero, we have called
	 * prepare and the LLL implementation has incremented latency_prepare already.
	 * In this case we need to subtract lazy + 1 from latency_prepare
	 */
	latency_prepare = lll_iso->latency_prepare;
	if (ull_ref_get(HDR_LLL2ULL(lll_iso))) {
		/* We are in post-prepare. latency_prepare is already
		 * incremented by lazy + 1 for next event
		 */
		latency_prepare -= lll_iso->iso_lazy + 1;
	}

	payload_count = lll_iso->payload_count + ((latency_prepare +
						   lll_iso->iso_lazy) * lll_iso->bn);

	bi = big_info_get(pdu);
	big_info_offset_fill(bi, lll_iso->ticks_sync_pdu_offset, 0U);
	/* Assign the 39-bit payload count, retaining the 1 MS bit framing value */
	bi->payload_count_framing[0] = payload_count;
	bi->payload_count_framing[1] = payload_count >> 8;
	bi->payload_count_framing[2] = payload_count >> 16;
	bi->payload_count_framing[3] = payload_count >> 24;
	/* Keep bit 7 (framing), replace the low 7 payload count bits */
	bi->payload_count_framing[4] &= ~0x7F;
	bi->payload_count_framing[4] |= (payload_count >> 32) & 0x7F;

	/* Update Channel Map in the BIGInfo until Thread context gets a
	 * chance to update the PDU with new Channel Map.
	 */
	if (lll_sync->iso_chm_done_req != lll_sync->iso_chm_done_ack) {
		pdu_big_info_chan_map_phy_set(bi->chm_phy,
					      lll_iso->data_chan_map,
					      lll_iso->phy);
	}
}
773 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
774
/* Handle the first BIG event done: emit the BIG Complete event.
 *
 * Switches subsequent radio events to the normal prepare function and
 * sends the pre-allocated NODE_RX_TYPE_BIG_COMPLETE node to the Host,
 * consuming its link so duplicate done events are ignored.
 */
void ull_adv_iso_done_complete(struct node_rx_event_done *done)
{
	struct ll_adv_iso_set *adv_iso;
	struct lll_adv_iso *lll;
	struct node_rx_hdr *rx;
	memq_link_t *link;

	/* switch to normal prepare */
	mfy_lll_prepare.fp = lll_adv_iso_prepare;

	/* Get reference to ULL context */
	adv_iso = CONTAINER_OF(done->param, struct ll_adv_iso_set, ull);
	lll = &adv_iso->lll;

	/* Prepare BIG complete event */
	rx = (void *)&adv_iso->node_rx_complete;
	link = rx->link;
	if (!link) {
		/* NOTE: When BIS events have overlapping prepare placed in
		 *       in the pipeline, more than one done complete event
		 *       will be generated, lets ignore the additional done
		 *       events.
		 */
		return;
	}
	/* Consume the link so later done events take the early return above */
	rx->link = NULL;

	rx->type = NODE_RX_TYPE_BIG_COMPLETE;
	rx->handle = lll->handle;
	rx->rx_ftr.param = adv_iso;

	ll_rx_put_sched(link, rx);
}
808
/* Handle BIG event done while terminating: stop the BIG ticker.
 *
 * The ticker stop op callback continues the teardown (Tx flush and BIG
 * Terminate event). The handle is invalidated so repeated done events
 * from a flushed prepare pipeline are ignored.
 */
void ull_adv_iso_done_terminate(struct node_rx_event_done *done)
{
	struct ll_adv_iso_set *adv_iso;
	struct lll_adv_iso *lll;
	uint32_t ret;

	/* Get reference to ULL context */
	adv_iso = CONTAINER_OF(done->param, struct ll_adv_iso_set, ull);
	lll = &adv_iso->lll;

	/* Skip if terminated already (we come here if pipeline being flushed) */
	if (unlikely(lll->handle == LLL_ADV_HANDLE_INVALID)) {
		return;
	}

	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			  (TICKER_ID_ADV_ISO_BASE + lll->handle),
			  ticker_stop_op_cb, adv_iso);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));

	/* Invalidate the handle */
	lll->handle = LLL_ADV_HANDLE_INVALID;
}
833
/* Resolve the owning advertising ISO set from a BIS stream handle.
 *
 * Returns NULL for an out-of-range stream handle.
 */
struct ll_adv_iso_set *ull_adv_iso_by_stream_get(uint16_t handle)
{
	if (handle < CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT) {
		return adv_iso_get(stream_pool[handle].big_handle);
	}

	return NULL;
}
842
/* Look up a BIS stream context by stream handle.
 *
 * Returns NULL for an out-of-range stream handle.
 */
struct lll_adv_iso_stream *ull_adv_iso_stream_get(uint16_t handle)
{
	if (handle < CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT) {
		return &stream_pool[handle];
	}

	return NULL;
}
851
/* LLL-facing stream lookup; shares the ULL lookup implementation. */
struct lll_adv_iso_stream *ull_adv_iso_lll_stream_get(uint16_t handle)
{
	return ull_adv_iso_stream_get(handle);
}
856
/* Release all BIS stream contexts of an advertising ISO set.
 *
 * For each stream: deinitialize the Tx memq (keeping the recovered link
 * for later re-init), destroy and release any ISO data path, and return
 * the context to the stream pool. Finally severs the periodic and
 * extended advertising associations of the BIG.
 *
 * NOTE: consumes lll->num_bis (decremented to zero).
 */
void ull_adv_iso_stream_release(struct ll_adv_iso_set *adv_iso)
{
	struct lll_adv_iso *lll;

	lll = &adv_iso->lll;
	while (lll->num_bis--) {
		struct lll_adv_iso_stream *stream;
		struct ll_iso_datapath *dp;
		uint16_t stream_handle;
		memq_link_t *link;

		stream_handle = lll->stream_handle[lll->num_bis];
		stream = ull_adv_iso_stream_get(stream_handle);

		/* Tx memq must be flushed already; recover its link for the
		 * next memq_init on this stream.
		 */
		LL_ASSERT(!stream->link_tx_free);
		link = memq_deinit(&stream->memq_tx.head,
				   &stream->memq_tx.tail);
		LL_ASSERT(link);
		stream->link_tx_free = link;

		dp = stream->dp;
		if (dp) {
			stream->dp = NULL;
			isoal_source_destroy(dp->source_hdl);
			ull_iso_datapath_release(dp);
		}

		mem_release(stream, &stream_free);
	}

	/* Remove Periodic Advertising association */
	lll->adv->sync->iso = NULL;

	/* Remove Extended Advertising association */
	lll->adv = NULL;
}
893
init_reset(void)894 static int init_reset(void)
895 {
896 /* Add initializations common to power up initialization and HCI reset
897 * initializations.
898 */
899
900 mem_init((void *)stream_pool, sizeof(struct lll_adv_iso_stream),
901 CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT, &stream_free);
902
903 return 0;
904 }
905
/* Bounds-checked access to an advertising ISO set instance. */
static struct ll_adv_iso_set *adv_iso_get(uint8_t handle)
{
	return (handle < CONFIG_BT_CTLR_ADV_SET) ? &ll_adv_iso[handle] : NULL;
}
914
adv_iso_stream_acquire(void)915 static struct stream *adv_iso_stream_acquire(void)
916 {
917 return mem_acquire(&stream_free);
918 }
919
adv_iso_stream_handle_get(struct lll_adv_iso_stream * stream)920 static uint16_t adv_iso_stream_handle_get(struct lll_adv_iso_stream *stream)
921 {
922 return mem_index_get(stream, stream_pool, sizeof(*stream));
923 }
924
ptc_calc(const struct lll_adv_iso * lll,uint32_t event_spacing,uint32_t event_spacing_max)925 static uint8_t ptc_calc(const struct lll_adv_iso *lll, uint32_t event_spacing,
926 uint32_t event_spacing_max)
927 {
928 if (event_spacing < event_spacing_max) {
929 uint8_t ptc;
930
931 /* Possible maximum Pre-transmission Subevents per BIS */
932 ptc = ((event_spacing_max - event_spacing) /
933 (lll->sub_interval * lll->bn * lll->num_bis)) *
934 lll->bn;
935
936 /* FIXME: Here we retrict to a maximum of BN Pre-Transmission
937 * subevents per BIS
938 */
939 ptc = MIN(ptc, lll->bn);
940
941 return ptc;
942 }
943
944 return 0U;
945 }
946
/* Schedule the periodic ticker that drives BIG events.
 *
 * Computes the BIG event airtime (slot), places the anchor after existing
 * extended/periodic advertising events when possible, arms the first radio
 * event with the ISO "create prepare" function, and starts the ticker at
 * the ISO interval.
 *
 * @param adv_iso         Advertising ISO set to start.
 * @param iso_interval_us ISO interval in microseconds.
 *
 * @return TICKER_STATUS_SUCCESS or a ticker error status.
 */
static uint32_t adv_iso_start(struct ll_adv_iso_set *adv_iso,
			      uint32_t iso_interval_us)
{
	uint32_t ticks_slot_overhead;
	struct lll_adv_iso *lll_iso;
	uint32_t ticks_slot_offset;
	uint32_t volatile ret_cb;
	uint32_t ticks_anchor;
	uint32_t ctrl_spacing;
	uint32_t pdu_spacing;
	uint32_t ticks_slot;
	uint32_t slot_us;
	uint32_t ret;
	int err;

	ull_hdr_init(&adv_iso->ull);

	lll_iso = &adv_iso->lll;

	/* Airtime for one BIS subevent (data PDU + minimum subevent space)
	 * and for one BIG control PDU.
	 */
	pdu_spacing = PDU_BIS_US(lll_iso->max_pdu, lll_iso->enc, lll_iso->phy,
				 lll_iso->phy_flags) +
		      EVENT_MSS_US;
	ctrl_spacing = PDU_BIS_US(sizeof(struct pdu_big_ctrl), lll_iso->enc,
				  lll_iso->phy, lll_iso->phy_flags);
	slot_us = (pdu_spacing * lll_iso->nse * lll_iso->num_bis) +
		  ctrl_spacing;
	slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;

	adv_iso->ull.ticks_active_to_start = 0U;
	adv_iso->ull.ticks_prepare_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	adv_iso->ull.ticks_preempt_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
	adv_iso->ull.ticks_slot = HAL_TICKER_US_TO_TICKS(slot_us);

	ticks_slot_offset = MAX(adv_iso->ull.ticks_active_to_start,
				adv_iso->ull.ticks_prepare_to_start);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}
	ticks_slot = adv_iso->ull.ticks_slot + ticks_slot_overhead;

	/* Find the slot after Periodic Advertisings events */
	ticks_anchor = ticker_ticks_now_get() +
		       HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);
	err = ull_sched_adv_aux_sync_free_anchor_get(ticks_slot, &ticks_anchor);
	if (!err) {
		/* Pad the anchor so the BIG event keeps MAFS distance from
		 * the preceding advertising event, plus ticker resolution
		 * margin on both sides.
		 */
		ticks_anchor += HAL_TICKER_US_TO_TICKS(
					MAX(EVENT_MAFS_US,
					    EVENT_OVERHEAD_START_US) -
					EVENT_OVERHEAD_START_US +
					(EVENT_TICKER_RES_MARGIN_US << 1));
	}

	/* setup to use ISO create prepare function for first radio event */
	mfy_lll_prepare.fp = lll_adv_iso_create_prepare;

	ret_cb = TICKER_STATUS_BUSY;
	ret = ticker_start(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
			   (TICKER_ID_ADV_ISO_BASE + lll_iso->handle),
			   ticks_anchor, 0U,
			   HAL_TICKER_US_TO_TICKS(iso_interval_us),
			   HAL_TICKER_REMAINDER(iso_interval_us),
			   TICKER_NULL_LAZY, ticks_slot, ticker_cb, adv_iso,
			   ull_ticker_status_give, (void *)&ret_cb);
	ret = ull_ticker_status_take(ret, &ret_cb);

	return ret;
}
1018
/* Request a channel map update procedure on one BIG instance.
 *
 * Disallowed while the BIG is terminating or a previous update is still
 * unacknowledged by LLL.
 *
 * @return BT_HCI_ERR_SUCCESS or an HCI error code.
 */
static uint8_t adv_iso_chm_update(uint8_t big_handle)
{
	struct ll_adv_iso_set *adv_iso;
	struct lll_adv_iso *lll_iso;

	adv_iso = adv_iso_get(big_handle);
	if (!adv_iso) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	lll_iso = &adv_iso->lll;
	if (lll_iso->term_req || (lll_iso->chm_req != lll_iso->chm_ack)) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Snapshot the new channel map, then signal LLL via req counter */
	lll_iso->chm_chan_count = ull_chan_map_get(lll_iso->chm_chan_map);
	lll_iso->chm_req++;

	return BT_HCI_ERR_SUCCESS;
}
1041
/* Commit the new/current channel map into the BIGInfo carried in the
 * ACAD of the periodic advertising PDU, after the BIG channel map
 * update procedure has completed in LLL.
 *
 * Allocates the next sync PDU buffer, locates the BIGInfo AD structure
 * inside the ACAD, rewrites its channel map/PHY field and enqueues the
 * new PDU for transmission.
 */
static void adv_iso_chm_complete_commit(struct lll_adv_iso *lll_iso)
{
	uint8_t hdr_data[ULL_ADV_HDR_DATA_LEN_SIZE +
			 ULL_ADV_HDR_DATA_ACAD_PTR_SIZE];
	struct pdu_adv *pdu_prev, *pdu;
	struct lll_adv_sync *lll_sync;
	struct pdu_big_info *bi;
	struct ll_adv_set *adv;
	uint8_t acad_len;
	uint8_t ter_idx;
	uint8_t ad_len;
	uint8_t *acad;
	uint8_t *ad;
	uint8_t len;
	uint8_t err;

	/* Allocate next PDU */
	adv = HDR_LLL2ULL(lll_iso->adv);
	err = ull_adv_sync_pdu_alloc(adv, ULL_ADV_PDU_EXTRA_DATA_ALLOC_IF_EXIST,
				     &pdu_prev, &pdu, NULL, NULL, &ter_idx);
	LL_ASSERT(!err);

	/* Get the size of current ACAD, first octet returns the old length and
	 * followed by pointer to previous offset to ACAD in the PDU.
	 */
	lll_sync = adv->lll.sync;
	hdr_data[ULL_ADV_HDR_DATA_LEN_OFFSET] = 0U;
	err = ull_adv_sync_pdu_set_clear(lll_sync, pdu_prev, pdu,
					 ULL_ADV_PDU_HDR_FIELD_ACAD, 0U,
					 &hdr_data);
	LL_ASSERT(!err);

	/* Dev assert if ACAD empty */
	LL_ASSERT(hdr_data[ULL_ADV_HDR_DATA_LEN_OFFSET]);

	/* Get the pointer, prev content and size of current ACAD.
	 * NOTE(review): the second call, now that hdr_data carries the
	 * previous ACAD length, presumably copies the existing ACAD into the
	 * new PDU and returns a pointer to it — confirm against
	 * ull_adv_sync_pdu_set_clear semantics.
	 */
	err = ull_adv_sync_pdu_set_clear(lll_sync, pdu_prev, pdu,
					 ULL_ADV_PDU_HDR_FIELD_ACAD, 0U,
					 &hdr_data);
	LL_ASSERT(!err);

	/* Find the BIGInfo: walk the AD structures in the ACAD until one
	 * with type BT_DATA_BIG_INFO is found.
	 */
	acad_len = hdr_data[ULL_ADV_HDR_DATA_LEN_OFFSET];
	len = acad_len;
	(void)memcpy(&acad, &hdr_data[ULL_ADV_HDR_DATA_ACAD_PTR_OFFSET],
		     sizeof(acad));
	ad = acad;
	do {
		ad_len = ad[PDU_ADV_DATA_HEADER_LEN_OFFSET];
		if (ad_len &&
		    (ad[PDU_ADV_DATA_HEADER_TYPE_OFFSET] == BT_DATA_BIG_INFO)) {
			break;
		}

		/* Include the length octet itself (not counted in ad_len)
		 * when advancing to the next AD structure.
		 */
		ad_len += 1U;

		/* AD structure must not run past the end of the ACAD */
		LL_ASSERT(ad_len <= len);

		ad += ad_len;
		len -= ad_len;
	} while (len);
	/* Loop must have exited via break, i.e. BIGInfo was found */
	LL_ASSERT(len);

	/* Get reference to BIGInfo */
	bi = (void *)&ad[PDU_ADV_DATA_HEADER_DATA_OFFSET];

	/* Copy the new/current Channel Map */
	pdu_big_info_chan_map_phy_set(bi->chm_phy, lll_iso->data_chan_map,
				      lll_iso->phy);

	/* Commit the new PDU Buffer */
	lll_adv_sync_data_enqueue(lll_sync, ter_idx);
}
1115
/* Mayfly (ULL_LOW execution context) that fills timing information into
 * the BIGInfo of the latest periodic advertising PDU: the offset from
 * now to the next BIG event, the BIS payload count of that event and,
 * while a channel map update commit is pending in Thread context, the
 * current channel map.
 *
 * param is the ll_adv_sync_set whose sync PDU carries the BIGInfo.
 */
static void mfy_iso_offset_get(void *param)
{
	struct lll_adv_sync *lll_sync;
	struct ll_adv_sync_set *sync;
	struct lll_adv_iso *lll_iso;
	uint32_t ticks_to_expire;
	struct pdu_big_info *bi;
	uint32_t ticks_current;
	uint64_t payload_count;
	struct pdu_adv *pdu;
	uint8_t ticker_id;
	uint16_t lazy;
	uint8_t retry;
	uint8_t id;

	sync = param;
	lll_sync = &sync->lll;
	lll_iso = lll_sync->iso;
	ticker_id = TICKER_ID_ADV_ISO_BASE + lll_iso->handle;

	/* Iterate over upcoming ticker expiries until the BIG event ticker
	 * is found; ticks_to_expire then holds the time from ticks_current
	 * to that BIG event.
	 */
	id = TICKER_NULL;
	ticks_to_expire = 0U;
	ticks_current = 0U;
	retry = 4U;
	do {
		uint32_t volatile ret_cb;
		uint32_t ticks_previous;
		uint32_t ret;
		bool success;

		ticks_previous = ticks_current;

		ret_cb = TICKER_STATUS_BUSY;
		ret = ticker_next_slot_get_ext(TICKER_INSTANCE_ID_CTLR,
					       TICKER_USER_ID_ULL_LOW,
					       &id, &ticks_current,
					       &ticks_to_expire, NULL, &lazy,
					       NULL, NULL,
					       ticker_op_cb, (void *)&ret_cb);
		if (ret == TICKER_STATUS_BUSY) {
			/* Busy wait until Ticker Job is enabled after any Radio
			 * event is done using the Radio hardware. Ticker Job
			 * ISR is disabled during Radio events in LOW_LAT
			 * feature to avoid Radio ISR latencies.
			 */
			while (ret_cb == TICKER_STATUS_BUSY) {
				ticker_job_sched(TICKER_INSTANCE_ID_CTLR,
						 TICKER_USER_ID_ULL_LOW);
			}
		}

		success = (ret_cb == TICKER_STATUS_SUCCESS);
		LL_ASSERT(success);

		/* The reference tick must stay stable across iterations; a
		 * bounded number of retries is tolerated otherwise.
		 * NOTE(review): retry-- is a side effect inside LL_ASSERT;
		 * confirm LL_ASSERT always evaluates its expression in all
		 * build configurations.
		 */
		LL_ASSERT((ticks_current == ticks_previous) || retry--);

		LL_ASSERT(id != TICKER_NULL);
	} while (id != ticker_id);

	/* Payload count of the BIG event being pointed at, accounting for
	 * prepare latency and skipped (lazy) ticker expiries.
	 */
	payload_count = lll_iso->payload_count + ((lll_iso->latency_prepare +
						   lazy) * lll_iso->bn);

	pdu = lll_adv_sync_data_latest_peek(lll_sync);
	bi = big_info_get(pdu);
	big_info_offset_fill(bi, ticks_to_expire, 0U);
	/* Assign the 39-bit payload count, retaining the 1 MS bit framing value */
	bi->payload_count_framing[0] = payload_count;
	bi->payload_count_framing[1] = payload_count >> 8;
	bi->payload_count_framing[2] = payload_count >> 16;
	bi->payload_count_framing[3] = payload_count >> 24;
	bi->payload_count_framing[4] &= ~0x7F;
	bi->payload_count_framing[4] |= (payload_count >> 32) & 0x7F;

	/* Update Channel Map in the BIGInfo until Thread context gets a
	 * chance to update the PDU with new Channel Map.
	 */
	if (lll_sync->iso_chm_done_req != lll_sync->iso_chm_done_ack) {
		pdu_big_info_chan_map_phy_set(bi->chm_phy,
					      lll_iso->data_chan_map,
					      lll_iso->phy);
	}
}
1198
pdu_big_info_chan_map_phy_set(uint8_t * chm_phy,uint8_t * chan_map,uint8_t phy)1199 static void pdu_big_info_chan_map_phy_set(uint8_t *chm_phy, uint8_t *chan_map,
1200 uint8_t phy)
1201 {
1202 (void)memcpy(chm_phy, chan_map, PDU_CHANNEL_MAP_SIZE);
1203 chm_phy[4] &= 0x1F;
1204 chm_phy[4] |= ((find_lsb_set(phy) - 1U) << 5);
1205 }
1206
/* Locate the BIGInfo inside an AUX_SYNC_IND PDU by stepping over the
 * optional extended header fields that precede the ACAD.
 */
static inline struct pdu_big_info *big_info_get(struct pdu_adv *pdu)
{
	struct pdu_adv_com_ext_adv *p;
	struct pdu_adv_ext_hdr *h;
	uint8_t *ptr;

	/* Start at the extended header flags; ptr walks the optional
	 * fields in their on-air order.
	 */
	p = (void *)&pdu->adv_ext_ind;
	h = (void *)p->ext_hdr_adv_data;
	ptr = h->data;

	/* No AdvA and TargetA */

	/* traverse through CTE Info, if present */
	if (h->cte_info) {
		ptr += sizeof(struct pdu_cte_info);
	}

	/* traverse through ADI, if present */
	if (h->adi) {
		ptr += sizeof(struct pdu_adv_adi);
	}

	/* traverse through aux ptr, if present */
	if (h->aux_ptr) {
		ptr += sizeof(struct pdu_adv_aux_ptr);
	}

	/* No SyncInfo */

	/* traverse through Tx Power, if present */
	if (h->tx_pwr) {
		ptr++;
	}

	/* FIXME: Parse and find the Length encoded AD Format */
	/* NOTE(review): the 2 skipped octets are presumably the AD
	 * structure length and type of the BIGInfo, assuming BIGInfo is the
	 * first AD structure in the ACAD — confirm (see FIXME above).
	 */
	ptr += 2;

	return (void *)ptr;
}
1246
big_info_offset_fill(struct pdu_big_info * bi,uint32_t ticks_offset,uint32_t start_us)1247 static inline void big_info_offset_fill(struct pdu_big_info *bi,
1248 uint32_t ticks_offset,
1249 uint32_t start_us)
1250 {
1251 uint32_t offs;
1252
1253 offs = HAL_TICKER_TICKS_TO_US(ticks_offset) - start_us;
1254 offs = offs / OFFS_UNIT_30_US;
1255 if (!!(offs >> OFFS_UNIT_BITS)) {
1256 bi->offs = sys_cpu_to_le16(offs / (OFFS_UNIT_300_US /
1257 OFFS_UNIT_30_US));
1258 bi->offs_units = 1U;
1259 } else {
1260 bi->offs = sys_cpu_to_le16(offs);
1261 bi->offs_units = 0U;
1262 }
1263 }
1264
/* Ticker expiry callback for a BIG event (ULL_HIGH context): enqueues
 * the LLL prepare mayfly and records the BIG reference point (anchor
 * time) of the current BIG event.
 */
static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param)
{
	/* Static prepare parameters, reused on every expiry.
	 * NOTE(review): assumes the previous prepare has been consumed by
	 * LLL before the next expiry overwrites it — confirm.
	 */
	static struct lll_prepare_param p;
	struct ll_adv_iso_set *adv_iso = param;
	uint32_t remainder_us;
	uint32_t ret;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_A(1);

	/* Increment prepare reference count */
	ref = ull_ref_inc(&adv_iso->ull);
	LL_ASSERT(ref);

	/* Append timing parameters */
	p.ticks_at_expire = ticks_at_expire;
	p.remainder = remainder;
	p.lazy = lazy;
	p.force = force;
	p.param = &adv_iso->lll;
	mfy_lll_prepare.param = &p;

	/* Kick LLL prepare */
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, 0,
			     &mfy_lll_prepare);
	LL_ASSERT(!ret);

	/* Calculate the BIG reference point of current BIG event, i.e. the
	 * jitter-free expiry time plus the event start overhead, wrapped to
	 * the ISOAL time domain.
	 */
	remainder_us = remainder;
	hal_ticker_remove_jitter(&ticks_at_expire, &remainder_us);
	ticks_at_expire &= HAL_TICKER_CNTR_MASK;
	adv_iso->big_ref_point = isoal_get_wrapped_time_us(HAL_TICKER_TICKS_TO_US(ticks_at_expire),
							   (remainder_us +
							    EVENT_OVERHEAD_START_US));

	DEBUG_RADIO_PREPARE_A(1);
}
1304
/* Generic ticker operation callback: store the operation status into
 * the caller-provided status word so the caller can poll for
 * completion.
 */
static void ticker_op_cb(uint32_t status, void *param)
{
	uint32_t volatile *status_out = param;

	*status_out = status;
}
1309
/* Ticker stop completion callback (ULL_LOW context): after the BIG
 * event ticker has stopped, schedule adv_iso_disable in the ULL_HIGH
 * context to abort any pending LLL events and continue termination.
 * param is passed through to adv_iso_disable (the ll_adv_iso_set).
 */
static void ticker_stop_op_cb(uint32_t status, void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0U, 0U, &link, NULL, adv_iso_disable};
	uint32_t ret;

	/* The ticker stop operation must have succeeded */
	LL_ASSERT(status == TICKER_STATUS_SUCCESS);

	/* Check if any pending LLL events that need to be aborted */
	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
			     TICKER_USER_ID_ULL_HIGH, 0U, &mfy);
	LL_ASSERT(!ret);
}
1324
/* Mayfly (ULL_HIGH context): disable the BIG after its ticker stopped.
 *
 * If LLL still has events for this BIG in the pipeline (non-zero ULL
 * reference count), request lll_disable and arrange for disabled_cb to
 * run once the reference count drops to zero; otherwise invoke
 * disabled_cb directly.
 */
static void adv_iso_disable(void *param)
{
	struct ll_adv_iso_set *adv_iso;
	struct ull_hdr *hdr;

	/* Check ref count to determine if any pending LLL events in pipeline */
	adv_iso = param;
	hdr = &adv_iso->ull;
	if (ull_ref_get(hdr)) {
		static memq_link_t link;
		static struct mayfly mfy = {0U, 0U, &link, NULL, lll_disable};
		uint32_t ret;

		mfy.param = &adv_iso->lll;

		/* Setup disabled callback to be called when ref count
		 * returns to zero.
		 */
		LL_ASSERT(!hdr->disabled_cb);
		/* NOTE: disabled_param is assigned before disabled_cb; the
		 * callback pointer acts as the "armed" flag.
		 */
		hdr->disabled_param = mfy.param;
		hdr->disabled_cb = disabled_cb;

		/* Trigger LLL disable */
		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
				     TICKER_USER_ID_LLL, 0U, &mfy);
		LL_ASSERT(!ret);
	} else {
		/* No pending LLL events */
		disabled_cb(&adv_iso->lll);
	}
}
1356
disabled_cb(void * param)1357 static void disabled_cb(void *param)
1358 {
1359 static memq_link_t link;
1360 static struct mayfly mfy = {0U, 0U, &link, NULL, tx_lll_flush};
1361 uint32_t ret;
1362
1363 mfy.param = param;
1364 ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
1365 TICKER_USER_ID_LLL, 0U, &mfy);
1366 LL_ASSERT(!ret);
1367 }
1368
tx_lll_flush(void * param)1369 static void tx_lll_flush(void *param)
1370 {
1371 struct ll_adv_iso_set *adv_iso;
1372 struct lll_adv_iso *lll;
1373 struct node_rx_pdu *rx;
1374 memq_link_t *link;
1375 uint8_t num_bis;
1376
1377 /* Get reference to ULL context */
1378 lll = param;
1379
1380 /* Flush TX */
1381 num_bis = lll->num_bis;
1382 while (num_bis--) {
1383 struct lll_adv_iso_stream *stream;
1384 struct node_tx_iso *tx;
1385 uint16_t stream_handle;
1386 memq_link_t *link;
1387 uint16_t handle;
1388
1389 stream_handle = lll->stream_handle[num_bis];
1390 handle = LL_BIS_ADV_HANDLE_FROM_IDX(stream_handle);
1391 stream = ull_adv_iso_stream_get(stream_handle);
1392
1393 link = memq_dequeue(stream->memq_tx.tail, &stream->memq_tx.head,
1394 (void **)&tx);
1395 while (link) {
1396 tx->next = link;
1397 ull_iso_lll_ack_enqueue(handle, tx);
1398
1399 link = memq_dequeue(stream->memq_tx.tail,
1400 &stream->memq_tx.head,
1401 (void **)&tx);
1402 }
1403 }
1404
1405 /* Get the terminate structure reserved in the ISO context.
1406 * The terminate reason and connection handle should already be
1407 * populated before this mayfly function was scheduled.
1408 */
1409 adv_iso = HDR_LLL2ULL(lll);
1410 rx = (void *)&adv_iso->node_rx_terminate;
1411 link = rx->hdr.link;
1412 LL_ASSERT(link);
1413 rx->hdr.link = NULL;
1414
1415 /* Enqueue the terminate towards ULL context */
1416 ull_rx_put_sched(link, rx);
1417 }
1418