1 /*
2  * Copyright (c) 2021 Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <soc.h>
8 #include <zephyr/kernel.h>
9 #include <zephyr/sys/byteorder.h>
10 #include <zephyr/bluetooth/hci_types.h>
11 
12 #include "hal/cpu.h"
13 #include "hal/ccm.h"
14 #include "hal/ticker.h"
15 
16 #include "util/util.h"
17 #include "util/mem.h"
18 #include "util/memq.h"
19 #include "util/mfifo.h"
20 #include "util/mayfly.h"
21 
22 #include "ticker/ticker.h"
23 
24 #include "pdu_df.h"
25 #include "lll/pdu_vendor.h"
26 #include "pdu.h"
27 
28 #include "lll.h"
29 #include "lll/lll_vendor.h"
30 #include "lll/lll_adv_types.h"
31 #include "lll_adv.h"
32 #include "lll/lll_adv_pdu.h"
33 #include "lll_adv_iso.h"
34 #include "lll_iso_tx.h"
35 
36 #include "isoal.h"
37 
38 #include "ull_adv_types.h"
39 #include "ull_iso_types.h"
40 
41 #include "ull_internal.h"
42 #include "ull_adv_internal.h"
43 #include "ull_chan_internal.h"
44 #include "ull_sched_internal.h"
45 #include "ull_iso_internal.h"
46 
47 #include "ll.h"
48 #include "ll_feat.h"
49 
50 #include "bt_crypto.h"
51 
52 #include "hal/debug.h"
53 
54 static int init_reset(void);
55 static struct ll_adv_iso_set *adv_iso_get(uint8_t handle);
56 static struct stream *adv_iso_stream_acquire(void);
57 static uint16_t adv_iso_stream_handle_get(struct lll_adv_iso_stream *stream);
58 static uint8_t ptc_calc(const struct lll_adv_iso *lll, uint32_t event_spacing,
59 			uint32_t event_spacing_max);
60 static uint32_t adv_iso_time_get(const struct ll_adv_iso_set *adv_iso, bool max);
61 static uint32_t adv_iso_start(struct ll_adv_iso_set *adv_iso,
62 			      uint32_t iso_interval_us);
63 static uint8_t adv_iso_chm_update(uint8_t big_handle);
64 static void adv_iso_chm_complete_commit(struct lll_adv_iso *lll_iso);
65 static void mfy_iso_offset_get(void *param);
66 static void pdu_big_info_chan_map_phy_set(uint8_t *chm_phy, uint8_t *chan_map,
67 					  uint8_t phy);
68 static inline struct pdu_big_info *big_info_get(struct pdu_adv *pdu);
69 static inline void big_info_offset_fill(struct pdu_big_info *bi,
70 					uint32_t ticks_offset,
71 					uint32_t start_us);
72 static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
73 		      uint32_t remainder, uint16_t lazy, uint8_t force,
74 		      void *param);
75 static void ticker_op_cb(uint32_t status, void *param);
76 static void ticker_stop_op_cb(uint32_t status, void *param);
77 static void adv_iso_disable(void *param);
78 static void disabled_cb(void *param);
79 static void tx_lll_flush(void *param);
80 
81 static memq_link_t link_lll_prepare;
82 static struct mayfly mfy_lll_prepare = {0U, 0U, &link_lll_prepare, NULL, NULL};
83 
84 static struct ll_adv_iso_set ll_adv_iso[CONFIG_BT_CTLR_ADV_ISO_SET];
85 static struct lll_adv_iso_stream
86 			stream_pool[CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT];
87 static void *stream_free;
88 
89 static uint8_t big_create(uint8_t big_handle, uint8_t adv_handle, uint8_t num_bis,
90 			  uint32_t sdu_interval, uint16_t max_sdu,
91 			  uint16_t max_latency, uint8_t rtn, uint8_t phy,
92 			  uint8_t packing, uint8_t framing, uint8_t encryption,
93 			  uint8_t *bcode,
94 			  uint16_t iso_interval, uint8_t nse, uint16_t max_pdu,
95 			  uint8_t bn, uint8_t irc, uint8_t pto, bool test_config)
96 {
97 	uint8_t bi_ad[PDU_BIG_INFO_ENCRYPTED_SIZE + 2U];
98 	struct lll_adv_sync *lll_adv_sync;
99 	struct lll_adv_iso *lll_adv_iso;
100 	struct ll_adv_iso_set *adv_iso;
101 	struct pdu_adv *pdu_prev, *pdu;
102 	struct pdu_big_info *big_info;
103 	uint32_t ticks_slot_overhead;
104 	struct ll_adv_sync_set *sync;
105 	struct ll_adv_aux_set *aux;
106 	uint32_t event_spacing_max;
107 	uint8_t pdu_big_info_size;
108 	uint32_t iso_interval_us;
109 	uint32_t latency_packing;
110 	uint32_t ticks_slot_sync;
111 	uint32_t ticks_slot_aux;
112 	memq_link_t *link_cmplt;
113 	memq_link_t *link_term;
114 	struct ll_adv_set *adv;
115 	uint32_t slot_overhead;
116 	uint32_t event_spacing;
117 	uint16_t ctrl_spacing;
118 	uint8_t sdu_per_event;
119 	uint8_t ter_idx;
120 	uint32_t ret;
121 	uint8_t err;
122 	int res;
123 
124 	adv_iso = adv_iso_get(big_handle);
125 
126 	/* Already created */
127 	if (!adv_iso || adv_iso->lll.adv) {
128 		return BT_HCI_ERR_CMD_DISALLOWED;
129 	}
130 
131 	/* No advertising set created */
132 	adv = ull_adv_is_created_get(adv_handle);
133 	if (!adv) {
134 		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
135 	}
136 
137 	/* Does not identify a periodic advertising train or
138 	 * the periodic advertising train is already associated
139 	 * with another BIG.
140 	 */
141 	lll_adv_sync = adv->lll.sync;
142 	if (!lll_adv_sync || lll_adv_sync->iso) {
143 		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
144 	}
145 
146 	/* Check if encryption supported */
147 	if (!IS_ENABLED(CONFIG_BT_CTLR_BROADCAST_ISO_ENC) &&
148 	    encryption) {
149 		return BT_HCI_ERR_CMD_DISALLOWED;
150 	}
151 
152 	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
153 		if (num_bis == 0U || num_bis > 0x1F) {
154 			return BT_HCI_ERR_INVALID_PARAM;
155 		}
156 
157 		if (sdu_interval < 0x000100 || sdu_interval > 0x0FFFFF) {
158 			return BT_HCI_ERR_INVALID_PARAM;
159 		}
160 
161 		if (max_sdu < 0x0001 || max_sdu > 0x0FFF) {
162 			return BT_HCI_ERR_INVALID_PARAM;
163 		}
164 
165 		if (phy > (BT_HCI_LE_EXT_SCAN_PHY_1M |
166 			   BT_HCI_LE_EXT_SCAN_PHY_2M |
167 			   BT_HCI_LE_EXT_SCAN_PHY_CODED)) {
168 			return BT_HCI_ERR_INVALID_PARAM;
169 		}
170 
171 		if (packing > 1U) {
172 			return BT_HCI_ERR_INVALID_PARAM;
173 		}
174 
175 		if (framing > 1U) {
176 			return BT_HCI_ERR_INVALID_PARAM;
177 		}
178 
179 		if (encryption > 1U) {
180 			return BT_HCI_ERR_INVALID_PARAM;
181 		}
182 
183 		if (test_config) {
184 			if (!IN_RANGE(iso_interval, 0x0004, 0x0C80)) {
185 				return BT_HCI_ERR_INVALID_PARAM;
186 			}
187 
188 			if (!IN_RANGE(nse, 0x01, 0x1F)) {
189 				return BT_HCI_ERR_INVALID_PARAM;
190 			}
191 
192 			if (!IN_RANGE(max_pdu, 0x01, MIN(0xFB, LL_BIS_OCTETS_TX_MAX))) {
193 				return BT_HCI_ERR_INVALID_PARAM;
194 			}
195 
196 			if (!IN_RANGE(bn, 0x01, 0x07)) {
197 				return BT_HCI_ERR_INVALID_PARAM;
198 			}
199 
200 			if (!IN_RANGE(irc, 0x01, 0x0F)) {
201 				return BT_HCI_ERR_INVALID_PARAM;
202 			}
203 
204 			/* FIXME: PTO is currently limited to BN */
205 			if (!IN_RANGE(pto, 0x00, bn /*0x0F*/)) {
206 				return BT_HCI_ERR_INVALID_PARAM;
207 			}
208 
209 			if (bn * irc + pto < nse) {
210 				return BT_HCI_ERR_INVALID_PARAM;
211 			}
212 		} else {
213 			if (max_latency > 0x0FA0) {
214 				return BT_HCI_ERR_INVALID_PARAM;
215 			}
216 
217 			if (rtn > 0x0F) {
218 				return BT_HCI_ERR_INVALID_PARAM;
219 			}
220 		}
221 	}
222 
223 	/* Check if free BISes available */
224 	if (mem_free_count_get(stream_free) < num_bis) {
225 		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
226 	}
227 
228 	/* Allocate link buffer for created event */
229 	link_cmplt = ll_rx_link_alloc();
230 	if (!link_cmplt) {
231 		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
232 	}
233 
234 	/* Allocate link buffer for sync lost event */
235 	link_term = ll_rx_link_alloc();
236 	if (!link_term) {
237 		ll_rx_link_release(link_cmplt);
238 
239 		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
240 	}
241 
242 	/* Check if an aux context is already allocated before we create the ISO */
243 	if (adv->lll.aux) {
244 		aux = HDR_LLL2ULL(adv->lll.aux);
245 	} else {
246 		aux = NULL;
247 	}
248 
249 	/* Calculate overheads due to extended advertising. */
250 	if (aux && aux->is_started) {
251 		ticks_slot_aux = aux->ull.ticks_slot;
252 		if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
253 			ticks_slot_overhead =
254 				MAX(aux->ull.ticks_active_to_start,
255 				    aux->ull.ticks_prepare_to_start);
256 		} else {
257 			ticks_slot_overhead = 0U;
258 		}
259 	} else {
260 		uint32_t time_us;
261 
262 		time_us = PDU_AC_US(PDU_AC_PAYLOAD_SIZE_MAX, adv->lll.phy_s,
263 				    adv->lll.phy_flags) +
264 			  EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
265 		ticks_slot_aux = HAL_TICKER_US_TO_TICKS_CEIL(time_us);
266 		if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
267 			/* Assume primary overheads may be inherited by aux */
268 			ticks_slot_overhead =
269 				MAX(adv->ull.ticks_active_to_start,
270 				    adv->ull.ticks_prepare_to_start);
271 		} else {
272 			ticks_slot_overhead = 0U;
273 		}
274 	}
275 	ticks_slot_aux += ticks_slot_overhead;
276 
277 	/* Calculate overheads due to periodic advertising. */
278 	sync = HDR_LLL2ULL(lll_adv_sync);
279 	if (sync->is_started) {
280 		ticks_slot_sync = sync->ull.ticks_slot;
281 	} else {
282 		uint32_t time_us;
283 
284 		time_us = PDU_AC_US(PDU_AC_PAYLOAD_SIZE_MAX,
285 				    sync->lll.adv->phy_s,
286 				    sync->lll.adv->phy_flags) +
287 			  EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
288 		ticks_slot_sync = HAL_TICKER_US_TO_TICKS_CEIL(time_us);
289 	}
290 
291 	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
292 		ticks_slot_overhead = MAX(sync->ull.ticks_active_to_start,
293 					  sync->ull.ticks_prepare_to_start);
294 	} else {
295 		ticks_slot_overhead = 0U;
296 	}
297 
298 	ticks_slot_sync += ticks_slot_overhead;
299 
300 	/* Calculate total overheads due to extended and periodic advertising */
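	/* NOTE: When a fixed AUX to SYNC PDU offset is configured the aux and
	 *       sync events are expected to be placed relative to each other,
	 *       so only the larger of the two slot reservations is added as
	 *       overhead; otherwise both reservations are summed.
	 */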
301 	if (false) {
302 
303 #if defined(CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET)
304 	} else if (CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET > 0U) {
305 		ticks_slot_overhead = MAX(ticks_slot_aux, ticks_slot_sync);
306 #endif /* CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET */
307 
308 	} else {
309 		ticks_slot_overhead = ticks_slot_aux + ticks_slot_sync;
310 	}
311 
312 	/* Store parameters in LLL context */
313 	/* TODO: Move parameters to ULL if only accessed by ULL */
314 	lll_adv_iso = &adv_iso->lll;
315 	lll_adv_iso->handle = big_handle;
316 	lll_adv_iso->phy = phy;
317 	lll_adv_iso->phy_flags = PHY_FLAGS_S8;
318 
319 	/* Mandatory Num_BIS = 1 */
320 	lll_adv_iso->num_bis = num_bis;
321 
322 	/* Allocate streams */
323 	for (uint8_t i = 0U; i < num_bis; i++) {
324 		struct lll_adv_iso_stream *stream;
325 
326 		stream = (void *)adv_iso_stream_acquire();
327 		stream->big_handle = big_handle;
328 		stream->dp = NULL;
329 
330 		if (!stream->link_tx_free) {
331 			stream->link_tx_free = &stream->link_tx;
332 		}
333 		memq_init(stream->link_tx_free, &stream->memq_tx.head,
334 			  &stream->memq_tx.tail);
335 		stream->link_tx_free = NULL;
336 
337 		stream->pkt_seq_num = 0U;
338 
339 		lll_adv_iso->stream_handle[i] =
340 			adv_iso_stream_handle_get(stream);
341 	}
342 
343 	if (test_config) {
344 		lll_adv_iso->bn = bn;
345 		lll_adv_iso->iso_interval = iso_interval;
346 		lll_adv_iso->irc = irc;
347 		lll_adv_iso->nse = nse;
348 		lll_adv_iso->max_pdu = max_pdu;
349 		iso_interval_us = iso_interval * PERIODIC_INT_UNIT_US;
350 
351 	} else {
352 		if (framing) {
353 			/* Try to allocate room for one SDU + header */
354 			lll_adv_iso->max_pdu = MIN(LL_BIS_OCTETS_TX_MAX,
355 						   max_sdu + PDU_ISO_SEG_HDR_SIZE +
356 						    PDU_ISO_SEG_TIMEOFFSET_SIZE);
357 		} else {
358 			lll_adv_iso->max_pdu = MIN(LL_BIS_OCTETS_TX_MAX, max_sdu);
359 		}
360 
361 		/* FIXME: SDU per max latency */
362 		sdu_per_event = MAX((max_latency * USEC_PER_MSEC / sdu_interval), 2U) -
363 				1U;
364 
365 		/* BN (Burst Count), Mandatory BN = 1 */
366 		bn = DIV_ROUND_UP(max_sdu, lll_adv_iso->max_pdu) * sdu_per_event;
367 		if (bn > PDU_BIG_BN_MAX) {
368 			/* Restrict each BIG event to maximum burst per BIG event */
369 			lll_adv_iso->bn = PDU_BIG_BN_MAX;
370 
371 			/* Ceil the required burst count per SDU to next maximum burst
372 			 * per BIG event.
373 			 */
374 			bn = DIV_ROUND_UP(bn, PDU_BIG_BN_MAX) * PDU_BIG_BN_MAX;
375 		} else {
376 			lll_adv_iso->bn = bn;
377 		}
378 
379 		/* Calculate ISO interval */
380 		/* iso_interval shall be at least the SDU interval, or an
381 		 * integer multiple of the SDU interval for unframed PDUs
382 		 */
383 		iso_interval_us = ((sdu_interval * lll_adv_iso->bn * sdu_per_event) /
384 				(bn * PERIODIC_INT_UNIT_US)) * PERIODIC_INT_UNIT_US;
385 		lll_adv_iso->iso_interval = iso_interval_us / PERIODIC_INT_UNIT_US;
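		/* For example (illustrative values only, not from any
		 * particular use case): with sdu_interval = 10000 us,
		 * max_latency = 20 ms and max_sdu fitting in a single PDU,
		 * sdu_per_event = MAX(20000 / 10000, 2) - 1 = 1, bn = 1, and
		 * iso_interval_us = ((10000 * 1 * 1) / 1250) * 1250 = 10000 us,
		 * i.e. an ISO interval of 8 units of 1.25 ms.
		 */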
386 	}
387 
388 	/* Calculate max available ISO event spacing */
389 	slot_overhead = HAL_TICKER_TICKS_TO_US(ticks_slot_overhead);
390 	if (slot_overhead < iso_interval_us) {
391 		event_spacing_max = iso_interval_us - slot_overhead;
392 	} else {
393 		event_spacing_max = 0U;
394 	}
395 
396 	/* Negotiate event spacing */
397 	do {
398 		if (!test_config) {
399 			/* Immediate Repetition Count (IRC), Mandatory IRC = 1 */
400 			lll_adv_iso->irc = rtn + 1U;
401 
402 			/* Calculate NSE (No. of Sub Events), Mandatory NSE = 1,
403 			 * without PTO added.
404 			 */
405 			lll_adv_iso->nse = lll_adv_iso->bn * lll_adv_iso->irc;
406 		}
407 
408 		/* NOTE: Calculate sub_interval, if interleaved then it is Num_BIS x
409 		 *       BIS_Spacing (by BT Spec.)
410 		 *       else if sequential, then by our implementation, let's keep it
411 		 *       max_tx_time for Max_PDU + tMSS.
412 		 */
413 		lll_adv_iso->sub_interval = PDU_BIS_US(lll_adv_iso->max_pdu, encryption,
414 						phy, lll_adv_iso->phy_flags) +
415 						EVENT_MSS_US;
416 		ctrl_spacing = PDU_BIS_US(sizeof(struct pdu_big_ctrl), encryption, phy,
417 					lll_adv_iso->phy_flags);
418 		latency_packing = lll_adv_iso->sub_interval * lll_adv_iso->nse *
419 					lll_adv_iso->num_bis;
420 		event_spacing = latency_packing + ctrl_spacing +
421 				EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
422 
423 		/* Check if the ISO interval is too small to fit the calculated BIG event
424 		 * timing required for the supplied BIG create parameters.
425 		 */
426 		if (event_spacing > event_spacing_max) {
427 			/* Check if we can reduce RTN to meet event spacing */
428 			if (!test_config && rtn) {
429 				rtn--;
430 			} else {
431 				break;
432 			}
433 		}
434 	} while (event_spacing > event_spacing_max);
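	/* At this point the BIG event either fits within the available
	 * spacing, or RTN could not be reduced any further (test mode
	 * parameters are never adjusted); the latter case is rejected below.
	 */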
435 
436 	/* Check if the ISO interval is too small to fit the calculated BIG event
437 	 * timing required for the supplied BIG create parameters.
438 	 */
439 	if (event_spacing > event_spacing_max) {
440 		/* Release allocated link buffers */
441 		ll_rx_link_release(link_cmplt);
442 		ll_rx_link_release(link_term);
443 
444 		return BT_HCI_ERR_INVALID_PARAM;
445 	}
446 
447 	lll_adv_iso->ptc = ptc_calc(lll_adv_iso, event_spacing, event_spacing_max);
448 
449 	if (test_config) {
450 		lll_adv_iso->pto = pto;
451 
452 		if (pto && !lll_adv_iso->ptc) {
453 			return BT_HCI_ERR_INVALID_PARAM;
454 		}
455 	} else {
456 		/* Pre-Transmission Offset (PTO) */
457 		if (lll_adv_iso->ptc) {
458 			lll_adv_iso->pto = bn / lll_adv_iso->bn;
459 		} else {
460 			lll_adv_iso->pto = 0U;
461 		}
462 
463 		/* Make room for pre-transmissions */
464 		lll_adv_iso->nse += lll_adv_iso->ptc;
465 	}
466 
467 	/* Based on packing requested, sequential or interleaved */
468 	if (packing) {
469 		/* Interleaved Packing */
470 		lll_adv_iso->bis_spacing = lll_adv_iso->sub_interval;
471 		lll_adv_iso->sub_interval = lll_adv_iso->bis_spacing *
472 					lll_adv_iso->nse;
473 	} else {
474 		/* Sequential Packing */
475 		lll_adv_iso->bis_spacing = lll_adv_iso->sub_interval *
476 					lll_adv_iso->nse;
477 	}
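	/* NOTE: With sequential packing all NSE subevents of one BIS are
	 *       transmitted back-to-back before the next BIS starts, hence
	 *       BIS_Spacing becomes NSE times the subevent interval. With
	 *       interleaved packing BIS_Spacing is the per-subevent gap and
	 *       the interval between consecutive subevents of the same BIS is
	 *       scaled up accordingly, as computed above.
	 */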
478 
479 	/* TODO: Group count, GC = NSE / BN; PTO = GC - IRC;
480 	 *       Is this required?
481 	 */
482 
483 	lll_adv_iso->sdu_interval = sdu_interval;
484 	lll_adv_iso->max_sdu = max_sdu;
485 
486 	res = util_saa_le32(lll_adv_iso->seed_access_addr, big_handle);
487 	LL_ASSERT(!res);
488 
489 	(void)lll_csrand_get(lll_adv_iso->base_crc_init,
490 			     sizeof(lll_adv_iso->base_crc_init));
491 	lll_adv_iso->data_chan_count =
492 		ull_chan_map_get(lll_adv_iso->data_chan_map);
493 	lll_adv_iso->payload_count = 0U;
494 	lll_adv_iso->latency_prepare = 0U;
495 	lll_adv_iso->latency_event = 0U;
496 	lll_adv_iso->term_req = 0U;
497 	lll_adv_iso->term_ack = 0U;
498 	lll_adv_iso->chm_req = 0U;
499 	lll_adv_iso->chm_ack = 0U;
500 	lll_adv_iso->ctrl_expire = 0U;
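	/* NOTE: The term_req/term_ack and chm_req/chm_ack pairs form a simple
	 *       request/acknowledge handshake between the ULL context that
	 *       raises a procedure and the LLL context that services it in
	 *       the BIG event.
	 */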
501 
502 	/* TODO: framing support */
503 	lll_adv_iso->framing = framing;
504 
505 	/* Allocate next PDU */
506 	err = ull_adv_sync_pdu_alloc(adv, ULL_ADV_PDU_EXTRA_DATA_ALLOC_IF_EXIST,
507 				     &pdu_prev, &pdu, NULL, NULL, &ter_idx);
508 	if (err) {
509 		/* Insufficient Advertising PDU buffers to allocate new PDU
510 		 * to add BIGInfo into the ACAD of the Periodic Advertising.
511 		 */
512 
513 		/* Release allocated link buffers */
514 		ll_rx_link_release(link_cmplt);
515 		ll_rx_link_release(link_term);
516 
517 		return err;
518 	}
519 
520 	/* Add ACAD to AUX_SYNC_IND */
521 	if (encryption) {
522 		pdu_big_info_size = PDU_BIG_INFO_ENCRYPTED_SIZE;
523 	} else {
524 		pdu_big_info_size = PDU_BIG_INFO_CLEARTEXT_SIZE;
525 	}
526 	bi_ad[PDU_ADV_DATA_HEADER_LEN_OFFSET] = pdu_big_info_size + (PDU_ADV_DATA_HEADER_SIZE -
527 						PDU_ADV_DATA_HEADER_LEN_SIZE);
528 	bi_ad[PDU_ADV_DATA_HEADER_TYPE_OFFSET] = BT_DATA_BIG_INFO;
529 	big_info = (void *)&bi_ad[PDU_ADV_DATA_HEADER_DATA_OFFSET];
530 
531 	/* big_info->offset, big_info->offset_units and
532 	 * big_info->payload_count_framing[] will be filled by periodic
533 	 * advertising event.
534 	 */
535 
536 	PDU_BIG_INFO_ISO_INTERVAL_SET(big_info, iso_interval_us / PERIODIC_INT_UNIT_US);
537 	PDU_BIG_INFO_NUM_BIS_SET(big_info, lll_adv_iso->num_bis);
538 	PDU_BIG_INFO_NSE_SET(big_info, lll_adv_iso->nse);
539 	PDU_BIG_INFO_BN_SET(big_info, lll_adv_iso->bn);
540 	PDU_BIG_INFO_SUB_INTERVAL_SET(big_info, lll_adv_iso->sub_interval);
541 	PDU_BIG_INFO_PTO_SET(big_info, lll_adv_iso->pto);
542 	PDU_BIG_INFO_SPACING_SET(big_info, lll_adv_iso->bis_spacing);
543 	PDU_BIG_INFO_IRC_SET(big_info, lll_adv_iso->irc);
544 
545 	big_info->max_pdu = lll_adv_iso->max_pdu;
546 	big_info->rfu = 0U;
547 
548 	(void)memcpy(&big_info->seed_access_addr, lll_adv_iso->seed_access_addr,
549 		     sizeof(big_info->seed_access_addr));
550 	PDU_BIG_INFO_SDU_INTERVAL_SET(big_info, sdu_interval);
551 	PDU_BIG_INFO_MAX_SDU_SET(big_info, max_sdu);
552 	(void)memcpy(&big_info->base_crc_init, lll_adv_iso->base_crc_init,
553 		     sizeof(big_info->base_crc_init));
554 	pdu_big_info_chan_map_phy_set(big_info->chm_phy,
555 				      lll_adv_iso->data_chan_map,
556 				      phy);
557 	/* Assign the 39-bit payload count, and 1-bit framing */
558 	big_info->payload_count_framing[0] = lll_adv_iso->payload_count;
559 	big_info->payload_count_framing[1] = lll_adv_iso->payload_count >> 8;
560 	big_info->payload_count_framing[2] = lll_adv_iso->payload_count >> 16;
561 	big_info->payload_count_framing[3] = lll_adv_iso->payload_count >> 24;
562 	big_info->payload_count_framing[4] = lll_adv_iso->payload_count >> 32;
563 	big_info->payload_count_framing[4] &= ~BIT(7);
564 	big_info->payload_count_framing[4] |= ((framing & 0x01) << 7);
565 
566 	if (IS_ENABLED(CONFIG_BT_CTLR_BROADCAST_ISO_ENC) && encryption) {
567 		const uint8_t BIG1[16] = {0x31, 0x47, 0x49, 0x42, };
568 		const uint8_t BIG2[4]  = {0x32, 0x47, 0x49, 0x42};
569 		const uint8_t BIG3[4]  = {0x33, 0x47, 0x49, 0x42};
570 		struct ccm *ccm_tx;
571 		uint8_t igltk[16];
572 		uint8_t gltk[16];
573 		uint8_t gsk[16];
574 
575 		/* Fill GIV and GSKD */
576 		(void)lll_csrand_get(lll_adv_iso->giv,
577 				     sizeof(lll_adv_iso->giv));
578 		(void)memcpy(big_info->giv, lll_adv_iso->giv,
579 			     sizeof(big_info->giv));
580 		(void)lll_csrand_get(big_info->gskd, sizeof(big_info->gskd));
581 
582 		/* Calculate GSK */
583 		err = bt_crypto_h7(BIG1, bcode, igltk);
584 		LL_ASSERT(!err);
585 		err = bt_crypto_h6(igltk, BIG2, gltk);
586 		LL_ASSERT(!err);
587 		err = bt_crypto_h8(gltk, big_info->gskd, BIG3, gsk);
588 		LL_ASSERT(!err);
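		/* NOTE: The derivation above follows the Broadcast
		 *       Isochronous Stream encryption key hierarchy:
		 *       IGLTK = h7("BIG1", Broadcast_Code),
		 *       GLTK  = h6(IGLTK, "BIG2"),
		 *       GSK   = h8(GLTK, GSKD, "BIG3"),
		 *       with the key strings stored LSB first above.
		 */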
589 
590 		/* Prepare the CCM parameters */
591 		ccm_tx = &lll_adv_iso->ccm_tx;
592 		ccm_tx->direction = 1U;
593 		(void)memcpy(&ccm_tx->iv[4], &lll_adv_iso->giv[4], 4U);
594 		(void)mem_rcopy(ccm_tx->key, gsk, sizeof(ccm_tx->key));
595 
596 		/* NOTE: counter is filled in LLL */
597 
598 		lll_adv_iso->enc = 1U;
599 	} else {
600 		lll_adv_iso->enc = 0U;
601 	}
602 
603 	err = ull_adv_sync_add_to_acad(lll_adv_sync, pdu_prev, pdu, bi_ad,
604 				       pdu_big_info_size + PDU_ADV_DATA_HEADER_SIZE);
605 	if (err) {
606 		/* Failed to add BIGInfo into the ACAD of the Periodic
607 		 * Advertising.
608 		 */
609 
610 		/* Release allocated link buffers */
611 		ll_rx_link_release(link_cmplt);
612 		ll_rx_link_release(link_term);
613 
614 		return err;
615 	}
616 
617 	/* Associate the ISO instance with an Extended Advertising instance */
618 	lll_adv_iso->adv = &adv->lll;
619 
620 	/* Store the link buffer for ISO create and terminate complete event */
621 	adv_iso->node_rx_complete.hdr.link = link_cmplt;
622 	adv_iso->node_rx_terminate.rx.hdr.link = link_term;
623 
624 	/* Initialise LLL header members */
625 	lll_hdr_init(lll_adv_iso, adv_iso);
626 
627 	/* Start sending BIS empty data packet for each BIS */
628 	ret = adv_iso_start(adv_iso, iso_interval_us);
629 	if (ret) {
630 		/* Failed to schedule BIG events */
631 
632 		/* Reset the association of ISO instance with the Extended
633 		 * Advertising Instance
634 		 */
635 		lll_adv_iso->adv = NULL;
636 
637 		/* Release allocated link buffers */
638 		ll_rx_link_release(link_cmplt);
639 		ll_rx_link_release(link_term);
640 
641 		return BT_HCI_ERR_CMD_DISALLOWED;
642 	}
643 
644 	/* Associate the ISO instance with a Periodic Advertising */
645 	lll_adv_sync->iso = lll_adv_iso;
646 
647 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
648 	/* Notify the sync instance */
649 	ull_adv_sync_iso_created(HDR_LLL2ULL(lll_adv_sync));
650 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
651 
652 	/* Commit the BIGInfo in the ACAD field of Periodic Advertising */
653 	lll_adv_sync_data_enqueue(lll_adv_sync, ter_idx);
654 
655 	return BT_HCI_ERR_SUCCESS;
656 }
657 
658 uint8_t ll_big_create(uint8_t big_handle, uint8_t adv_handle, uint8_t num_bis,
659 		      uint32_t sdu_interval, uint16_t max_sdu,
660 		      uint16_t max_latency, uint8_t rtn, uint8_t phy,
661 		      uint8_t packing, uint8_t framing, uint8_t encryption,
662 		      uint8_t *bcode)
663 {
664 	return big_create(big_handle, adv_handle, num_bis, sdu_interval, max_sdu,
665 			  max_latency, rtn, phy, packing, framing, encryption, bcode,
666 			  0 /*iso_interval*/,
667 			  0 /*nse*/,
668 			  0 /*max_pdu*/,
669 			  0 /*bn*/,
670 			  0 /*irc*/,
671 			  0 /*pto*/,
672 			  false);
673 }
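/* Illustrative (hypothetical) usage: creating a single-BIS, unframed,
 * unencrypted BIG on the 2M PHY from HCI LE Create BIG parameters would end
 * up here as, for example:
 *
 *   err = ll_big_create(0x00, 0x00, 1U, 10000U, 100U, 20U, 2U,
 *                       BT_HCI_LE_EXT_SCAN_PHY_2M, 0U, 0U, 0U, NULL);
 *
 * The BIG test command path below supplies the timing parameters explicitly
 * instead of deriving them.
 */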
674 
675 uint8_t ll_big_test_create(uint8_t big_handle, uint8_t adv_handle,
676 			   uint8_t num_bis, uint32_t sdu_interval,
677 			   uint16_t iso_interval, uint8_t nse, uint16_t max_sdu,
678 			   uint16_t max_pdu, uint8_t phy, uint8_t packing,
679 			   uint8_t framing, uint8_t bn, uint8_t irc,
680 			   uint8_t pto, uint8_t encryption, uint8_t *bcode)
681 {
682 	return big_create(big_handle, adv_handle, num_bis, sdu_interval, max_sdu,
683 			  0 /*max_latency*/,
684 			  0 /*rtn*/,
685 			  phy, packing, framing, encryption, bcode,
686 			  iso_interval, nse, max_pdu, bn, irc, pto, true);
687 }
688 
689 uint8_t ll_big_terminate(uint8_t big_handle, uint8_t reason)
690 {
691 	struct lll_adv_sync *lll_adv_sync;
692 	struct lll_adv_iso *lll_adv_iso;
693 	struct ll_adv_iso_set *adv_iso;
694 	struct pdu_adv *pdu_prev, *pdu;
695 	struct node_rx_pdu *node_rx;
696 	struct lll_adv *lll_adv;
697 	struct ll_adv_set *adv;
698 	uint16_t stream_handle;
699 	uint16_t handle;
700 	uint8_t num_bis;
701 	uint8_t ter_idx;
702 	uint8_t err;
703 
704 	adv_iso = adv_iso_get(big_handle);
705 	if (!adv_iso) {
706 		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
707 	}
708 
709 	lll_adv_iso = &adv_iso->lll;
710 	lll_adv = lll_adv_iso->adv;
711 	if (!lll_adv) {
712 		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
713 	}
714 
715 	if (lll_adv_iso->term_req) {
716 		return BT_HCI_ERR_CMD_DISALLOWED;
717 	}
718 
719 	/* Remove ISO data path, keeping data from entering Tx pipeline */
720 	num_bis = lll_adv_iso->num_bis;
721 	while (num_bis--) {
722 		stream_handle = lll_adv_iso->stream_handle[num_bis];
723 		handle = LL_BIS_ADV_HANDLE_FROM_IDX(stream_handle);
724 		(void)ll_remove_iso_path(handle,
725 					 BIT(BT_HCI_DATAPATH_DIR_HOST_TO_CTLR));
726 	}
727 
728 	lll_adv_sync = lll_adv->sync;
729 	adv = HDR_LLL2ULL(lll_adv);
730 
731 	/* Allocate next PDU */
732 	err = ull_adv_sync_pdu_alloc(adv, ULL_ADV_PDU_EXTRA_DATA_ALLOC_IF_EXIST,
733 				     &pdu_prev, &pdu, NULL, NULL, &ter_idx);
734 	if (err) {
735 		return err;
736 	}
737 
738 	/* Remove BigInfo from ACAD in AUX_SYNC_IND */
739 	err = ull_adv_sync_remove_from_acad(lll_adv_sync, pdu_prev, pdu, BT_DATA_BIG_INFO);
740 	if (err) {
741 		return err;
742 	}
743 
744 	lll_adv_sync_data_enqueue(lll_adv_sync, ter_idx);
745 
746 	/* Prepare BIG terminate event; it will be enqueued after the Tx flush */
747 	node_rx = (void *)&adv_iso->node_rx_terminate;
748 	node_rx->hdr.type = NODE_RX_TYPE_BIG_TERMINATE;
749 	node_rx->hdr.handle = big_handle;
750 	node_rx->rx_ftr.param = adv_iso;
751 
752 	if (reason == BT_HCI_ERR_REMOTE_USER_TERM_CONN) {
753 		*((uint8_t *)node_rx->pdu) = BT_HCI_ERR_LOCALHOST_TERM_CONN;
754 	} else {
755 		*((uint8_t *)node_rx->pdu) = reason;
756 	}
757 
758 	/* Request terminate procedure */
759 	lll_adv_iso->term_reason = reason;
760 	lll_adv_iso->term_req = 1U;
761 
762 	return BT_HCI_ERR_SUCCESS;
763 }
764 
765 int ull_adv_iso_init(void)
766 {
767 	int err;
768 
769 	err = init_reset();
770 	if (err) {
771 		return err;
772 	}
773 
774 	return 0;
775 }
776 
777 int ull_adv_iso_reset(void)
778 {
779 	uint8_t handle;
780 	int err;
781 
782 	handle = CONFIG_BT_CTLR_ADV_ISO_SET;
783 	while (handle--) {
784 		struct lll_adv_sync *adv_sync_lll;
785 		struct lll_adv_iso *adv_iso_lll;
786 		struct ll_adv_iso_set *adv_iso;
787 		volatile uint32_t ret_cb;
788 		struct lll_adv *adv_lll;
789 		uint32_t ret;
790 		void *mark;
791 
792 		adv_iso = &ll_adv_iso[handle];
793 		adv_iso_lll = &adv_iso->lll;
794 		adv_lll = adv_iso_lll->adv;
795 		if (!adv_lll) {
796 			continue;
797 		}
798 
799 		mark = ull_disable_mark(adv_iso);
800 		LL_ASSERT(mark == adv_iso);
801 
802 		/* Stop event scheduling */
803 		ret_cb = TICKER_STATUS_BUSY;
804 		ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
805 				  TICKER_ID_ADV_ISO_BASE + adv_iso_lll->handle,
806 				  ull_ticker_status_give, (void *)&ret_cb);
807 		ret = ull_ticker_status_take(ret, &ret_cb);
808 		if (ret) {
809 			mark = ull_disable_unmark(adv_iso);
810 			LL_ASSERT(mark == adv_iso);
811 
812 			/* Assert as there shall be a ticker instance active */
813 			LL_ASSERT(false);
814 
815 			return BT_HCI_ERR_CMD_DISALLOWED;
816 		}
817 
818 		/* Abort any events in LLL pipeline */
819 		err = ull_disable(adv_iso_lll);
820 		LL_ASSERT(!err || (err == -EALREADY));
821 
822 		mark = ull_disable_unmark(adv_iso);
823 		LL_ASSERT(mark == adv_iso);
824 
825 		/* Reset associated streams */
826 		while (adv_iso_lll->num_bis--) {
827 			struct lll_adv_iso_stream *stream;
828 			uint16_t stream_handle;
829 
830 			stream_handle = adv_iso_lll->stream_handle[adv_iso_lll->num_bis];
831 			stream = ull_adv_iso_stream_get(stream_handle);
832 			if (stream) {
833 				stream->link_tx_free = NULL;
834 			}
835 		}
836 
837 		/* Remove Periodic Advertising association */
838 		adv_sync_lll = adv_lll->sync;
839 		if (adv_sync_lll) {
840 			adv_sync_lll->iso = NULL;
841 		}
842 
843 		/* Remove Extended Advertising association */
844 		adv_iso_lll->adv = NULL;
845 	}
846 
847 	err = init_reset();
848 	if (err) {
849 		return err;
850 	}
851 
852 	return 0;
853 }
854 
855 struct ll_adv_iso_set *ull_adv_iso_get(uint8_t handle)
856 {
857 	return adv_iso_get(handle);
858 }
859 
860 uint8_t ull_adv_iso_chm_update(void)
861 {
862 	uint8_t handle;
863 
864 	handle = CONFIG_BT_CTLR_ADV_ISO_SET;
865 	while (handle--) {
866 		(void)adv_iso_chm_update(handle);
867 	}
868 
869 	/* TODO: Should failure due to Channel Map Update being already in
870 	 *       progress be returned to caller?
871 	 */
872 	return 0;
873 }
874 
875 void ull_adv_iso_chm_complete(struct node_rx_pdu *rx)
876 {
877 	struct lll_adv_sync *sync_lll;
878 	struct lll_adv_iso *iso_lll;
879 	struct lll_adv *adv_lll;
880 
881 	iso_lll = rx->rx_ftr.param;
882 	adv_lll = iso_lll->adv;
883 	sync_lll = adv_lll->sync;
884 
885 	/* Update Channel Map in BIGInfo in the Periodic Advertising PDU */
886 	while (sync_lll->iso_chm_done_req != sync_lll->iso_chm_done_ack) {
887 		sync_lll->iso_chm_done_ack = sync_lll->iso_chm_done_req;
888 
889 		adv_iso_chm_complete_commit(iso_lll);
890 	}
891 }
892 
893 #if defined(CONFIG_BT_CTLR_HCI_ADV_HANDLE_MAPPING)
894 uint8_t ll_adv_iso_by_hci_handle_get(uint8_t hci_handle, uint8_t *handle)
895 {
896 	struct ll_adv_iso_set *adv_iso;
897 	uint8_t idx;
898 
899 	adv_iso = &ll_adv_iso[0];
900 
901 	for (idx = 0U; idx < CONFIG_BT_CTLR_ADV_ISO_SET; idx++, adv_iso++) {
902 		if (adv_iso->lll.adv &&
903 		    (adv_iso->hci_handle == hci_handle)) {
904 			*handle = idx;
905 			return 0U;
906 		}
907 	}
908 
909 	return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
910 }
911 
912 uint8_t ll_adv_iso_by_hci_handle_new(uint8_t hci_handle, uint8_t *handle)
913 {
914 	struct ll_adv_iso_set *adv_iso, *adv_iso_empty;
915 	uint8_t idx;
916 
917 	adv_iso = &ll_adv_iso[0];
918 	adv_iso_empty = NULL;
919 
920 	for (idx = 0U; idx < CONFIG_BT_CTLR_ADV_ISO_SET; idx++, adv_iso++) {
921 		if (adv_iso->lll.adv) {
922 			if (adv_iso->hci_handle == hci_handle) {
923 				return BT_HCI_ERR_CMD_DISALLOWED;
924 			}
925 		} else if (!adv_iso_empty) {
926 			adv_iso_empty = adv_iso;
927 			*handle = idx;
928 		}
929 	}
930 
931 	if (adv_iso_empty) {
932 		memset(adv_iso_empty, 0U, sizeof(*adv_iso_empty));
933 		adv_iso_empty->hci_handle = hci_handle;
934 		return 0U;
935 	}
936 
937 	return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
938 }
939 #endif /* CONFIG_BT_CTLR_HCI_ADV_HANDLE_MAPPING */
940 
941 void ull_adv_iso_offset_get(struct ll_adv_sync_set *sync)
942 {
943 	static memq_link_t link;
944 	static struct mayfly mfy = {0U, 0U, &link, NULL, mfy_iso_offset_get};
945 	uint32_t ret;
946 
947 	mfy.param = sync;
948 	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1,
949 			     &mfy);
950 	LL_ASSERT(!ret);
951 }
952 
953 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
954 void ull_adv_iso_lll_biginfo_fill(struct pdu_adv *pdu, struct lll_adv_sync *lll_sync)
955 {
956 	struct lll_adv_iso *lll_iso;
957 	uint16_t latency_prepare;
958 	struct pdu_big_info *bi;
959 	uint64_t payload_count;
960 
961 	lll_iso = lll_sync->iso;
962 
963 	/* Calculate current payload count. If refcount is non-zero, we have called
964 	 * prepare and the LLL implementation has incremented latency_prepare already.
965 	 * In this case we need to subtract lazy + 1 from latency_prepare
966 	 */
967 	latency_prepare = lll_iso->latency_prepare;
968 	if (ull_ref_get(HDR_LLL2ULL(lll_iso))) {
969 		/* We are in post-prepare. latency_prepare is already
970 		 * incremented by lazy + 1 for next event
971 		 */
972 		latency_prepare -= lll_iso->iso_lazy + 1;
973 	}
974 
975 	payload_count = lll_iso->payload_count + ((latency_prepare +
976 						   lll_iso->iso_lazy) * lll_iso->bn);
977 
978 	bi = big_info_get(pdu);
979 	big_info_offset_fill(bi, lll_iso->ticks_sync_pdu_offset, 0U);
980 	/* Assign the 39-bit payload count, retaining the 1 MS bit framing value */
981 	bi->payload_count_framing[0] = payload_count;
982 	bi->payload_count_framing[1] = payload_count >> 8;
983 	bi->payload_count_framing[2] = payload_count >> 16;
984 	bi->payload_count_framing[3] = payload_count >> 24;
985 	bi->payload_count_framing[4] &= ~0x7F;
986 	bi->payload_count_framing[4] |= (payload_count >> 32) & 0x7F;
987 
988 	/* Update Channel Map in the BIGInfo until Thread context gets a
989 	 * chance to update the PDU with new Channel Map.
990 	 */
991 	if (lll_sync->iso_chm_done_req != lll_sync->iso_chm_done_ack) {
992 		pdu_big_info_chan_map_phy_set(bi->chm_phy,
993 					      lll_iso->data_chan_map,
994 					      lll_iso->phy);
995 	}
996 }
997 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
998 
999 void ull_adv_iso_done_complete(struct node_rx_event_done *done)
1000 {
1001 	struct ll_adv_iso_set *adv_iso;
1002 	struct lll_adv_iso *lll;
1003 	struct node_rx_pdu *rx;
1004 	memq_link_t *link;
1005 
1006 	/* switch to normal prepare */
1007 	mfy_lll_prepare.fp = lll_adv_iso_prepare;
1008 
1009 	/* Get reference to ULL context */
1010 	adv_iso = CONTAINER_OF(done->param, struct ll_adv_iso_set, ull);
1011 	lll = &adv_iso->lll;
1012 
1013 	/* Prepare BIG complete event */
1014 	rx = (void *)&adv_iso->node_rx_complete;
1015 	link = rx->hdr.link;
1016 	if (!link) {
1017 		/* NOTE: When BIS events have overlapping prepares placed
1018 		 *       in the pipeline, more than one done complete event
1019 		 *       will be generated; let's ignore the additional done
1020 		 *       events.
1021 		 */
1022 		return;
1023 	}
1024 	rx->hdr.link = NULL;
1025 
1026 	rx->hdr.type = NODE_RX_TYPE_BIG_COMPLETE;
1027 	rx->hdr.handle = lll->handle;
1028 	rx->rx_ftr.param = adv_iso;
1029 
1030 	ll_rx_put_sched(link, rx);
1031 }
1032 
1033 void ull_adv_iso_done_terminate(struct node_rx_event_done *done)
1034 {
1035 	struct ll_adv_iso_set *adv_iso;
1036 	struct lll_adv_iso *lll;
1037 	uint32_t ret;
1038 
1039 	/* Get reference to ULL context */
1040 	adv_iso = CONTAINER_OF(done->param, struct ll_adv_iso_set, ull);
1041 	lll = &adv_iso->lll;
1042 
1043 	/* Skip if already terminated (we come here if the pipeline is being flushed) */
1044 	if (unlikely(lll->handle == LLL_ADV_HANDLE_INVALID)) {
1045 		return;
1046 	}
1047 
1048 	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
1049 			  (TICKER_ID_ADV_ISO_BASE + lll->handle),
1050 			  ticker_stop_op_cb, adv_iso);
1051 	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
1052 		  (ret == TICKER_STATUS_BUSY));
1053 
1054 	/* Invalidate the handle */
1055 	lll->handle = LLL_ADV_HANDLE_INVALID;
1056 }
1057 
1058 struct ll_adv_iso_set *ull_adv_iso_by_stream_get(uint16_t handle)
1059 {
1060 	if (handle >= CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT) {
1061 		return NULL;
1062 	}
1063 
1064 	return adv_iso_get(stream_pool[handle].big_handle);
1065 }
1066 
1067 struct lll_adv_iso_stream *ull_adv_iso_stream_get(uint16_t handle)
1068 {
1069 	if (handle >= CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT) {
1070 		return NULL;
1071 	}
1072 
1073 	return &stream_pool[handle];
1074 }
1075 
1076 struct lll_adv_iso_stream *ull_adv_iso_lll_stream_get(uint16_t handle)
1077 {
1078 	return ull_adv_iso_stream_get(handle);
1079 }
1080 
1081 void ull_adv_iso_stream_release(struct ll_adv_iso_set *adv_iso)
1082 {
1083 	struct lll_adv_iso *lll;
1084 
1085 	lll = &adv_iso->lll;
1086 	while (lll->num_bis--) {
1087 		struct lll_adv_iso_stream *stream;
1088 		struct ll_iso_datapath *dp;
1089 		uint16_t stream_handle;
1090 		memq_link_t *link;
1091 
1092 		stream_handle = lll->stream_handle[lll->num_bis];
1093 		stream = ull_adv_iso_stream_get(stream_handle);
1094 
1095 		LL_ASSERT(!stream->link_tx_free);
1096 		link = memq_deinit(&stream->memq_tx.head,
1097 				   &stream->memq_tx.tail);
1098 		LL_ASSERT(link);
1099 		stream->link_tx_free = link;
1100 
1101 		dp = stream->dp;
1102 		if (dp) {
1103 			stream->dp = NULL;
1104 			isoal_source_destroy(dp->source_hdl);
1105 			ull_iso_datapath_release(dp);
1106 		}
1107 
1108 		mem_release(stream, &stream_free);
1109 	}
1110 
1111 	/* Remove Periodic Advertising association */
1112 	lll->adv->sync->iso = NULL;
1113 
1114 	/* Remove Extended Advertising association */
1115 	lll->adv = NULL;
1116 }
1117 
1118 uint32_t ull_adv_iso_max_time_get(const struct ll_adv_iso_set *adv_iso)
1119 {
1120 	return adv_iso_time_get(adv_iso, true);
1121 }
1122 
1123 static int init_reset(void)
1124 {
1125 	/* Add initializations common to power up initialization and HCI reset
1126 	 * initializations.
1127 	 */
1128 
1129 	mem_init((void *)stream_pool, sizeof(struct lll_adv_iso_stream),
1130 		 CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT, &stream_free);
1131 
1132 	return 0;
1133 }
1134 
1135 static struct ll_adv_iso_set *adv_iso_get(uint8_t handle)
1136 {
1137 	if (handle >= CONFIG_BT_CTLR_ADV_ISO_SET) {
1138 		return NULL;
1139 	}
1140 
1141 	return &ll_adv_iso[handle];
1142 }
1143 
1144 static struct stream *adv_iso_stream_acquire(void)
1145 {
1146 	return mem_acquire(&stream_free);
1147 }
1148 
1149 static uint16_t adv_iso_stream_handle_get(struct lll_adv_iso_stream *stream)
1150 {
1151 	return mem_index_get(stream, stream_pool, sizeof(*stream));
1152 }
1153 
1154 static uint8_t ptc_calc(const struct lll_adv_iso *lll, uint32_t event_spacing,
1155 			uint32_t event_spacing_max)
1156 {
1157 	if (event_spacing < event_spacing_max) {
1158 		uint8_t ptc;
1159 
1160 		/* Possible maximum Pre-transmission Subevents per BIS */
1161 		ptc = ((event_spacing_max - event_spacing) /
1162 		       (lll->sub_interval * lll->bn * lll->num_bis)) *
1163 		      lll->bn;
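		/* i.e. how many additional groups of BN subevents (across all
		 * BISes) fit in the unused portion of the event spacing,
		 * converted to a per-BIS subevent count.
		 */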
1164 
1165 		/* FIXME: Here we restrict to a maximum of BN Pre-Transmission
1166 		 * subevents per BIS
1167 		 */
1168 		ptc = MIN(ptc, lll->bn);
1169 
1170 		return ptc;
1171 	}
1172 
1173 	return 0U;
1174 }
1175 
1176 static uint32_t adv_iso_time_get(const struct ll_adv_iso_set *adv_iso, bool max)
1177 {
1178 	const struct lll_adv_iso *lll_iso;
1179 	uint32_t ctrl_spacing;
1180 	uint32_t pdu_spacing;
1181 	uint32_t time_us;
1182 
1183 	lll_iso = &adv_iso->lll;
1184 
1185 	pdu_spacing = PDU_BIS_US(lll_iso->max_pdu, lll_iso->enc, lll_iso->phy,
1186 				 lll_iso->phy_flags) +
1187 		      EVENT_MSS_US;
1188 	ctrl_spacing = PDU_BIS_US(sizeof(struct pdu_big_ctrl), lll_iso->enc,
1189 				  lll_iso->phy, lll_iso->phy_flags);
1190 
1191 	/* 1. Maximum PDU transmission time in 1M/2M/S8 PHY is 17040 us, or
1192 	 * represented in 15-bits.
1193 	 * 2. NSE in the range 1 to 31 is represented in 5-bits
1194 	 * 3. num_bis in the range 1 to 31 is represented in 5-bits
1195 	 *
1196 	 * Hence, worst case event time can be represented in 25-bits plus
1197 	 * one bit each for the added ctrl_spacing and radio event overheads,
1198 	 * i.e. 27-bits are required and sufficiently covered by using a 32-bit data type
1199 	 * for time_us.
1200 	 */
1201 
1202 	if (IS_ENABLED(CONFIG_BT_CTLR_ADV_ISO_RESERVE_MAX) || max) {
1203 		time_us = (pdu_spacing * lll_iso->nse * lll_iso->num_bis) +
1204 			  ctrl_spacing;
1205 	} else {
1206 		time_us = pdu_spacing * ((lll_iso->nse * lll_iso->num_bis) -
1207 					 lll_iso->ptc);
1208 	}
1209 
1210 	/* Add implementation defined radio event overheads */
1211 	time_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
1212 
1213 	return time_us;
1214 }
1215 
1216 static uint32_t adv_iso_start(struct ll_adv_iso_set *adv_iso,
1217 			      uint32_t iso_interval_us)
1218 {
1219 	uint32_t ticks_slot_overhead;
1220 	uint32_t ticks_slot_offset;
1221 	volatile uint32_t ret_cb;
1222 	uint32_t ticks_anchor;
1223 	uint32_t ticks_slot;
1224 	uint32_t slot_us;
1225 	uint32_t ret;
1226 	int err;
1227 
1228 	ull_hdr_init(&adv_iso->ull);
1229 
1230 	slot_us = adv_iso_time_get(adv_iso, false);
1231 
1232 	adv_iso->ull.ticks_active_to_start = 0U;
1233 	adv_iso->ull.ticks_prepare_to_start =
1234 		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
1235 	adv_iso->ull.ticks_preempt_to_start =
1236 		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
1237 	adv_iso->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);
1238 
1239 	ticks_slot_offset = MAX(adv_iso->ull.ticks_active_to_start,
1240 				adv_iso->ull.ticks_prepare_to_start);
1241 	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
1242 		ticks_slot_overhead = ticks_slot_offset;
1243 	} else {
1244 		ticks_slot_overhead = 0U;
1245 	}
1246 	ticks_slot = adv_iso->ull.ticks_slot + ticks_slot_overhead;
1247 
1248 	/* Find the slot after Periodic Advertisings events */
1249 	ticks_anchor = ticker_ticks_now_get() +
1250 		       HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);
1251 	err = ull_sched_adv_aux_sync_free_anchor_get(ticks_slot, &ticks_anchor);
1252 	if (!err) {
1253 		ticks_anchor += HAL_TICKER_US_TO_TICKS(
1254 					MAX(EVENT_MAFS_US,
1255 					    EVENT_OVERHEAD_START_US) -
1256 					EVENT_OVERHEAD_START_US +
1257 					(EVENT_TICKER_RES_MARGIN_US << 1));
1258 	}
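	/* NOTE: The returned anchor follows the found aux/sync reservations;
	 *       the adjustment above aims to keep at least the minimum AUX
	 *       frame space (EVENT_MAFS_US) plus a ticker resolution margin
	 *       before the first BIG event.
	 */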
1259 
1260 	/* Set up to use the ISO create prepare function for the first radio event */
1261 	mfy_lll_prepare.fp = lll_adv_iso_create_prepare;
1262 
1263 	ret_cb = TICKER_STATUS_BUSY;
1264 	ret = ticker_start(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
1265 			   (TICKER_ID_ADV_ISO_BASE + adv_iso->lll.handle),
1266 			   ticks_anchor, 0U,
1267 			   HAL_TICKER_US_TO_TICKS(iso_interval_us),
1268 			   HAL_TICKER_REMAINDER(iso_interval_us),
1269 			   TICKER_NULL_LAZY, ticks_slot, ticker_cb, adv_iso,
1270 			   ull_ticker_status_give, (void *)&ret_cb);
1271 	ret = ull_ticker_status_take(ret, &ret_cb);
1272 
1273 	return ret;
1274 }
1275 
1276 static uint8_t adv_iso_chm_update(uint8_t big_handle)
1277 {
1278 	struct ll_adv_iso_set *adv_iso;
1279 	struct lll_adv_iso *lll_iso;
1280 
1281 	adv_iso = adv_iso_get(big_handle);
1282 	if (!adv_iso) {
1283 		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
1284 	}
1285 
1286 	lll_iso = &adv_iso->lll;
1287 	if (lll_iso->term_req ||
1288 	    (lll_iso->chm_req != lll_iso->chm_ack)) {
1289 		return BT_HCI_ERR_CMD_DISALLOWED;
1290 	}
1291 
1292 	/* Request channel map update procedure */
1293 	lll_iso->chm_chan_count = ull_chan_map_get(lll_iso->chm_chan_map);
1294 	lll_iso->chm_req++;
1295 
1296 	return BT_HCI_ERR_SUCCESS;
1297 }
1298 
1299 static void adv_iso_chm_complete_commit(struct lll_adv_iso *lll_iso)
1300 {
1301 	struct pdu_adv *pdu_prev, *pdu;
1302 	struct lll_adv_sync *lll_sync;
1303 	struct pdu_big_info *bi;
1304 	struct ll_adv_set *adv;
1305 	uint8_t acad_len;
1306 	uint8_t ter_idx;
1307 	uint8_t ad_len;
1308 	uint8_t *acad;
1309 	uint8_t *ad;
1310 	uint8_t len;
1311 	uint8_t err;
1312 
1313 	/* Allocate next PDU */
1314 	adv = HDR_LLL2ULL(lll_iso->adv);
1315 	err = ull_adv_sync_pdu_alloc(adv, ULL_ADV_PDU_EXTRA_DATA_ALLOC_IF_EXIST,
1316 				     &pdu_prev, &pdu, NULL, NULL, &ter_idx);
1317 	LL_ASSERT(!err);
1318 
1319 	/* Copy content */
1320 	err = ull_adv_sync_duplicate(pdu_prev, pdu);
1321 	LL_ASSERT(!err);
1322 
1323 	/* Get the current ACAD */
1324 	acad = ull_adv_sync_get_acad(pdu, &acad_len);
1325 
1326 	lll_sync = adv->lll.sync;
1327 
1328 	/* Dev assert if ACAD empty */
1329 	LL_ASSERT(acad_len);
1330 
1331 	/* Find the BIGInfo */
1332 	len = acad_len;
1333 	ad = acad;
1334 	do {
1335 		ad_len = ad[PDU_ADV_DATA_HEADER_LEN_OFFSET];
1336 		if (ad_len &&
1337 		    (ad[PDU_ADV_DATA_HEADER_TYPE_OFFSET] == BT_DATA_BIG_INFO)) {
1338 			break;
1339 		}
1340 
1341 		ad_len += 1U;
1342 
1343 		LL_ASSERT(ad_len <= len);
1344 
1345 		ad += ad_len;
1346 		len -= ad_len;
1347 	} while (len);
1348 	LL_ASSERT(len);
1349 
1350 	/* Get reference to BIGInfo */
1351 	bi = (void *)&ad[PDU_ADV_DATA_HEADER_DATA_OFFSET];
1352 
1353 	/* Copy the new/current Channel Map */
1354 	pdu_big_info_chan_map_phy_set(bi->chm_phy, lll_iso->data_chan_map,
1355 				      lll_iso->phy);
1356 
1357 	/* Commit the new PDU Buffer */
1358 	lll_adv_sync_data_enqueue(lll_sync, ter_idx);
1359 }
1360 
1361 static void mfy_iso_offset_get(void *param)
1362 {
1363 	struct lll_adv_sync *lll_sync;
1364 	struct ll_adv_sync_set *sync;
1365 	struct lll_adv_iso *lll_iso;
1366 	uint32_t ticks_to_expire;
1367 	struct pdu_big_info *bi;
1368 	uint32_t ticks_current;
1369 	uint64_t payload_count;
1370 	struct pdu_adv *pdu;
1371 	uint8_t ticker_id;
1372 	uint16_t lazy;
1373 	uint8_t retry;
1374 	uint8_t id;
1375 
1376 	sync = param;
1377 	lll_sync = &sync->lll;
1378 	lll_iso = lll_sync->iso;
1379 	ticker_id = TICKER_ID_ADV_ISO_BASE + lll_iso->handle;
1380 
1381 	id = TICKER_NULL;
1382 	ticks_to_expire = 0U;
1383 	ticks_current = 0U;
1384 	retry = 4U;
1385 	do {
1386 		uint32_t volatile ret_cb;
1387 		uint32_t ticks_previous;
1388 		uint32_t ret;
1389 		bool success;
1390 
1391 		ticks_previous = ticks_current;
1392 
1393 		ret_cb = TICKER_STATUS_BUSY;
1394 		ret = ticker_next_slot_get_ext(TICKER_INSTANCE_ID_CTLR,
1395 					       TICKER_USER_ID_ULL_LOW,
1396 					       &id, &ticks_current,
1397 					       &ticks_to_expire, NULL, &lazy,
1398 					       NULL, NULL,
1399 					       ticker_op_cb, (void *)&ret_cb);
1400 		if (ret == TICKER_STATUS_BUSY) {
1401 			/* Busy wait until Ticker Job is enabled after any Radio
1402 			 * event is done using the Radio hardware. Ticker Job
1403 			 * ISR is disabled during Radio events in LOW_LAT
1404 			 * feature to avoid Radio ISR latencies.
1405 			 */
1406 			while (ret_cb == TICKER_STATUS_BUSY) {
1407 				ticker_job_sched(TICKER_INSTANCE_ID_CTLR,
1408 						 TICKER_USER_ID_ULL_LOW);
1409 			}
1410 		}
1411 
1412 		success = (ret_cb == TICKER_STATUS_SUCCESS);
1413 		LL_ASSERT(success);
1414 
1415 		LL_ASSERT((ticks_current == ticks_previous) || retry--);
1416 
1417 		LL_ASSERT(id != TICKER_NULL);
1418 	} while (id != ticker_id);
1419 
1420 	payload_count = lll_iso->payload_count +
1421 			(((uint64_t)lll_iso->latency_prepare + lazy) * lll_iso->bn);
1422 
1423 	pdu = lll_adv_sync_data_latest_peek(lll_sync);
1424 	bi = big_info_get(pdu);
1425 	big_info_offset_fill(bi, ticks_to_expire, 0U);
1426 	/* Assign the 39-bit payload count, retaining the 1 MS bit framing value */
1427 	bi->payload_count_framing[0] = payload_count;
1428 	bi->payload_count_framing[1] = payload_count >> 8;
1429 	bi->payload_count_framing[2] = payload_count >> 16;
1430 	bi->payload_count_framing[3] = payload_count >> 24;
1431 	bi->payload_count_framing[4] &= ~0x7F;
1432 	bi->payload_count_framing[4] |= (payload_count >> 32) & 0x7F;
1433 
1434 	/* Update Channel Map in the BIGInfo until Thread context gets a
1435 	 * chance to update the PDU with new Channel Map.
1436 	 */
1437 	if (lll_sync->iso_chm_done_req != lll_sync->iso_chm_done_ack) {
1438 		pdu_big_info_chan_map_phy_set(bi->chm_phy,
1439 					      lll_iso->data_chan_map,
1440 					      lll_iso->phy);
1441 	}
1442 }
1443 
1444 static void pdu_big_info_chan_map_phy_set(uint8_t *chm_phy, uint8_t *chan_map,
1445 					  uint8_t phy)
1446 {
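	/* The BIGInfo ChM_PHY field packs the 37-bit channel map in the low
	 * bits and the PHY in the top three bits of the last octet; below,
	 * find_lsb_set(phy) - 1 encodes the PHY as 0 (1M), 1 (2M) or 2 (Coded).
	 */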
1447 	(void)memcpy(chm_phy, chan_map, PDU_CHANNEL_MAP_SIZE);
1448 	chm_phy[4] &= 0x1F;
1449 	chm_phy[4] |= ((find_lsb_set(phy) - 1U) << 5);
1450 }
1451 
1452 static inline struct pdu_big_info *big_info_get(struct pdu_adv *pdu)
1453 {
1454 	struct pdu_adv_com_ext_adv *p;
1455 	struct pdu_adv_ext_hdr *h;
1456 	uint8_t *ptr;
1457 
1458 	p = (void *)&pdu->adv_ext_ind;
1459 	h = (void *)p->ext_hdr_adv_data;
1460 	ptr = h->data;
1461 
1462 	/* No AdvA and TargetA */
1463 
1464 	/* traverse through CTE Info, if present */
1465 	if (h->cte_info) {
1466 		ptr += sizeof(struct pdu_cte_info);
1467 	}
1468 
1469 	/* traverse through ADI, if present */
1470 	if (h->adi) {
1471 		ptr += sizeof(struct pdu_adv_adi);
1472 	}
1473 
1474 	/* traverse through aux ptr, if present */
1475 	if (h->aux_ptr) {
1476 		ptr += sizeof(struct pdu_adv_aux_ptr);
1477 	}
1478 
1479 	/* No SyncInfo */
1480 
1481 	/* traverse through Tx Power, if present */
1482 	if (h->tx_pwr) {
1483 		ptr++;
1484 	}
1485 
1486 	/* FIXME: Parse and find the Length encoded AD Format */
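	/* Until then it is assumed that the BIGInfo is the first AD structure
	 * in the ACAD, so the two octets skipped below are its Length and
	 * Type fields.
	 */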
1487 	ptr += 2;
1488 
1489 	return (void *)ptr;
1490 }
1491 
1492 static inline void big_info_offset_fill(struct pdu_big_info *bi,
1493 					uint32_t ticks_offset,
1494 					uint32_t start_us)
1495 {
1496 	uint32_t offs;
1497 
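	/* The BIGInfo offset is expressed in 30 us units; if the value does
	 * not fit in the offset field width (OFFS_UNIT_BITS), it is instead
	 * expressed in 300 us units and the Offset_Units flag is set.
	 */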
1498 	offs = HAL_TICKER_TICKS_TO_US(ticks_offset) - start_us;
1499 	offs = offs / OFFS_UNIT_30_US;
1500 	if (!!(offs >> OFFS_UNIT_BITS)) {
1501 		PDU_BIG_INFO_OFFS_SET(bi, offs / (OFFS_UNIT_300_US /
1502 						  OFFS_UNIT_30_US));
1503 		PDU_BIG_INFO_OFFS_UNITS_SET(bi, 1U);
1504 	} else {
1505 		PDU_BIG_INFO_OFFS_SET(bi, offs);
1506 		PDU_BIG_INFO_OFFS_UNITS_SET(bi, 0U);
1507 	}
1508 }
1509 
1510 static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
1511 		      uint32_t remainder, uint16_t lazy, uint8_t force,
1512 		      void *param)
1513 {
1514 	static struct lll_prepare_param p;
1515 	struct ll_adv_iso_set *adv_iso = param;
1516 	uint32_t remainder_us;
1517 	uint64_t event_count;
1518 	uint32_t ret;
1519 	uint8_t ref;
1520 
1521 	DEBUG_RADIO_PREPARE_A(1);
1522 
1523 	event_count = adv_iso->lll.payload_count / adv_iso->lll.bn;
1524 	for (int i = 0; i < adv_iso->lll.num_bis; i++) {
1525 		uint16_t stream_handle = adv_iso->lll.stream_handle[i];
1526 
1527 		ull_iso_lll_event_prepare(LL_BIS_ADV_HANDLE_FROM_IDX(stream_handle), event_count);
1528 	}
1529 
1530 	/* Increment prepare reference count */
1531 	ref = ull_ref_inc(&adv_iso->ull);
1532 	LL_ASSERT(ref);
1533 
1534 	/* Append timing parameters */
1535 	p.ticks_at_expire = ticks_at_expire;
1536 	p.remainder = remainder;
1537 	p.lazy = lazy;
1538 	p.force = force;
1539 	p.param = &adv_iso->lll;
1540 	mfy_lll_prepare.param = &p;
1541 
1542 	/* Kick LLL prepare */
1543 	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, 0,
1544 			     &mfy_lll_prepare);
1545 	LL_ASSERT(!ret);
1546 
1547 	/* Calculate the BIG reference point of current BIG event */
1548 	remainder_us = remainder;
1549 	hal_ticker_remove_jitter(&ticks_at_expire, &remainder_us);
1550 	ticks_at_expire &= HAL_TICKER_CNTR_MASK;
1551 	adv_iso->big_ref_point = isoal_get_wrapped_time_us(HAL_TICKER_TICKS_TO_US(ticks_at_expire),
1552 							   (remainder_us +
1553 							    EVENT_OVERHEAD_START_US));
1554 
1555 	DEBUG_RADIO_PREPARE_A(1);
1556 }
1557 
1558 static void ticker_op_cb(uint32_t status, void *param)
1559 {
1560 	*((uint32_t volatile *)param) = status;
1561 }
1562 
1563 static void ticker_stop_op_cb(uint32_t status, void *param)
1564 {
1565 	static memq_link_t link;
1566 	static struct mayfly mfy = {0U, 0U, &link, NULL, adv_iso_disable};
1567 	uint32_t ret;
1568 
1569 	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
1570 
1571 	/* Check if there are any pending LLL events that need to be aborted */
1572 	mfy.param = param;
1573 	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
1574 			     TICKER_USER_ID_ULL_HIGH, 0U, &mfy);
1575 	LL_ASSERT(!ret);
1576 }
1577 
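/* BIG teardown sequence implemented by the callbacks below: the ticker stop
 * issued from ull_adv_iso_done_terminate() completes in ticker_stop_op_cb(),
 * which schedules adv_iso_disable(); that either requests lll_disable() for a
 * pending LLL event or calls disabled_cb() directly, and disabled_cb() in turn
 * schedules tx_lll_flush() to flush queued Tx nodes and enqueue the BIG
 * terminate event towards the ULL context.
 */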
1578 static void adv_iso_disable(void *param)
1579 {
1580 	struct ll_adv_iso_set *adv_iso;
1581 	struct ull_hdr *hdr;
1582 
1583 	/* Check ref count to determine if any pending LLL events in pipeline */
1584 	adv_iso = param;
1585 	hdr = &adv_iso->ull;
1586 	if (ull_ref_get(hdr)) {
1587 		static memq_link_t link;
1588 		static struct mayfly mfy = {0U, 0U, &link, NULL, lll_disable};
1589 		uint32_t ret;
1590 
1591 		mfy.param = &adv_iso->lll;
1592 
1593 		/* Setup disabled callback to be called when ref count
1594 		 * returns to zero.
1595 		 */
1596 		LL_ASSERT(!hdr->disabled_cb);
1597 		hdr->disabled_param = mfy.param;
1598 		hdr->disabled_cb = disabled_cb;
1599 
1600 		/* Trigger LLL disable */
1601 		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
1602 				     TICKER_USER_ID_LLL, 0U, &mfy);
1603 		LL_ASSERT(!ret);
1604 	} else {
1605 		/* No pending LLL events */
1606 		disabled_cb(&adv_iso->lll);
1607 	}
1608 }
1609 
1610 static void disabled_cb(void *param)
1611 {
1612 	static memq_link_t link;
1613 	static struct mayfly mfy = {0U, 0U, &link, NULL, tx_lll_flush};
1614 	uint32_t ret;
1615 
1616 	mfy.param = param;
1617 	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
1618 			     TICKER_USER_ID_LLL, 0U, &mfy);
1619 	LL_ASSERT(!ret);
1620 }
1621 
1622 static void tx_lll_flush(void *param)
1623 {
1624 	struct ll_adv_iso_set *adv_iso;
1625 	struct lll_adv_iso *lll;
1626 	struct node_rx_pdu *rx;
1627 	memq_link_t *link;
1628 	uint8_t num_bis;
1629 
1630 	/* Get reference to ULL context */
1631 	lll = param;
1632 
1633 	/* Flush TX */
1634 	num_bis = lll->num_bis;
1635 	while (num_bis--) {
1636 		struct lll_adv_iso_stream *stream;
1637 		struct node_tx_iso *tx;
1638 		uint16_t stream_handle;
1639 		memq_link_t *link2;
1640 		uint16_t handle;
1641 
1642 		stream_handle = lll->stream_handle[num_bis];
1643 		handle = LL_BIS_ADV_HANDLE_FROM_IDX(stream_handle);
1644 		stream = ull_adv_iso_stream_get(stream_handle);
1645 
1646 		link2 = memq_dequeue(stream->memq_tx.tail, &stream->memq_tx.head,
1647 				     (void **)&tx);
1648 		while (link2) {
1649 			tx->next = link2;
1650 			ull_iso_lll_ack_enqueue(handle, tx);
1651 
1652 			link2 = memq_dequeue(stream->memq_tx.tail,
1653 					    &stream->memq_tx.head,
1654 					    (void **)&tx);
1655 		}
1656 	}
1657 
1658 	/* Get the terminate structure reserved in the ISO context.
1659 	 * The terminate reason and connection handle should already be
1660 	 * populated before this mayfly function was scheduled.
1661 	 */
1662 	adv_iso = HDR_LLL2ULL(lll);
1663 	rx = (void *)&adv_iso->node_rx_terminate;
1664 	link = rx->hdr.link;
1665 	LL_ASSERT(link);
1666 	rx->hdr.link = NULL;
1667 
1668 	/* Enqueue the terminate towards ULL context */
1669 	ull_rx_put_sched(link, rx);
1670 }
1671