1 /*
2  * Copyright (c) 2021 Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <soc.h>
8 #include <zephyr/kernel.h>
9 #include <zephyr/sys/byteorder.h>
10 #include <zephyr/bluetooth/hci_types.h>
11 
12 #include "hal/cpu.h"
13 #include "hal/ccm.h"
14 #include "hal/ticker.h"
15 
16 #include "util/util.h"
17 #include "util/mem.h"
18 #include "util/memq.h"
19 #include "util/mfifo.h"
20 #include "util/mayfly.h"
21 
22 #include "ticker/ticker.h"
23 
24 #include "pdu_df.h"
25 #include "lll/pdu_vendor.h"
26 #include "pdu.h"
27 
28 #include "lll.h"
29 #include "lll/lll_vendor.h"
30 #include "lll/lll_adv_types.h"
31 #include "lll_adv.h"
32 #include "lll/lll_adv_pdu.h"
33 #include "lll_adv_iso.h"
34 #include "lll_iso_tx.h"
35 
36 #include "isoal.h"
37 
38 #include "ull_adv_types.h"
39 #include "ull_iso_types.h"
40 
41 #include "ull_internal.h"
42 #include "ull_adv_internal.h"
43 #include "ull_chan_internal.h"
44 #include "ull_sched_internal.h"
45 #include "ull_iso_internal.h"
46 
47 #include "ll.h"
48 #include "ll_feat.h"
49 
50 #include "bt_crypto.h"
51 
52 #include "hal/debug.h"
53 
54 /* Controller implementation dependent minimum Pre-Transmission Offset and
55  * Pre-Transmission Group Count to use when there is spare time in the
56  * BIG events.
57  * The Pre-Transmission Group Count configures how many future ISO SDUs from
58  * the Offset will be Pre-Transmitted in advance in the current BIG event.
59  *
60  * TODO: These could be a Kconfig option.
61  */
62 #define BT_CTLR_ADV_ISO_PTO_MIN         1U
63 #define BT_CTLR_ADV_ISO_PTO_GROUP_COUNT 1U
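
/* Illustrative note (implementation behavior, not a spec requirement): with
 * BT_CTLR_ADV_ISO_PTO_MIN = 1 and BT_CTLR_ADV_ISO_PTO_GROUP_COUNT = 1, at most
 * one group of BN payloads per BIS is pre-transmitted, and the pre-transmitted
 * payloads are taken from at least one ISO interval ahead of the current BIG
 * event.
 */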
64 
65 static int init_reset(void);
66 static struct ll_adv_iso_set *adv_iso_get(uint8_t handle);
67 static struct stream *adv_iso_stream_acquire(void);
68 static uint16_t adv_iso_stream_handle_get(struct lll_adv_iso_stream *stream);
69 static uint8_t ptc_calc(const struct lll_adv_iso *lll, uint32_t event_spacing,
70 			uint32_t event_spacing_max);
71 static uint32_t adv_iso_time_get(const struct ll_adv_iso_set *adv_iso, bool max);
72 static uint32_t adv_iso_start(struct ll_adv_iso_set *adv_iso,
73 			      uint32_t iso_interval_us);
74 static uint8_t adv_iso_chm_update(uint8_t big_handle);
75 static void adv_iso_chm_complete_commit(struct lll_adv_iso *lll_iso);
76 static void mfy_iso_offset_get(void *param);
77 static void pdu_big_info_chan_map_phy_set(uint8_t *chm_phy, uint8_t *chan_map,
78 					  uint8_t phy);
79 static inline struct pdu_big_info *big_info_get(struct pdu_adv *pdu);
80 static inline void big_info_offset_fill(struct pdu_big_info *bi,
81 					uint32_t ticks_offset,
82 					uint32_t start_us);
83 static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
84 		      uint32_t remainder, uint16_t lazy, uint8_t force,
85 		      void *param);
86 static void ticker_op_cb(uint32_t status, void *param);
87 static void ticker_stop_op_cb(uint32_t status, void *param);
88 static void adv_iso_disable(void *param);
89 static void disabled_cb(void *param);
90 static void tx_lll_flush(void *param);
91 
92 static memq_link_t link_lll_prepare;
93 static struct mayfly mfy_lll_prepare = {0U, 0U, &link_lll_prepare, NULL, NULL};
94 
95 static struct ll_adv_iso_set ll_adv_iso[CONFIG_BT_CTLR_ADV_ISO_SET];
96 static struct lll_adv_iso_stream
97 			stream_pool[CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT];
98 static void *stream_free;
99 
100 static uint8_t big_create(uint8_t big_handle, uint8_t adv_handle, uint8_t num_bis,
101 			  uint32_t sdu_interval, uint16_t max_sdu,
102 			  uint16_t max_latency, uint8_t rtn, uint8_t phy,
103 			  uint8_t packing, uint8_t framing, uint8_t encryption,
104 			  uint8_t *bcode,
105 			  uint16_t iso_interval, uint8_t nse, uint16_t max_pdu,
106 			  uint8_t bn, uint8_t irc, uint8_t pto, bool test_config)
107 {
108 	uint8_t bi_ad[PDU_BIG_INFO_ENCRYPTED_SIZE + 2U];
109 	struct lll_adv_sync *lll_adv_sync;
110 	struct lll_adv_iso *lll_adv_iso;
111 	struct ll_adv_iso_set *adv_iso;
112 	struct pdu_adv *pdu_prev, *pdu;
113 	struct pdu_big_info *big_info;
114 	uint32_t ticks_slot_overhead;
115 	struct ll_adv_sync_set *sync;
116 	struct ll_adv_aux_set *aux;
117 	uint32_t event_spacing_max;
118 	uint8_t pdu_big_info_size;
119 	uint32_t iso_interval_us;
120 	uint32_t latency_packing;
121 	uint32_t ticks_slot_sync;
122 	uint32_t ticks_slot_aux;
123 	memq_link_t *link_cmplt;
124 	memq_link_t *link_term;
125 	struct ll_adv_set *adv;
126 	uint32_t slot_overhead;
127 	uint32_t event_spacing;
128 	uint16_t ctrl_spacing;
129 	uint8_t sdu_per_event;
130 	uint8_t ter_idx;
131 	uint32_t ret;
132 	uint8_t err;
133 	int res;
134 
135 	adv_iso = adv_iso_get(big_handle);
136 
137 	/* Already created */
138 	if (!adv_iso || adv_iso->lll.adv) {
139 		return BT_HCI_ERR_CMD_DISALLOWED;
140 	}
141 
142 	/* No advertising set created */
143 	adv = ull_adv_is_created_get(adv_handle);
144 	if (!adv) {
145 		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
146 	}
147 
148 	/* Does not identify a periodic advertising train, or the
149 	 * periodic advertising train is already associated
150 	 * with another BIG.
151 	 */
152 	lll_adv_sync = adv->lll.sync;
153 	if (!lll_adv_sync || lll_adv_sync->iso) {
154 		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
155 	}
156 
157 	/* Check if encryption supported */
158 	if (!IS_ENABLED(CONFIG_BT_CTLR_BROADCAST_ISO_ENC) &&
159 	    encryption) {
160 		return BT_HCI_ERR_CMD_DISALLOWED;
161 	}
162 
163 	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
164 		if (num_bis == 0U || num_bis > 0x1F) {
165 			return BT_HCI_ERR_INVALID_PARAM;
166 		}
167 
168 		if (sdu_interval < 0x000100 || sdu_interval > 0x0FFFFF) {
169 			return BT_HCI_ERR_INVALID_PARAM;
170 		}
171 
172 		if (max_sdu < 0x0001 || max_sdu > 0x0FFF) {
173 			return BT_HCI_ERR_INVALID_PARAM;
174 		}
175 
176 		if (phy > (BT_HCI_LE_EXT_SCAN_PHY_1M |
177 			   BT_HCI_LE_EXT_SCAN_PHY_2M |
178 			   BT_HCI_LE_EXT_SCAN_PHY_CODED)) {
179 			return BT_HCI_ERR_INVALID_PARAM;
180 		}
181 
182 		if (packing > 1U) {
183 			return BT_HCI_ERR_INVALID_PARAM;
184 		}
185 
186 		if (framing > 1U) {
187 			return BT_HCI_ERR_INVALID_PARAM;
188 		}
189 
190 		if (encryption > 1U) {
191 			return BT_HCI_ERR_INVALID_PARAM;
192 		}
193 
194 		if (test_config) {
195 			if (!IN_RANGE(iso_interval, 0x0004, 0x0C80)) {
196 				return BT_HCI_ERR_INVALID_PARAM;
197 			}
198 
199 			if (!IN_RANGE(nse, 0x01, 0x1F)) {
200 				return BT_HCI_ERR_INVALID_PARAM;
201 			}
202 
203 			if (!IN_RANGE(max_pdu, 0x01, MIN(0xFB, LL_BIS_OCTETS_TX_MAX))) {
204 				return BT_HCI_ERR_INVALID_PARAM;
205 			}
206 
207 			if (!IN_RANGE(bn, 0x01, 0x07)) {
208 				return BT_HCI_ERR_INVALID_PARAM;
209 			}
210 
211 			if (!IN_RANGE(irc, 0x01, 0x0F)) {
212 				return BT_HCI_ERR_INVALID_PARAM;
213 			}
214 
215 			if (pto > 0x0F) {
216 				return BT_HCI_ERR_INVALID_PARAM;
217 			}
218 
219 			if (pto && !(bn * irc < nse)) {
220 				return BT_HCI_ERR_INVALID_PARAM;
221 			}
222 		} else {
223 			if (max_latency > 0x0FA0) {
224 				return BT_HCI_ERR_INVALID_PARAM;
225 			}
226 
227 			if (rtn > 0x0F) {
228 				return BT_HCI_ERR_INVALID_PARAM;
229 			}
230 		}
231 	}
232 
233 	/* Check if free BISes available */
234 	if (mem_free_count_get(stream_free) < num_bis) {
235 		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
236 	}
237 
238 	/* Allocate link buffer for created event */
239 	link_cmplt = ll_rx_link_alloc();
240 	if (!link_cmplt) {
241 		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
242 	}
243 
244 	/* Allocate link buffer for sync lost event */
245 	link_term = ll_rx_link_alloc();
246 	if (!link_term) {
247 		ll_rx_link_release(link_cmplt);
248 
249 		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
250 	}
251 
252 	/* Check if aux context allocated before we are creating ISO */
253 	if (adv->lll.aux) {
254 		aux = HDR_LLL2ULL(adv->lll.aux);
255 	} else {
256 		aux = NULL;
257 	}
258 
259 	/* Calculate overheads due to extended advertising. */
260 	if (aux && aux->is_started) {
261 		ticks_slot_aux = aux->ull.ticks_slot;
262 		if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
263 			ticks_slot_overhead =
264 				MAX(aux->ull.ticks_active_to_start,
265 				    aux->ull.ticks_prepare_to_start);
266 		} else {
267 			ticks_slot_overhead = 0U;
268 		}
269 	} else {
270 		uint32_t time_us;
271 
272 		time_us = PDU_AC_US(PDU_AC_PAYLOAD_SIZE_MAX, adv->lll.phy_s,
273 				    adv->lll.phy_flags) +
274 			  EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
275 		ticks_slot_aux = HAL_TICKER_US_TO_TICKS_CEIL(time_us);
276 		if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
277 			/* Assume primary overheads may be inherited by aux */
278 			ticks_slot_overhead =
279 				MAX(adv->ull.ticks_active_to_start,
280 				    adv->ull.ticks_prepare_to_start);
281 		} else {
282 			ticks_slot_overhead = 0U;
283 		}
284 	}
285 	ticks_slot_aux += ticks_slot_overhead;
286 
287 	/* Calculate overheads due to periodic advertising. */
288 	sync = HDR_LLL2ULL(lll_adv_sync);
289 	if (sync->is_started) {
290 		ticks_slot_sync = sync->ull.ticks_slot;
291 	} else {
292 		uint32_t time_us;
293 
294 		time_us = PDU_AC_US(PDU_AC_PAYLOAD_SIZE_MAX,
295 				    sync->lll.adv->phy_s,
296 				    sync->lll.adv->phy_flags) +
297 			  EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
298 		ticks_slot_sync = HAL_TICKER_US_TO_TICKS_CEIL(time_us);
299 	}
300 
301 	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
302 		ticks_slot_overhead = MAX(sync->ull.ticks_active_to_start,
303 					  sync->ull.ticks_prepare_to_start);
304 	} else {
305 		ticks_slot_overhead = 0U;
306 	}
307 
308 	ticks_slot_sync += ticks_slot_overhead;
309 
310 	/* Calculate total overheads due to extended and periodic advertising */
311 	if (false) {
312 
313 #if defined(CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET)
314 	} else if (CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET > 0U) {
315 		ticks_slot_overhead = MAX(ticks_slot_aux, ticks_slot_sync);
316 #endif /* CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET */
317 
318 	} else {
319 		ticks_slot_overhead = ticks_slot_aux + ticks_slot_sync;
320 	}
321 
322 	/* Store parameters in LLL context */
323 	/* TODO: Move parameters to ULL if only accessed by ULL */
324 	lll_adv_iso = &adv_iso->lll;
325 	lll_adv_iso->handle = big_handle;
326 	lll_adv_iso->phy = phy;
327 	lll_adv_iso->phy_flags = PHY_FLAGS_S8;
328 
329 	/* Mandatory Num_BIS = 1 */
330 	lll_adv_iso->num_bis = num_bis;
331 
332 	/* Allocate streams */
333 	for (uint8_t i = 0U; i < num_bis; i++) {
334 		struct lll_adv_iso_stream *stream;
335 
336 		stream = (void *)adv_iso_stream_acquire();
337 		stream->big_handle = big_handle;
338 		stream->dp = NULL;
339 
340 		if (!stream->link_tx_free) {
341 			stream->link_tx_free = &stream->link_tx;
342 		}
343 		memq_init(stream->link_tx_free, &stream->memq_tx.head,
344 			  &stream->memq_tx.tail);
345 		stream->link_tx_free = NULL;
346 
347 		stream->pkt_seq_num = 0U;
348 
349 		lll_adv_iso->stream_handle[i] =
350 			adv_iso_stream_handle_get(stream);
351 	}
352 
353 	if (test_config) {
354 		lll_adv_iso->bn = bn;
355 		lll_adv_iso->iso_interval = iso_interval;
356 		lll_adv_iso->irc = irc;
357 		lll_adv_iso->nse = nse;
358 		lll_adv_iso->max_pdu = max_pdu;
359 		iso_interval_us = iso_interval * PERIODIC_INT_UNIT_US;
360 
361 	} else {
362 		if (framing) {
363 			/* Try to allocate room for one SDU + header */
364 			lll_adv_iso->max_pdu = MIN(LL_BIS_OCTETS_TX_MAX,
365 						   max_sdu + PDU_ISO_SEG_HDR_SIZE +
366 						    PDU_ISO_SEG_TIMEOFFSET_SIZE);
367 		} else {
368 			lll_adv_iso->max_pdu = MIN(LL_BIS_OCTETS_TX_MAX, max_sdu);
369 		}
370 
371 		/* FIXME: SDUs per max latency; consider how to use Pre-Transmission in
372 		 *        the calculations.
373 		 *        Decide based on how the ptc_calc function forces the use of
374 		 *        Pre-Transmission when not using the test command. Refer to the
375 		 *        comments in the ptc_calc function.
376 		 */
377 		sdu_per_event = MAX((max_latency * USEC_PER_MSEC / sdu_interval), 2U) -
378 				1U;
379 
380 		/* BN (Burst Count), Mandatory BN = 1 */
381 		bn = DIV_ROUND_UP(max_sdu, lll_adv_iso->max_pdu) * sdu_per_event;
382 		if (bn > PDU_BIG_BN_MAX) {
383 			/* Restrict each BIG event to maximum burst per BIG event */
384 			lll_adv_iso->bn = PDU_BIG_BN_MAX;
385 
386 			/* Round the required burst count per SDU up to the next
387 			 * multiple of the maximum burst per BIG event.
388 			 */
389 			bn = DIV_ROUND_UP(bn, PDU_BIG_BN_MAX) * PDU_BIG_BN_MAX;
390 		} else {
391 			lll_adv_iso->bn = bn;
392 		}
393 
394 		/* Calculate ISO interval */
395 		/* The ISO interval shall be at least the SDU interval, or an
396 		 * integer multiple of the SDU interval for unframed PDUs.
397 		 */
398 		iso_interval_us = ((sdu_interval * lll_adv_iso->bn * sdu_per_event) /
399 				(bn * PERIODIC_INT_UNIT_US)) * PERIODIC_INT_UNIT_US;
400 		lll_adv_iso->iso_interval = iso_interval_us / PERIODIC_INT_UNIT_US;
401 	}
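
	/* Worked example with illustrative numbers (assuming LL_BIS_OCTETS_TX_MAX
	 * is at least 100): sdu_interval = 10 ms, max_latency = 30 ms, unframed,
	 * max_sdu = 100 octets gives max_pdu = 100, sdu_per_event = 2 and bn = 2,
	 * so iso_interval_us = ((10000 * 2 * 2) / (2 * 1250)) * 1250 = 20000 us,
	 * i.e. one BIG event every 20 ms carries two SDUs per BIS.
	 */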
402 
403 	/* Calculate max available ISO event spacing */
404 	slot_overhead = HAL_TICKER_TICKS_TO_US(ticks_slot_overhead);
405 	if (slot_overhead < iso_interval_us) {
406 		event_spacing_max = iso_interval_us - slot_overhead;
407 	} else {
408 		event_spacing_max = 0U;
409 	}
410 
411 	/* Negotiate event spacing */
412 	do {
413 		if (!test_config) {
414 			/* Immediate Repetition Count (IRC), Mandatory IRC = 1 */
415 			lll_adv_iso->irc = rtn + 1U;
416 
417 			/* Calculate NSE (No. of Sub Events), Mandatory NSE = 1,
418 			 * without PTO added.
419 			 */
420 			lll_adv_iso->nse = lll_adv_iso->bn * lll_adv_iso->irc;
421 		}
422 
423 		/* NOTE: Calculate sub_interval; if interleaved, it is Num_BIS x
424 		 *       BIS_Spacing (by BT Spec.),
425 		 *       else if sequential, then by our implementation, let's keep it
426 		 *       at max_tx_time for Max_PDU + tMSS.
427 		 */
428 		lll_adv_iso->sub_interval = PDU_BIS_US(lll_adv_iso->max_pdu, encryption,
429 						phy, lll_adv_iso->phy_flags) +
430 						EVENT_MSS_US;
431 		ctrl_spacing = PDU_BIS_US(sizeof(struct pdu_big_ctrl), encryption, phy,
432 					lll_adv_iso->phy_flags);
433 		latency_packing = lll_adv_iso->sub_interval * lll_adv_iso->nse *
434 					lll_adv_iso->num_bis;
435 		event_spacing = latency_packing + ctrl_spacing +
436 				EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
437 
438 		/* Check if ISO interval too small to fit the calculated BIG event
439 		 * timing required for the supplied BIG create parameters.
440 		 */
441 		if (event_spacing > event_spacing_max) {
442 			/* Check if we can reduce RTN to meet the event spacing */
443 			if (!test_config && rtn) {
444 				rtn--;
445 			} else {
446 				break;
447 			}
448 		}
449 	} while (event_spacing > event_spacing_max);
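
	/* Note on the negotiation above: lowering rtn by one lowers IRC by one and
	 * hence NSE by BN, which shrinks latency_packing by
	 * sub_interval * BN * num_bis per iteration, until the BIG event fits in
	 * event_spacing_max or rtn reaches zero.
	 */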
450 
451 	/* Check if ISO interval too small to fit the calculated BIG event
452 	 * timing required for the supplied BIG create parameters.
453 	 */
454 	if (event_spacing > event_spacing_max) {
455 		/* Release allocated link buffers */
456 		ll_rx_link_release(link_cmplt);
457 		ll_rx_link_release(link_term);
458 
459 		return BT_HCI_ERR_INVALID_PARAM;
460 	}
461 
462 	/* Decision to use requested Pre-Transmission Offset or force Pre-Transmission when
463 	 * possible (Zephyr Controller decision).
464 	 */
465 	lll_adv_iso->ptc = ptc_calc(lll_adv_iso, event_spacing, event_spacing_max);
466 
467 	if (test_config) {
468 		lll_adv_iso->pto = pto;
469 
470 		if (pto && !lll_adv_iso->ptc) {
471 			return BT_HCI_ERR_INVALID_PARAM;
472 		}
473 	} else {
474 		/* Pre-Transmission Offset (PTO) */
475 		if (lll_adv_iso->ptc) {
476 			lll_adv_iso->pto = MAX((bn / lll_adv_iso->bn), BT_CTLR_ADV_ISO_PTO_MIN);
477 		} else {
478 			lll_adv_iso->pto = 0U;
479 		}
480 
481 		/* Make room for pre-transmissions */
482 		lll_adv_iso->nse += lll_adv_iso->ptc;
483 	}
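
	/* Illustration of the non-test path above: pto is set to
	 * MAX(bn / lll_adv_iso->bn, BT_CTLR_ADV_ISO_PTO_MIN), e.g. if the per-SDU
	 * burst (local bn) was rounded up to twice the per-event burst
	 * (lll_adv_iso->bn) then PTO becomes 2, otherwise the minimum offset is
	 * used; the PTC subevents are then added on top of NSE.
	 */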
484 
485 	/* Based on packing requested, sequential or interleaved */
486 	if (false) {
487 
488 #if defined(CONFIG_BT_CTLR_ADV_ISO_INTERLEAVED)
489 	} else if (packing) {
490 		/* Interleaved Packing */
491 		lll_adv_iso->bis_spacing = lll_adv_iso->sub_interval;
492 		lll_adv_iso->sub_interval = lll_adv_iso->bis_spacing *
493 					    lll_adv_iso->num_bis;
494 #endif /* CONFIG_BT_CTLR_ADV_ISO_INTERLEAVED */
495 
496 #if defined(CONFIG_BT_CTLR_ADV_ISO_SEQUENTIAL)
497 	} else if (true) {
498 		/* Sequential Packing */
499 		lll_adv_iso->bis_spacing = lll_adv_iso->sub_interval *
500 					   lll_adv_iso->nse;
501 #endif /* CONFIG_BT_CTLR_ADV_ISO_SEQUENTIAL */
502 
503 	} else {
504 		return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
505 	}
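
	/* Illustration with num_bis = 2 and nse = 3: interleaved packing keeps
	 * bis_spacing at one subevent duration and stretches sub_interval to
	 * 2 x bis_spacing (BISes alternate per subevent), while sequential packing
	 * sets bis_spacing to 3 x sub_interval (all subevents of one BIS are sent
	 * before the next BIS starts).
	 */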
506 
507 	/* TODO: Group count, GC = NSE / BN; PTO = GC - IRC;
508 	 *       Is this required?
509 	 */
510 
511 	lll_adv_iso->sdu_interval = sdu_interval;
512 	lll_adv_iso->max_sdu = max_sdu;
513 
514 	res = util_saa_le32(lll_adv_iso->seed_access_addr, big_handle);
515 	LL_ASSERT(!res);
516 
517 	(void)lll_csrand_get(lll_adv_iso->base_crc_init,
518 			     sizeof(lll_adv_iso->base_crc_init));
519 	lll_adv_iso->data_chan_count =
520 		ull_chan_map_get(lll_adv_iso->data_chan_map);
521 	lll_adv_iso->payload_count = 0U;
522 	lll_adv_iso->latency_prepare = 0U;
523 	lll_adv_iso->latency_event = 0U;
524 	lll_adv_iso->term_req = 0U;
525 	lll_adv_iso->term_ack = 0U;
526 	lll_adv_iso->chm_req = 0U;
527 	lll_adv_iso->chm_ack = 0U;
528 	lll_adv_iso->ctrl_expire = 0U;
529 
530 	/* TODO: framing support */
531 	lll_adv_iso->framing = framing;
532 
533 	/* Allocate next PDU */
534 	err = ull_adv_sync_pdu_alloc(adv, ULL_ADV_PDU_EXTRA_DATA_ALLOC_IF_EXIST,
535 				     &pdu_prev, &pdu, NULL, NULL, &ter_idx);
536 	if (err) {
537 		/* Insufficient Advertising PDU buffers to allocate new PDU
538 		 * to add BIGInfo into the ACAD of the Periodic Advertising.
539 		 */
540 
541 		/* Release allocated link buffers */
542 		ll_rx_link_release(link_cmplt);
543 		ll_rx_link_release(link_term);
544 
545 		return err;
546 	}
547 
548 	/* Add ACAD to AUX_SYNC_IND */
549 	if (encryption) {
550 		pdu_big_info_size = PDU_BIG_INFO_ENCRYPTED_SIZE;
551 	} else {
552 		pdu_big_info_size = PDU_BIG_INFO_CLEARTEXT_SIZE;
553 	}
554 	bi_ad[PDU_ADV_DATA_HEADER_LEN_OFFSET] = pdu_big_info_size + (PDU_ADV_DATA_HEADER_SIZE -
555 						PDU_ADV_DATA_HEADER_LEN_SIZE);
556 	bi_ad[PDU_ADV_DATA_HEADER_TYPE_OFFSET] = BT_DATA_BIG_INFO;
557 	big_info = (void *)&bi_ad[PDU_ADV_DATA_HEADER_DATA_OFFSET];
558 
559 	/* big_info->offset, big_info->offset_units and
560 	 * big_info->payload_count_framing[] will be filled by periodic
561 	 * advertising event.
562 	 */
563 
564 	PDU_BIG_INFO_ISO_INTERVAL_SET(big_info, iso_interval_us / PERIODIC_INT_UNIT_US);
565 	PDU_BIG_INFO_NUM_BIS_SET(big_info, lll_adv_iso->num_bis);
566 	PDU_BIG_INFO_NSE_SET(big_info, lll_adv_iso->nse);
567 	PDU_BIG_INFO_BN_SET(big_info, lll_adv_iso->bn);
568 	PDU_BIG_INFO_SUB_INTERVAL_SET(big_info, lll_adv_iso->sub_interval);
569 	PDU_BIG_INFO_PTO_SET(big_info, lll_adv_iso->pto);
570 	PDU_BIG_INFO_SPACING_SET(big_info, lll_adv_iso->bis_spacing);
571 	PDU_BIG_INFO_IRC_SET(big_info, lll_adv_iso->irc);
572 
573 	big_info->max_pdu = lll_adv_iso->max_pdu;
574 	big_info->rfu = 0U;
575 
576 	(void)memcpy(&big_info->seed_access_addr, lll_adv_iso->seed_access_addr,
577 		     sizeof(big_info->seed_access_addr));
578 	PDU_BIG_INFO_SDU_INTERVAL_SET(big_info, sdu_interval);
579 	PDU_BIG_INFO_MAX_SDU_SET(big_info, max_sdu);
580 	(void)memcpy(&big_info->base_crc_init, lll_adv_iso->base_crc_init,
581 		     sizeof(big_info->base_crc_init));
582 	pdu_big_info_chan_map_phy_set(big_info->chm_phy,
583 				      lll_adv_iso->data_chan_map,
584 				      phy);
585 	/* Assign the 39-bit payload count, and 1-bit framing */
586 	big_info->payload_count_framing[0] = lll_adv_iso->payload_count;
587 	big_info->payload_count_framing[1] = lll_adv_iso->payload_count >> 8;
588 	big_info->payload_count_framing[2] = lll_adv_iso->payload_count >> 16;
589 	big_info->payload_count_framing[3] = lll_adv_iso->payload_count >> 24;
590 	big_info->payload_count_framing[4] = lll_adv_iso->payload_count >> 32;
591 	big_info->payload_count_framing[4] &= ~BIT(7);
592 	big_info->payload_count_framing[4] |= ((framing & 0x01) << 7);
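
	/* Layout note: payload_count_framing[0..4] hold the 39-bit BIS payload
	 * count in little-endian octet order; bit 7 of the last octet is cleared
	 * above and then set from the 1-bit framing parameter.
	 */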
593 
594 	if (IS_ENABLED(CONFIG_BT_CTLR_BROADCAST_ISO_ENC) && encryption) {
595 		const uint8_t BIG1[16] = {0x31, 0x47, 0x49, 0x42, };
596 		const uint8_t BIG2[4]  = {0x32, 0x47, 0x49, 0x42};
597 		const uint8_t BIG3[4]  = {0x33, 0x47, 0x49, 0x42};
598 		struct ccm *ccm_tx;
599 		uint8_t igltk[16];
600 		uint8_t gltk[16];
601 		uint8_t gsk[16];
602 
603 		/* Fill GIV and GSKD */
604 		(void)lll_csrand_get(lll_adv_iso->giv,
605 				     sizeof(lll_adv_iso->giv));
606 		(void)memcpy(big_info->giv, lll_adv_iso->giv,
607 			     sizeof(big_info->giv));
608 		(void)lll_csrand_get(big_info->gskd, sizeof(big_info->gskd));
609 
610 		/* Calculate GSK */
611 		err = bt_crypto_h7(BIG1, bcode, igltk);
612 		LL_ASSERT(!err);
613 		err = bt_crypto_h6(igltk, BIG2, gltk);
614 		LL_ASSERT(!err);
615 		err = bt_crypto_h8(gltk, big_info->gskd, BIG3, gsk);
616 		LL_ASSERT(!err);
617 
618 		/* Prepare the CCM parameters */
619 		ccm_tx = &lll_adv_iso->ccm_tx;
620 		ccm_tx->direction = 1U;
621 		(void)memcpy(&ccm_tx->iv[4], &lll_adv_iso->giv[4], 4U);
622 		(void)mem_rcopy(ccm_tx->key, gsk, sizeof(ccm_tx->key));
623 
624 		/* NOTE: counter is filled in LLL */
625 
626 		lll_adv_iso->enc = 1U;
627 	} else {
628 		lll_adv_iso->enc = 0U;
629 	}
630 
631 	err = ull_adv_sync_add_to_acad(lll_adv_sync, pdu_prev, pdu, bi_ad,
632 				       pdu_big_info_size + PDU_ADV_DATA_HEADER_SIZE);
633 	if (err) {
634 		/* Failed to add BIGInfo into the ACAD of the Periodic
635 		 * Advertising.
636 		 */
637 
638 		/* Release allocated link buffers */
639 		ll_rx_link_release(link_cmplt);
640 		ll_rx_link_release(link_term);
641 
642 		return err;
643 	}
644 
645 	/* Associate the ISO instance with an Extended Advertising instance */
646 	lll_adv_iso->adv = &adv->lll;
647 
648 	/* Store the link buffer for ISO create and terminate complete event */
649 	adv_iso->node_rx_complete.hdr.link = link_cmplt;
650 	adv_iso->node_rx_terminate.rx.hdr.link = link_term;
651 
652 	/* Initialise LLL header members */
653 	lll_hdr_init(lll_adv_iso, adv_iso);
654 
655 	/* Start sending BIS empty data packet for each BIS */
656 	ret = adv_iso_start(adv_iso, iso_interval_us);
657 	if (ret) {
658 		/* Failed to schedule BIG events */
659 
660 		/* Reset the association of ISO instance with the Extended
661 		 * Advertising Instance
662 		 */
663 		lll_adv_iso->adv = NULL;
664 
665 		/* Release allocated link buffers */
666 		ll_rx_link_release(link_cmplt);
667 		ll_rx_link_release(link_term);
668 
669 		return BT_HCI_ERR_CMD_DISALLOWED;
670 	}
671 
672 	/* Associate the ISO instance with a Periodic Advertising */
673 	lll_adv_sync->iso = lll_adv_iso;
674 
675 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
676 	/* Notify the sync instance */
677 	ull_adv_sync_iso_created(HDR_LLL2ULL(lll_adv_sync));
678 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
679 
680 	/* Commit the BIGInfo in the ACAD field of Periodic Advertising */
681 	lll_adv_sync_data_enqueue(lll_adv_sync, ter_idx);
682 
683 	return BT_HCI_ERR_SUCCESS;
684 }
685 
686 uint8_t ll_big_create(uint8_t big_handle, uint8_t adv_handle, uint8_t num_bis,
687 		      uint32_t sdu_interval, uint16_t max_sdu,
688 		      uint16_t max_latency, uint8_t rtn, uint8_t phy,
689 		      uint8_t packing, uint8_t framing, uint8_t encryption,
690 		      uint8_t *bcode)
691 {
692 	return big_create(big_handle, adv_handle, num_bis, sdu_interval, max_sdu,
693 			  max_latency, rtn, phy, packing, framing, encryption, bcode,
694 			  0 /*iso_interval*/,
695 			  0 /*nse*/,
696 			  0 /*max_pdu*/,
697 			  0 /*bn*/,
698 			  0 /*irc*/,
699 			  0 /*pto*/,
700 			  false);
701 }
702 
703 uint8_t ll_big_test_create(uint8_t big_handle, uint8_t adv_handle,
704 			   uint8_t num_bis, uint32_t sdu_interval,
705 			   uint16_t iso_interval, uint8_t nse, uint16_t max_sdu,
706 			   uint16_t max_pdu, uint8_t phy, uint8_t packing,
707 			   uint8_t framing, uint8_t bn, uint8_t irc,
708 			   uint8_t pto, uint8_t encryption, uint8_t *bcode)
709 {
710 	return big_create(big_handle, adv_handle, num_bis, sdu_interval, max_sdu,
711 			  0 /*max_latency*/,
712 			  0 /*rtn*/,
713 			  phy, packing, framing, encryption, bcode,
714 			  iso_interval, nse, max_pdu, bn, irc, pto, true);
715 }
716 
717 uint8_t ll_big_terminate(uint8_t big_handle, uint8_t reason)
718 {
719 	struct lll_adv_sync *lll_adv_sync;
720 	struct lll_adv_iso *lll_adv_iso;
721 	struct ll_adv_iso_set *adv_iso;
722 	struct pdu_adv *pdu_prev, *pdu;
723 	struct node_rx_pdu *node_rx;
724 	struct lll_adv *lll_adv;
725 	struct ll_adv_set *adv;
726 	uint16_t stream_handle;
727 	uint16_t handle;
728 	uint8_t num_bis;
729 	uint8_t ter_idx;
730 	uint8_t err;
731 
732 	adv_iso = adv_iso_get(big_handle);
733 	if (!adv_iso) {
734 		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
735 	}
736 
737 	lll_adv_iso = &adv_iso->lll;
738 	lll_adv = lll_adv_iso->adv;
739 	if (!lll_adv) {
740 		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
741 	}
742 
743 	if (lll_adv_iso->term_req) {
744 		return BT_HCI_ERR_CMD_DISALLOWED;
745 	}
746 
747 	/* Remove ISO data path, keeping data from entering Tx pipeline */
748 	num_bis = lll_adv_iso->num_bis;
749 	while (num_bis--) {
750 		stream_handle = lll_adv_iso->stream_handle[num_bis];
751 		handle = LL_BIS_ADV_HANDLE_FROM_IDX(stream_handle);
752 		(void)ll_remove_iso_path(handle,
753 					 BIT(BT_HCI_DATAPATH_DIR_HOST_TO_CTLR));
754 	}
755 
756 	lll_adv_sync = lll_adv->sync;
757 	adv = HDR_LLL2ULL(lll_adv);
758 
759 	/* Allocate next PDU */
760 	err = ull_adv_sync_pdu_alloc(adv, ULL_ADV_PDU_EXTRA_DATA_ALLOC_IF_EXIST,
761 				     &pdu_prev, &pdu, NULL, NULL, &ter_idx);
762 	if (err) {
763 		return err;
764 	}
765 
766 	/* Remove BigInfo from ACAD in AUX_SYNC_IND */
767 	err = ull_adv_sync_remove_from_acad(lll_adv_sync, pdu_prev, pdu, BT_DATA_BIG_INFO);
768 	if (err) {
769 		return err;
770 	}
771 
772 	lll_adv_sync_data_enqueue(lll_adv_sync, ter_idx);
773 
774 	/* Prepare BIG terminate event, will be enqueued after tx flush  */
775 	node_rx = (void *)&adv_iso->node_rx_terminate;
776 	node_rx->hdr.type = NODE_RX_TYPE_BIG_TERMINATE;
777 	node_rx->hdr.handle = big_handle;
778 	node_rx->rx_ftr.param = adv_iso;
779 
780 	if (reason == BT_HCI_ERR_REMOTE_USER_TERM_CONN) {
781 		*((uint8_t *)node_rx->pdu) = BT_HCI_ERR_LOCALHOST_TERM_CONN;
782 	} else {
783 		*((uint8_t *)node_rx->pdu) = reason;
784 	}
785 
786 	/* Request terminate procedure */
787 	lll_adv_iso->term_reason = reason;
788 	lll_adv_iso->term_req = 1U;
789 
790 	return BT_HCI_ERR_SUCCESS;
791 }
792 
793 int ull_adv_iso_init(void)
794 {
795 	int err;
796 
797 	err = init_reset();
798 	if (err) {
799 		return err;
800 	}
801 
802 	return 0;
803 }
804 
805 int ull_adv_iso_reset(void)
806 {
807 	uint8_t handle;
808 	int err;
809 
810 	handle = CONFIG_BT_CTLR_ADV_ISO_SET;
811 	while (handle--) {
812 		struct lll_adv_sync *adv_sync_lll;
813 		struct lll_adv_iso *adv_iso_lll;
814 		struct ll_adv_iso_set *adv_iso;
815 		volatile uint32_t ret_cb;
816 		struct lll_adv *adv_lll;
817 		uint32_t ret;
818 		void *mark;
819 
820 		adv_iso = &ll_adv_iso[handle];
821 		adv_iso_lll = &adv_iso->lll;
822 		adv_lll = adv_iso_lll->adv;
823 		if (!adv_lll) {
824 			continue;
825 		}
826 
827 		mark = ull_disable_mark(adv_iso);
828 		LL_ASSERT(mark == adv_iso);
829 
830 		/* Stop event scheduling */
831 		ret_cb = TICKER_STATUS_BUSY;
832 		ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
833 				  TICKER_ID_ADV_ISO_BASE + adv_iso_lll->handle,
834 				  ull_ticker_status_give, (void *)&ret_cb);
835 		ret = ull_ticker_status_take(ret, &ret_cb);
836 		if (ret) {
837 			mark = ull_disable_unmark(adv_iso);
838 			LL_ASSERT(mark == adv_iso);
839 
840 			/* Assert as there shall be a ticker instance active */
841 			LL_ASSERT(false);
842 
843 			return BT_HCI_ERR_CMD_DISALLOWED;
844 		}
845 
846 		/* Abort any events in LLL pipeline */
847 		err = ull_disable(adv_iso_lll);
848 		LL_ASSERT(!err || (err == -EALREADY));
849 
850 		mark = ull_disable_unmark(adv_iso);
851 		LL_ASSERT(mark == adv_iso);
852 
853 		/* Reset associated streams */
854 		while (adv_iso_lll->num_bis--) {
855 			struct lll_adv_iso_stream *stream;
856 			uint16_t stream_handle;
857 
858 			stream_handle = adv_iso_lll->stream_handle[adv_iso_lll->num_bis];
859 			stream = ull_adv_iso_stream_get(stream_handle);
860 			if (stream) {
861 				stream->link_tx_free = NULL;
862 			}
863 		}
864 
865 		/* Remove Periodic Advertising association */
866 		adv_sync_lll = adv_lll->sync;
867 		if (adv_sync_lll) {
868 			adv_sync_lll->iso = NULL;
869 		}
870 
871 		/* Remove Extended Advertising association */
872 		adv_iso_lll->adv = NULL;
873 	}
874 
875 	err = init_reset();
876 	if (err) {
877 		return err;
878 	}
879 
880 	return 0;
881 }
882 
883 struct ll_adv_iso_set *ull_adv_iso_get(uint8_t handle)
884 {
885 	return adv_iso_get(handle);
886 }
887 
888 uint8_t ull_adv_iso_chm_update(void)
889 {
890 	uint8_t handle;
891 
892 	handle = CONFIG_BT_CTLR_ADV_ISO_SET;
893 	while (handle--) {
894 		(void)adv_iso_chm_update(handle);
895 	}
896 
897 	/* TODO: Should failure due to Channel Map Update being already in
898 	 *       progress be returned to caller?
899 	 */
900 	return 0;
901 }
902 
903 void ull_adv_iso_chm_complete(struct node_rx_pdu *rx)
904 {
905 	struct lll_adv_sync *sync_lll;
906 	struct lll_adv_iso *iso_lll;
907 	struct lll_adv *adv_lll;
908 
909 	iso_lll = rx->rx_ftr.param;
910 	adv_lll = iso_lll->adv;
911 	sync_lll = adv_lll->sync;
912 
913 	/* Update Channel Map in BIGInfo in the Periodic Advertising PDU */
914 	while (sync_lll->iso_chm_done_req != sync_lll->iso_chm_done_ack) {
915 		sync_lll->iso_chm_done_ack = sync_lll->iso_chm_done_req;
916 
917 		adv_iso_chm_complete_commit(iso_lll);
918 	}
919 }
920 
921 #if defined(CONFIG_BT_CTLR_HCI_ADV_HANDLE_MAPPING)
922 uint8_t ll_adv_iso_by_hci_handle_get(uint8_t hci_handle, uint8_t *handle)
923 {
924 	struct ll_adv_iso_set *adv_iso;
925 	uint8_t idx;
926 
927 	adv_iso =  &ll_adv_iso[0];
928 
929 	for (idx = 0U; idx < CONFIG_BT_CTLR_ADV_ISO_SET; idx++, adv_iso++) {
930 		if (adv_iso->lll.adv &&
931 		    (adv_iso->hci_handle == hci_handle)) {
932 			*handle = idx;
933 			return 0U;
934 		}
935 	}
936 
937 	return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
938 }
939 
940 uint8_t ll_adv_iso_by_hci_handle_new(uint8_t hci_handle, uint8_t *handle)
941 {
942 	struct ll_adv_iso_set *adv_iso, *adv_iso_empty;
943 	uint8_t idx;
944 
945 	adv_iso = &ll_adv_iso[0];
946 	adv_iso_empty = NULL;
947 
948 	for (idx = 0U; idx < CONFIG_BT_CTLR_ADV_ISO_SET; idx++, adv_iso++) {
949 		if (adv_iso->lll.adv) {
950 			if (adv_iso->hci_handle == hci_handle) {
951 				return BT_HCI_ERR_CMD_DISALLOWED;
952 			}
953 		} else if (!adv_iso_empty) {
954 			adv_iso_empty = adv_iso;
955 			*handle = idx;
956 		}
957 	}
958 
959 	if (adv_iso_empty) {
960 		memset(adv_iso_empty, 0U, sizeof(*adv_iso_empty));
961 		adv_iso_empty->hci_handle = hci_handle;
962 		return 0U;
963 	}
964 
965 	return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
966 }
967 #endif /* CONFIG_BT_CTLR_HCI_ADV_HANDLE_MAPPING */
968 
969 void ull_adv_iso_offset_get(struct ll_adv_sync_set *sync)
970 {
971 	static memq_link_t link;
972 	static struct mayfly mfy = {0U, 0U, &link, NULL, mfy_iso_offset_get};
973 	uint32_t ret;
974 
975 	mfy.param = sync;
976 	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1,
977 			     &mfy);
978 	LL_ASSERT(!ret);
979 }
980 
981 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
982 void ull_adv_iso_lll_biginfo_fill(struct pdu_adv *pdu, struct lll_adv_sync *lll_sync)
983 {
984 	struct lll_adv_iso *lll_iso;
985 	uint16_t latency_prepare;
986 	struct pdu_big_info *bi;
987 	uint64_t payload_count;
988 
989 	lll_iso = lll_sync->iso;
990 
991 	/* Calculate current payload count. If refcount is non-zero, we have called
992 	 * prepare and the LLL implementation has incremented latency_prepare already.
993 	 * In this case we need to subtract lazy + 1 from latency_prepare
994 	 */
995 	latency_prepare = lll_iso->latency_prepare;
996 	if (ull_ref_get(HDR_LLL2ULL(lll_iso))) {
997 		/* We are in post-prepare. latency_prepare is already
998 		 * incremented by lazy + 1 for next event
999 		 */
1000 		latency_prepare -= lll_iso->iso_lazy + 1;
1001 	}
1002 
1003 	payload_count = lll_iso->payload_count + ((latency_prepare +
1004 						   lll_iso->iso_lazy) * lll_iso->bn);
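
	/* Note: payload_count advances by BN per BIG event, so the value placed
	 * in the BIGInfo below is projected forward by the (latency_prepare +
	 * iso_lazy) events that elapse before the referenced BIG event.
	 */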
1005 
1006 	bi = big_info_get(pdu);
1007 	big_info_offset_fill(bi, lll_iso->ticks_sync_pdu_offset, 0U);
1008 	/* Assign the 39-bit payload count, retaining the 1 MS bit framing value */
1009 	bi->payload_count_framing[0] = payload_count;
1010 	bi->payload_count_framing[1] = payload_count >> 8;
1011 	bi->payload_count_framing[2] = payload_count >> 16;
1012 	bi->payload_count_framing[3] = payload_count >> 24;
1013 	bi->payload_count_framing[4] &= ~0x7F;
1014 	bi->payload_count_framing[4] |= (payload_count >> 32) & 0x7F;
1015 
1016 	/* Update Channel Map in the BIGInfo until Thread context gets a
1017 	 * chance to update the PDU with new Channel Map.
1018 	 */
1019 	if (lll_sync->iso_chm_done_req != lll_sync->iso_chm_done_ack) {
1020 		pdu_big_info_chan_map_phy_set(bi->chm_phy,
1021 					      lll_iso->data_chan_map,
1022 					      lll_iso->phy);
1023 	}
1024 }
1025 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1026 
1027 void ull_adv_iso_done_complete(struct node_rx_event_done *done)
1028 {
1029 	struct ll_adv_iso_set *adv_iso;
1030 	struct lll_adv_iso *lll;
1031 	struct node_rx_pdu *rx;
1032 	memq_link_t *link;
1033 
1034 	/* switch to normal prepare */
1035 	mfy_lll_prepare.fp = lll_adv_iso_prepare;
1036 
1037 	/* Get reference to ULL context */
1038 	adv_iso = CONTAINER_OF(done->param, struct ll_adv_iso_set, ull);
1039 	lll = &adv_iso->lll;
1040 
1041 	/* Prepare BIG complete event */
1042 	rx = (void *)&adv_iso->node_rx_complete;
1043 	link = rx->hdr.link;
1044 	if (!link) {
1045 		/* NOTE: When BIS events have overlapping prepares placed
1046 		 *       in the pipeline, more than one done complete event
1047 		 *       will be generated; let's ignore the additional done
1048 		 *       events.
1049 		 */
1050 		return;
1051 	}
1052 	rx->hdr.link = NULL;
1053 
1054 	rx->hdr.type = NODE_RX_TYPE_BIG_COMPLETE;
1055 	rx->hdr.handle = lll->handle;
1056 	rx->rx_ftr.param = adv_iso;
1057 
1058 	ll_rx_put_sched(link, rx);
1059 }
1060 
1061 void ull_adv_iso_done_terminate(struct node_rx_event_done *done)
1062 {
1063 	struct ll_adv_iso_set *adv_iso;
1064 	struct lll_adv_iso *lll;
1065 	uint32_t ret;
1066 
1067 	/* Get reference to ULL context */
1068 	adv_iso = CONTAINER_OF(done->param, struct ll_adv_iso_set, ull);
1069 	lll = &adv_iso->lll;
1070 
1071 	/* Skip if already terminated (we come here if the pipeline is being flushed) */
1072 	if (unlikely(lll->handle == LLL_ADV_HANDLE_INVALID)) {
1073 		return;
1074 	}
1075 
1076 	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
1077 			  (TICKER_ID_ADV_ISO_BASE + lll->handle),
1078 			  ticker_stop_op_cb, adv_iso);
1079 	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
1080 		  (ret == TICKER_STATUS_BUSY));
1081 
1082 	/* Invalidate the handle */
1083 	lll->handle = LLL_ADV_HANDLE_INVALID;
1084 }
1085 
1086 struct ll_adv_iso_set *ull_adv_iso_by_stream_get(uint16_t handle)
1087 {
1088 	if (handle >= CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT) {
1089 		return NULL;
1090 	}
1091 
1092 	return adv_iso_get(stream_pool[handle].big_handle);
1093 }
1094 
1095 struct lll_adv_iso_stream *ull_adv_iso_stream_get(uint16_t handle)
1096 {
1097 	if (handle >= CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT) {
1098 		return NULL;
1099 	}
1100 
1101 	return &stream_pool[handle];
1102 }
1103 
1104 struct lll_adv_iso_stream *ull_adv_iso_lll_stream_get(uint16_t handle)
1105 {
1106 	return ull_adv_iso_stream_get(handle);
1107 }
1108 
1109 void ull_adv_iso_stream_release(struct ll_adv_iso_set *adv_iso)
1110 {
1111 	struct lll_adv_iso *lll;
1112 
1113 	lll = &adv_iso->lll;
1114 	while (lll->num_bis--) {
1115 		struct lll_adv_iso_stream *stream;
1116 		struct ll_iso_datapath *dp;
1117 		uint16_t stream_handle;
1118 		memq_link_t *link;
1119 
1120 		stream_handle = lll->stream_handle[lll->num_bis];
1121 		stream = ull_adv_iso_stream_get(stream_handle);
1122 
1123 		LL_ASSERT(!stream->link_tx_free);
1124 		link = memq_deinit(&stream->memq_tx.head,
1125 				   &stream->memq_tx.tail);
1126 		LL_ASSERT(link);
1127 		stream->link_tx_free = link;
1128 
1129 		dp = stream->dp;
1130 		if (dp) {
1131 			stream->dp = NULL;
1132 			isoal_source_destroy(dp->source_hdl);
1133 			ull_iso_datapath_release(dp);
1134 		}
1135 
1136 		mem_release(stream, &stream_free);
1137 	}
1138 
1139 	/* Remove Periodic Advertising association */
1140 	lll->adv->sync->iso = NULL;
1141 
1142 	/* Remove Extended Advertising association */
1143 	lll->adv = NULL;
1144 }
1145 
1146 uint32_t ull_adv_iso_max_time_get(const struct ll_adv_iso_set *adv_iso)
1147 {
1148 	return adv_iso_time_get(adv_iso, true);
1149 }
1150 
1151 static int init_reset(void)
1152 {
1153 	/* Add initializations common to power up initialization and HCI reset
1154 	 * initializations.
1155 	 */
1156 
1157 	mem_init((void *)stream_pool, sizeof(struct lll_adv_iso_stream),
1158 		 CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT, &stream_free);
1159 
1160 	return 0;
1161 }
1162 
1163 static struct ll_adv_iso_set *adv_iso_get(uint8_t handle)
1164 {
1165 	if (handle >= CONFIG_BT_CTLR_ADV_ISO_SET) {
1166 		return NULL;
1167 	}
1168 
1169 	return &ll_adv_iso[handle];
1170 }
1171 
1172 static struct stream *adv_iso_stream_acquire(void)
1173 {
1174 	return mem_acquire(&stream_free);
1175 }
1176 
1177 static uint16_t adv_iso_stream_handle_get(struct lll_adv_iso_stream *stream)
1178 {
1179 	return mem_index_get(stream, stream_pool, sizeof(*stream));
1180 }
1181 
1182 static uint8_t ptc_calc(const struct lll_adv_iso *lll, uint32_t event_spacing,
1183 			uint32_t event_spacing_max)
1184 {
1185 	if (event_spacing < event_spacing_max) {
1186 		uint32_t ptc;
1187 		uint8_t nse;
1188 
1189 		/* Possible maximum Pre-transmission Subevents per BIS.
1190 		 * sub_interval is at least T_MSS_150 + MPT (hence a value in 8 bits or more), i.e.
1191 		 * the below division and the subsequent multiplication with lll->bn do not
1192 		 * overflow.
1193 		 */
1194 		ptc = ((event_spacing_max - event_spacing) /
1195 		       (lll->sub_interval * lll->bn * lll->num_bis)) *
1196 		      lll->bn;
1197 
1198 		/* Required NSE */
1199 		nse = lll->bn * lll->irc; /* 3 bits * 4 bits, total 7 bits */
1200 
1201 		/* Requested NSE is greater than the required NSE; a Pre-Transmission
1202 		 * Offset has been provided.
1203 		 *
1204 		 * NOTE: This is the case when the HCI test command is used to create the
1205 		 *       BIG, i.e. the test_config variable is true.
1206 		 */
1207 		if (lll->nse > nse) {
1208 			/* Restrict PTC to number of available subevents */
1209 			ptc = MIN(ptc, lll->nse - nse);
1210 		} else {
1211 			/* No PTO requested; the Zephyr Controller implementation here will try
1212 			 * using a Pre-Transmission Offset of BT_CTLR_ADV_ISO_PTO_MIN, i.e.
1213 			 * restrict to a maximum of BN Pre-Transmission subevents per BIS. This
1214 			 * allows for better time diversity, so that an ISO Synchronized Receiver
1215 			 * that skips or misses a reception still gets another chance at receiving
1216 			 * the ISO PDUs within the permitted maximum transport latency.
1217 			 *
1218 			 * Use case: a BAP Broadcast Audio Assistant role device has a drifting
1219 			 * ACL Peripheral role active in the BAP Broadcast Audio Sink device.
1220 			 */
1221 			ptc = MIN(ptc, (lll->bn * BT_CTLR_ADV_ISO_PTO_GROUP_COUNT));
1222 		}
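
		/* Worked example (illustrative numbers): with 3000 us of spare time,
		 * sub_interval = 500 us, bn = 1 and num_bis = 2, the division yields
		 * ptc = (3000 / (500 * 1 * 2)) * 1 = 3; if no PTO was requested it is
		 * then capped to bn * BT_CTLR_ADV_ISO_PTO_GROUP_COUNT = 1.
		 */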
1223 
1224 		/* FIXME: Do not remember why ptc is 4 bits; it should be 5 bits, as ptc is a
1225 		 *        running buffer offset related to nse. Fix the ptc and ptc_curr
1226 		 *        definitions; until then, let's have an assert check here.
1227 		 */
1228 		LL_ASSERT(ptc <= BIT_MASK(4));
1229 
1230 		return ptc;
1231 	}
1232 
1233 	return 0U;
1234 }
1235 
1236 static uint32_t adv_iso_time_get(const struct ll_adv_iso_set *adv_iso, bool max)
1237 {
1238 	const struct lll_adv_iso *lll_iso;
1239 	uint32_t ctrl_spacing;
1240 	uint32_t pdu_spacing;
1241 	uint32_t time_us;
1242 
1243 	lll_iso = &adv_iso->lll;
1244 
1245 	pdu_spacing = PDU_BIS_US(lll_iso->max_pdu, lll_iso->enc, lll_iso->phy,
1246 				 lll_iso->phy_flags) +
1247 		      EVENT_MSS_US;
1248 	ctrl_spacing = PDU_BIS_US(sizeof(struct pdu_big_ctrl), lll_iso->enc,
1249 				  lll_iso->phy, lll_iso->phy_flags);
1250 
1251 	/* 1. The maximum PDU transmission time on the 1M/2M/S8 PHY is 17040 us,
1252 	 * i.e. representable in 15 bits.
1253 	 * 2. NSE in the range 1 to 31 is represented in 5 bits.
1254 	 * 3. num_bis in the range 1 to 31 is represented in 5 bits.
1255 	 *
1256 	 * Hence, the worst case event time can be represented in 25 bits, plus
1257 	 * one bit each for the added ctrl_spacing and radio event overheads, i.e.
1258 	 * 27 bits are required, which is sufficiently covered by using a 32-bit
1259 	 * data type for time_us.
1260 	 */
1261 
1262 	if (IS_ENABLED(CONFIG_BT_CTLR_ADV_ISO_RESERVE_MAX) || max) {
1263 		/* Maximum time reservation for both sequential and interleaved
1264 		 * packing.
1265 		 */
1266 		time_us = (pdu_spacing * lll_iso->nse * lll_iso->num_bis) +
1267 			  ctrl_spacing;
1268 
1269 	} else if (lll_iso->bis_spacing >=
1270 		   (lll_iso->sub_interval * lll_iso->nse)) {
1271 		/* Time reservation omitting PTC subevents in sequential
1272 		 * packing.
1273 		 */
1274 		time_us = pdu_spacing * ((lll_iso->nse * lll_iso->num_bis) -
1275 					 lll_iso->ptc);
1276 
1277 	} else {
1278 		/* Time reservation omitting PTC subevents in interleaved
1279 		 * packing.
1280 		 */
1281 		time_us = pdu_spacing * ((lll_iso->nse - lll_iso->ptc) *
1282 					 lll_iso->num_bis);
1283 	}
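
	/* Illustration of the three reservations with nse = 5, ptc = 2 and
	 * num_bis = 2: the maximum reservation covers 10 subevent slots plus
	 * ctrl_spacing, the sequential case reserves 8 slots and the interleaved
	 * case 6 slots, before the radio event overheads are added below.
	 */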
1284 
1285 	/* Add implementation defined radio event overheads */
1286 	time_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
1287 
1288 	return time_us;
1289 }
1290 
1291 static uint32_t adv_iso_start(struct ll_adv_iso_set *adv_iso,
1292 			      uint32_t iso_interval_us)
1293 {
1294 	uint32_t ticks_slot_overhead;
1295 	uint32_t ticks_slot_offset;
1296 	volatile uint32_t ret_cb;
1297 	uint32_t ticks_anchor;
1298 	uint32_t ticks_slot;
1299 	uint32_t slot_us;
1300 	uint32_t ret;
1301 	int err;
1302 
1303 	ull_hdr_init(&adv_iso->ull);
1304 
1305 	slot_us = adv_iso_time_get(adv_iso, false);
1306 
1307 	adv_iso->ull.ticks_active_to_start = 0U;
1308 	adv_iso->ull.ticks_prepare_to_start =
1309 		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
1310 	adv_iso->ull.ticks_preempt_to_start =
1311 		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
1312 	adv_iso->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);
1313 
1314 	ticks_slot_offset = MAX(adv_iso->ull.ticks_active_to_start,
1315 				adv_iso->ull.ticks_prepare_to_start);
1316 	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
1317 		ticks_slot_overhead = ticks_slot_offset;
1318 	} else {
1319 		ticks_slot_overhead = 0U;
1320 	}
1321 	ticks_slot = adv_iso->ull.ticks_slot + ticks_slot_overhead;
1322 
1323 	/* Find the slot after the Periodic Advertising events */
1324 	ticks_anchor = ticker_ticks_now_get() +
1325 		       HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);
1326 	err = ull_sched_adv_aux_sync_free_anchor_get(ticks_slot, &ticks_anchor);
1327 	if (!err) {
1328 		ticks_anchor += HAL_TICKER_US_TO_TICKS(
1329 					MAX(EVENT_MAFS_US,
1330 					    EVENT_OVERHEAD_START_US) -
1331 					EVENT_OVERHEAD_START_US +
1332 					(EVENT_TICKER_RES_MARGIN_US << 1));
1333 	}
1334 
1335 	/* setup to use ISO create prepare function for first radio event */
1336 	mfy_lll_prepare.fp = lll_adv_iso_create_prepare;
1337 
1338 	ret_cb = TICKER_STATUS_BUSY;
1339 	ret = ticker_start(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
1340 			   (TICKER_ID_ADV_ISO_BASE + adv_iso->lll.handle),
1341 			   ticks_anchor, 0U,
1342 			   HAL_TICKER_US_TO_TICKS(iso_interval_us),
1343 			   HAL_TICKER_REMAINDER(iso_interval_us),
1344 			   TICKER_NULL_LAZY, ticks_slot, ticker_cb, adv_iso,
1345 			   ull_ticker_status_give, (void *)&ret_cb);
1346 	ret = ull_ticker_status_take(ret, &ret_cb);
1347 
1348 	return ret;
1349 }
1350 
1351 static uint8_t adv_iso_chm_update(uint8_t big_handle)
1352 {
1353 	struct ll_adv_iso_set *adv_iso;
1354 	struct lll_adv_iso *lll_iso;
1355 
1356 	adv_iso = adv_iso_get(big_handle);
1357 	if (!adv_iso) {
1358 		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
1359 	}
1360 
1361 	lll_iso = &adv_iso->lll;
1362 	if (lll_iso->term_req ||
1363 	    (lll_iso->chm_req != lll_iso->chm_ack)) {
1364 		return BT_HCI_ERR_CMD_DISALLOWED;
1365 	}
1366 
1367 	/* Request channel map update procedure */
1368 	lll_iso->chm_chan_count = ull_chan_map_get(lll_iso->chm_chan_map);
1369 	lll_iso->chm_req++;
1370 
1371 	return BT_HCI_ERR_SUCCESS;
1372 }
1373 
1374 static void adv_iso_chm_complete_commit(struct lll_adv_iso *lll_iso)
1375 {
1376 	struct pdu_adv *pdu_prev, *pdu;
1377 	struct lll_adv_sync *lll_sync;
1378 	struct pdu_big_info *bi;
1379 	struct ll_adv_set *adv;
1380 	uint8_t acad_len;
1381 	uint8_t ter_idx;
1382 	uint8_t ad_len;
1383 	uint8_t *acad;
1384 	uint8_t *ad;
1385 	uint8_t len;
1386 	uint8_t err;
1387 
1388 	/* Allocate next PDU */
1389 	adv = HDR_LLL2ULL(lll_iso->adv);
1390 	err = ull_adv_sync_pdu_alloc(adv, ULL_ADV_PDU_EXTRA_DATA_ALLOC_IF_EXIST,
1391 				     &pdu_prev, &pdu, NULL, NULL, &ter_idx);
1392 	LL_ASSERT(!err);
1393 
1394 	/* Copy content */
1395 	err = ull_adv_sync_duplicate(pdu_prev, pdu);
1396 	LL_ASSERT(!err);
1397 
1398 	/* Get the current ACAD */
1399 	acad = ull_adv_sync_get_acad(pdu, &acad_len);
1400 
1401 	lll_sync = adv->lll.sync;
1402 
1403 	/* Dev assert if ACAD empty */
1404 	LL_ASSERT(acad_len);
1405 
1406 	/* Find the BIGInfo */
1407 	len = acad_len;
1408 	ad = acad;
1409 	do {
1410 		ad_len = ad[PDU_ADV_DATA_HEADER_LEN_OFFSET];
1411 		if (ad_len &&
1412 		    (ad[PDU_ADV_DATA_HEADER_TYPE_OFFSET] == BT_DATA_BIG_INFO)) {
1413 			break;
1414 		}
1415 
1416 		ad_len += 1U;
1417 
1418 		LL_ASSERT(ad_len <= len);
1419 
1420 		ad += ad_len;
1421 		len -= ad_len;
1422 	} while (len);
1423 	LL_ASSERT(len);
1424 
1425 	/* Get reference to BIGInfo */
1426 	bi = (void *)&ad[PDU_ADV_DATA_HEADER_DATA_OFFSET];
1427 
1428 	/* Copy the new/current Channel Map */
1429 	pdu_big_info_chan_map_phy_set(bi->chm_phy, lll_iso->data_chan_map,
1430 				      lll_iso->phy);
1431 
1432 	/* Commit the new PDU Buffer */
1433 	lll_adv_sync_data_enqueue(lll_sync, ter_idx);
1434 }
1435 
1436 static void mfy_iso_offset_get(void *param)
1437 {
1438 	struct lll_adv_sync *lll_sync;
1439 	struct ll_adv_sync_set *sync;
1440 	struct lll_adv_iso *lll_iso;
1441 	uint32_t ticks_to_expire;
1442 	struct pdu_big_info *bi;
1443 	uint32_t ticks_current;
1444 	uint64_t payload_count;
1445 	struct pdu_adv *pdu;
1446 	uint8_t ticker_id;
1447 	uint16_t lazy;
1448 	uint8_t retry;
1449 	uint8_t id;
1450 
1451 	sync = param;
1452 	lll_sync = &sync->lll;
1453 	lll_iso = lll_sync->iso;
1454 	ticker_id = TICKER_ID_ADV_ISO_BASE + lll_iso->handle;
1455 
1456 	id = TICKER_NULL;
1457 	ticks_to_expire = 0U;
1458 	ticks_current = 0U;
1459 	retry = 4U;
1460 	do {
1461 		uint32_t volatile ret_cb;
1462 		uint32_t ticks_previous;
1463 		uint32_t ret;
1464 		bool success;
1465 
1466 		ticks_previous = ticks_current;
1467 
1468 		ret_cb = TICKER_STATUS_BUSY;
1469 		ret = ticker_next_slot_get_ext(TICKER_INSTANCE_ID_CTLR,
1470 					       TICKER_USER_ID_ULL_LOW,
1471 					       &id, &ticks_current,
1472 					       &ticks_to_expire, NULL, &lazy,
1473 					       NULL, NULL,
1474 					       ticker_op_cb, (void *)&ret_cb);
1475 		if (ret == TICKER_STATUS_BUSY) {
1476 			/* Busy wait until Ticker Job is enabled after any Radio
1477 			 * event is done using the Radio hardware. Ticker Job
1478 			 * ISR is disabled during Radio events in LOW_LAT
1479 			 * feature to avoid Radio ISR latencies.
1480 			 */
1481 			while (ret_cb == TICKER_STATUS_BUSY) {
1482 				ticker_job_sched(TICKER_INSTANCE_ID_CTLR,
1483 						 TICKER_USER_ID_ULL_LOW);
1484 			}
1485 		}
1486 
1487 		success = (ret_cb == TICKER_STATUS_SUCCESS);
1488 		LL_ASSERT(success);
1489 
1490 		LL_ASSERT((ticks_current == ticks_previous) || retry--);
1491 
1492 		LL_ASSERT(id != TICKER_NULL);
1493 	} while (id != ticker_id);
1494 
1495 	payload_count = lll_iso->payload_count +
1496 			(((uint64_t)lll_iso->latency_prepare + lazy) * lll_iso->bn);
1497 
1498 	pdu = lll_adv_sync_data_latest_peek(lll_sync);
1499 	bi = big_info_get(pdu);
1500 	big_info_offset_fill(bi, ticks_to_expire, 0U);
1501 	/* Assign the 39-bit payload count, retaining the 1 MS bit framing value */
1502 	bi->payload_count_framing[0] = payload_count;
1503 	bi->payload_count_framing[1] = payload_count >> 8;
1504 	bi->payload_count_framing[2] = payload_count >> 16;
1505 	bi->payload_count_framing[3] = payload_count >> 24;
1506 	bi->payload_count_framing[4] &= ~0x7F;
1507 	bi->payload_count_framing[4] |= (payload_count >> 32) & 0x7F;
1508 
1509 	/* Update Channel Map in the BIGInfo until Thread context gets a
1510 	 * chance to update the PDU with new Channel Map.
1511 	 */
1512 	if (lll_sync->iso_chm_done_req != lll_sync->iso_chm_done_ack) {
1513 		pdu_big_info_chan_map_phy_set(bi->chm_phy,
1514 					      lll_iso->data_chan_map,
1515 					      lll_iso->phy);
1516 	}
1517 }
1518 
1519 static void pdu_big_info_chan_map_phy_set(uint8_t *chm_phy, uint8_t *chan_map,
1520 					  uint8_t phy)
1521 {
1522 	(void)memcpy(chm_phy, chan_map, PDU_CHANNEL_MAP_SIZE);
1523 	chm_phy[4] &= 0x1F;
1524 	chm_phy[4] |= ((find_lsb_set(phy) - 1U) << 5);
1525 }
1526 
1527 static inline struct pdu_big_info *big_info_get(struct pdu_adv *pdu)
1528 {
1529 	struct pdu_adv_com_ext_adv *p;
1530 	struct pdu_adv_ext_hdr *h;
1531 	uint8_t *ptr;
1532 
1533 	p = (void *)&pdu->adv_ext_ind;
1534 	h = (void *)p->ext_hdr_adv_data;
1535 	ptr = h->data;
1536 
1537 	/* No AdvA and TargetA */
1538 
1539 	/* traverse through CTE Info, if present */
1540 	if (h->cte_info) {
1541 		ptr += sizeof(struct pdu_cte_info);
1542 	}
1543 
1544 	/* traverse through ADI, if present */
1545 	if (h->adi) {
1546 		ptr += sizeof(struct pdu_adv_adi);
1547 	}
1548 
1549 	/* traverse through aux ptr, if present */
1550 	if (h->aux_ptr) {
1551 		ptr += sizeof(struct pdu_adv_aux_ptr);
1552 	}
1553 
1554 	/* No SyncInfo */
1555 
1556 	/* traverse through Tx Power, if present */
1557 	if (h->tx_pwr) {
1558 		ptr++;
1559 	}
1560 
1561 	/* FIXME: Parse and find the Length encoded AD Format */
1562 	ptr += 2;
1563 
1564 	return (void *)ptr;
1565 }
1566 
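
/* Unit selection note (illustrative, based on the code below): the offset is
 * first expressed in 30 us units; if that value does not fit in the
 * OFFS_UNIT_BITS wide offset field, it is re-expressed in 300 us units
 * (divided by 10) and the offset-units flag in the BIGInfo is set to 1.
 */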
1567 static inline void big_info_offset_fill(struct pdu_big_info *bi,
1568 					uint32_t ticks_offset,
1569 					uint32_t start_us)
1570 {
1571 	uint32_t offs;
1572 
1573 	offs = HAL_TICKER_TICKS_TO_US(ticks_offset) - start_us;
1574 	offs = offs / OFFS_UNIT_30_US;
1575 	if (!!(offs >> OFFS_UNIT_BITS)) {
1576 		PDU_BIG_INFO_OFFS_SET(bi, offs / (OFFS_UNIT_300_US /
1577 						  OFFS_UNIT_30_US));
1578 		PDU_BIG_INFO_OFFS_UNITS_SET(bi, 1U);
1579 	} else {
1580 		PDU_BIG_INFO_OFFS_SET(bi, offs);
1581 		PDU_BIG_INFO_OFFS_UNITS_SET(bi, 0U);
1582 	}
1583 }
1584 
1585 static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
1586 		      uint32_t remainder, uint16_t lazy, uint8_t force,
1587 		      void *param)
1588 {
1589 	static struct lll_prepare_param p;
1590 	struct ll_adv_iso_set *adv_iso = param;
1591 	uint32_t remainder_us;
1592 	uint64_t event_count;
1593 	uint32_t ret;
1594 	uint8_t ref;
1595 
1596 	DEBUG_RADIO_PREPARE_A(1);
1597 
1598 	event_count = adv_iso->lll.payload_count / adv_iso->lll.bn;
1599 	for (int i = 0; i < adv_iso->lll.num_bis; i++)  {
1600 		uint16_t stream_handle = adv_iso->lll.stream_handle[i];
1601 
1602 		ull_iso_lll_event_prepare(LL_BIS_ADV_HANDLE_FROM_IDX(stream_handle), event_count);
1603 	}
1604 
1605 	/* Increment prepare reference count */
1606 	ref = ull_ref_inc(&adv_iso->ull);
1607 	LL_ASSERT(ref);
1608 
1609 	/* Append timing parameters */
1610 	p.ticks_at_expire = ticks_at_expire;
1611 	p.remainder = remainder;
1612 	p.lazy = lazy;
1613 	p.force = force;
1614 	p.param = &adv_iso->lll;
1615 	mfy_lll_prepare.param = &p;
1616 
1617 	/* Kick LLL prepare */
1618 	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, 0,
1619 			     &mfy_lll_prepare);
1620 	LL_ASSERT(!ret);
1621 
1622 	/* Calculate the BIG reference point of current BIG event */
1623 	remainder_us = remainder;
1624 	hal_ticker_remove_jitter(&ticks_at_expire, &remainder_us);
1625 	ticks_at_expire &= HAL_TICKER_CNTR_MASK;
1626 	adv_iso->big_ref_point = isoal_get_wrapped_time_us(HAL_TICKER_TICKS_TO_US(ticks_at_expire),
1627 							   (remainder_us +
1628 							    EVENT_OVERHEAD_START_US));
1629 
1630 	DEBUG_RADIO_PREPARE_A(1);
1631 }
1632 
1633 static void ticker_op_cb(uint32_t status, void *param)
1634 {
1635 	*((uint32_t volatile *)param) = status;
1636 }
1637 
1638 static void ticker_stop_op_cb(uint32_t status, void *param)
1639 {
1640 	static memq_link_t link;
1641 	static struct mayfly mfy = {0U, 0U, &link, NULL, adv_iso_disable};
1642 	uint32_t ret;
1643 
1644 	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
1645 
1646 	/* Check if any pending LLL events that need to be aborted */
1647 	mfy.param = param;
1648 	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
1649 			     TICKER_USER_ID_ULL_HIGH, 0U, &mfy);
1650 	LL_ASSERT(!ret);
1651 }
1652 
1653 static void adv_iso_disable(void *param)
1654 {
1655 	struct ll_adv_iso_set *adv_iso;
1656 	struct ull_hdr *hdr;
1657 
1658 	/* Check ref count to determine if any pending LLL events in pipeline */
1659 	adv_iso = param;
1660 	hdr = &adv_iso->ull;
1661 	if (ull_ref_get(hdr)) {
1662 		static memq_link_t link;
1663 		static struct mayfly mfy = {0U, 0U, &link, NULL, lll_disable};
1664 		uint32_t ret;
1665 
1666 		mfy.param = &adv_iso->lll;
1667 
1668 		/* Setup disabled callback to be called when ref count
1669 		 * returns to zero.
1670 		 */
1671 		LL_ASSERT(!hdr->disabled_cb);
1672 		hdr->disabled_param = mfy.param;
1673 		hdr->disabled_cb = disabled_cb;
1674 
1675 		/* Trigger LLL disable */
1676 		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
1677 				     TICKER_USER_ID_LLL, 0U, &mfy);
1678 		LL_ASSERT(!ret);
1679 	} else {
1680 		/* No pending LLL events */
1681 		disabled_cb(&adv_iso->lll);
1682 	}
1683 }
1684 
1685 static void disabled_cb(void *param)
1686 {
1687 	static memq_link_t link;
1688 	static struct mayfly mfy = {0U, 0U, &link, NULL, tx_lll_flush};
1689 	uint32_t ret;
1690 
1691 	mfy.param = param;
1692 	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
1693 			     TICKER_USER_ID_LLL, 0U, &mfy);
1694 	LL_ASSERT(!ret);
1695 }
1696 
1697 static void tx_lll_flush(void *param)
1698 {
1699 	struct ll_adv_iso_set *adv_iso;
1700 	struct lll_adv_iso *lll;
1701 	struct node_rx_pdu *rx;
1702 	memq_link_t *link;
1703 	uint8_t num_bis;
1704 
1705 	/* Get reference to ULL context */
1706 	lll = param;
1707 
1708 	/* Flush TX */
1709 	num_bis = lll->num_bis;
1710 	while (num_bis--) {
1711 		struct lll_adv_iso_stream *stream;
1712 		struct node_tx_iso *tx;
1713 		uint16_t stream_handle;
1714 		memq_link_t *link2;
1715 		uint16_t handle;
1716 
1717 		stream_handle = lll->stream_handle[num_bis];
1718 		handle = LL_BIS_ADV_HANDLE_FROM_IDX(stream_handle);
1719 		stream = ull_adv_iso_stream_get(stream_handle);
1720 
1721 		link2 = memq_dequeue(stream->memq_tx.tail, &stream->memq_tx.head,
1722 				     (void **)&tx);
1723 		while (link2) {
1724 			tx->next = link2;
1725 			ull_iso_lll_ack_enqueue(handle, tx);
1726 
1727 			link2 = memq_dequeue(stream->memq_tx.tail,
1728 					    &stream->memq_tx.head,
1729 					    (void **)&tx);
1730 		}
1731 	}
1732 
1733 	/* Get the terminate structure reserved in the ISO context.
1734 	 * The terminate reason and connection handle should already be
1735 	 * populated before this mayfly function was scheduled.
1736 	 */
1737 	adv_iso = HDR_LLL2ULL(lll);
1738 	rx = (void *)&adv_iso->node_rx_terminate;
1739 	link = rx->hdr.link;
1740 	LL_ASSERT(link);
1741 	rx->hdr.link = NULL;
1742 
1743 	/* Enqueue the terminate towards ULL context */
1744 	ull_rx_put_sched(link, rx);
1745 }
1746