1 /*
2  * Copyright (c) 2021 Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <soc.h>
8 #include <zephyr/kernel.h>
9 #include <zephyr/sys/byteorder.h>
10 #include <zephyr/bluetooth/hci_types.h>
11 
12 #include "hal/cpu.h"
13 #include "hal/ccm.h"
14 #include "hal/ticker.h"
15 
16 #include "util/util.h"
17 #include "util/mem.h"
18 #include "util/memq.h"
19 #include "util/mfifo.h"
20 #include "util/mayfly.h"
21 
22 #include "ticker/ticker.h"
23 
24 #include "pdu_df.h"
25 #include "lll/pdu_vendor.h"
26 #include "pdu.h"
27 
28 #include "lll.h"
29 #include "lll/lll_vendor.h"
30 #include "lll/lll_adv_types.h"
31 #include "lll_adv.h"
32 #include "lll/lll_adv_pdu.h"
33 #include "lll_adv_iso.h"
34 #include "lll_iso_tx.h"
35 
36 #include "isoal.h"
37 
38 #include "ull_adv_types.h"
39 #include "ull_iso_types.h"
40 
41 #include "ull_internal.h"
42 #include "ull_adv_internal.h"
43 #include "ull_chan_internal.h"
44 #include "ull_sched_internal.h"
45 #include "ull_iso_internal.h"
46 
47 #include "ll.h"
48 #include "ll_feat.h"
49 
50 #include "bt_crypto.h"
51 
52 #include "hal/debug.h"
53 
54 static int init_reset(void);
55 static struct ll_adv_iso_set *adv_iso_get(uint8_t handle);
56 static struct stream *adv_iso_stream_acquire(void);
57 static uint16_t adv_iso_stream_handle_get(struct lll_adv_iso_stream *stream);
58 static uint8_t ptc_calc(const struct lll_adv_iso *lll, uint32_t event_spacing,
59 			uint32_t event_spacing_max);
60 static uint32_t adv_iso_time_get(const struct ll_adv_iso_set *adv_iso, bool max);
61 static uint32_t adv_iso_start(struct ll_adv_iso_set *adv_iso,
62 			      uint32_t iso_interval_us);
63 static uint8_t adv_iso_chm_update(uint8_t big_handle);
64 static void adv_iso_chm_complete_commit(struct lll_adv_iso *lll_iso);
65 static void mfy_iso_offset_get(void *param);
66 static void pdu_big_info_chan_map_phy_set(uint8_t *chm_phy, uint8_t *chan_map,
67 					  uint8_t phy);
68 static inline struct pdu_big_info *big_info_get(struct pdu_adv *pdu);
69 static inline void big_info_offset_fill(struct pdu_big_info *bi,
70 					uint32_t ticks_offset,
71 					uint32_t start_us);
72 static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
73 		      uint32_t remainder, uint16_t lazy, uint8_t force,
74 		      void *param);
75 static void ticker_op_cb(uint32_t status, void *param);
76 static void ticker_stop_op_cb(uint32_t status, void *param);
77 static void adv_iso_disable(void *param);
78 static void disabled_cb(void *param);
79 static void tx_lll_flush(void *param);
80 
81 static memq_link_t link_lll_prepare;
82 static struct mayfly mfy_lll_prepare = {0U, 0U, &link_lll_prepare, NULL, NULL};
83 
84 static struct ll_adv_iso_set ll_adv_iso[CONFIG_BT_CTLR_ADV_ISO_SET];
85 static struct lll_adv_iso_stream
86 			stream_pool[CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT];
87 static void *stream_free;
88 
89 uint8_t ll_big_create(uint8_t big_handle, uint8_t adv_handle, uint8_t num_bis,
90 		      uint32_t sdu_interval, uint16_t max_sdu,
91 		      uint16_t max_latency, uint8_t rtn, uint8_t phy,
92 		      uint8_t packing, uint8_t framing, uint8_t encryption,
93 		      uint8_t *bcode)
94 {
95 	uint8_t hdr_data[1 + sizeof(uint8_t *)];
96 	struct lll_adv_sync *lll_adv_sync;
97 	struct lll_adv_iso *lll_adv_iso;
98 	struct ll_adv_iso_set *adv_iso;
99 	struct pdu_adv *pdu_prev, *pdu;
100 	struct pdu_big_info *big_info;
101 	uint32_t ticks_slot_overhead;
102 	struct ll_adv_sync_set *sync;
103 	struct ll_adv_aux_set *aux;
104 	uint32_t event_spacing_max;
105 	uint8_t pdu_big_info_size;
106 	uint32_t iso_interval_us;
107 	uint32_t latency_packing;
108 	uint32_t ticks_slot_sync;
109 	uint32_t ticks_slot_aux;
110 	memq_link_t *link_cmplt;
111 	memq_link_t *link_term;
112 	struct ll_adv_set *adv;
113 	uint32_t slot_overhead;
114 	uint32_t event_spacing;
115 	uint16_t ctrl_spacing;
116 	uint8_t sdu_per_event;
117 	uint8_t ter_idx;
118 	uint8_t *acad;
119 	uint32_t ret;
120 	uint8_t err;
121 	uint8_t bn;
122 	int res;
123 
124 	adv_iso = adv_iso_get(big_handle);
125 
126 	/* Already created */
127 	if (!adv_iso || adv_iso->lll.adv) {
128 		return BT_HCI_ERR_CMD_DISALLOWED;
129 	}
130 
131 	/* No advertising set created */
132 	adv = ull_adv_is_created_get(adv_handle);
133 	if (!adv) {
134 		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
135 	}
136 
137 	/* Does not identify a periodic advertising train or
138 	 * the periodic advertising train is already associated
139 	 * with another BIG.
140 	 */
141 	lll_adv_sync = adv->lll.sync;
142 	if (!lll_adv_sync || lll_adv_sync->iso) {
143 		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
144 	}
145 
146 	if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK)) {
147 		if (num_bis == 0U || num_bis > 0x1F) {
148 			return BT_HCI_ERR_INVALID_PARAM;
149 		}
150 
151 		if (sdu_interval < 0x000100 || sdu_interval > 0x0FFFFF) {
152 			return BT_HCI_ERR_INVALID_PARAM;
153 		}
154 
155 		if (max_sdu < 0x0001 || max_sdu > 0x0FFF) {
156 			return BT_HCI_ERR_INVALID_PARAM;
157 		}
158 
159 		if (max_latency > 0x0FA0) {
160 			return BT_HCI_ERR_INVALID_PARAM;
161 		}
162 
163 		if (rtn > 0x0F) {
164 			return BT_HCI_ERR_INVALID_PARAM;
165 		}
166 
167 		if (phy > (BT_HCI_LE_EXT_SCAN_PHY_1M |
168 			   BT_HCI_LE_EXT_SCAN_PHY_2M |
169 			   BT_HCI_LE_EXT_SCAN_PHY_CODED)) {
170 			return BT_HCI_ERR_INVALID_PARAM;
171 		}
172 
173 		if (packing > 1U) {
174 			return BT_HCI_ERR_INVALID_PARAM;
175 		}
176 
177 		if (framing > 1U) {
178 			return BT_HCI_ERR_INVALID_PARAM;
179 		}
180 
181 		if (encryption > 1U) {
182 			return BT_HCI_ERR_INVALID_PARAM;
183 		}
184 	}
185 
186 	/* Check if free BISes available */
187 	if (mem_free_count_get(stream_free) < num_bis) {
188 		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
189 	}
190 
191 	/* Allocate link buffer for created event */
192 	link_cmplt = ll_rx_link_alloc();
193 	if (!link_cmplt) {
194 		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
195 	}
196 
197 	/* Allocate link buffer for sync lost event */
198 	link_term = ll_rx_link_alloc();
199 	if (!link_term) {
200 		ll_rx_link_release(link_cmplt);
201 
202 		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
203 	}
204 
205 	/* Store parameters in LLL context */
206 	/* TODO: parameters to ULL if only accessed by ULL */
207 	lll_adv_iso = &adv_iso->lll;
208 	lll_adv_iso->handle = big_handle;
209 	lll_adv_iso->max_pdu = MIN(LL_BIS_OCTETS_TX_MAX, max_sdu);
210 	lll_adv_iso->phy = phy;
211 	lll_adv_iso->phy_flags = PHY_FLAGS_S8;
212 
213 	/* Mandatory Num_BIS = 1 */
214 	lll_adv_iso->num_bis = num_bis;
215 
216 	/* Allocate streams */
217 	for (uint8_t i = 0U; i < num_bis; i++) {
218 		struct lll_adv_iso_stream *stream;
219 
220 		stream = (void *)adv_iso_stream_acquire();
221 		stream->big_handle = big_handle;
222 		stream->dp = NULL;
223 
224 		if (!stream->link_tx_free) {
225 			stream->link_tx_free = &stream->link_tx;
226 		}
227 		memq_init(stream->link_tx_free, &stream->memq_tx.head,
228 			  &stream->memq_tx.tail);
229 		stream->link_tx_free = NULL;
230 
231 		stream->pkt_seq_num = 0U;
232 
233 		lll_adv_iso->stream_handle[i] =
234 			adv_iso_stream_handle_get(stream);
235 	}
236 
237 	/* FIXME: SDU per max latency */
238 	sdu_per_event = MAX((max_latency * USEC_PER_MSEC / sdu_interval), 2U) -
239 			1U;
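	/* NOTE: sdu_per_event above is one less than the number of SDU
	 *       intervals that fit within the requested Max_Transport_Latency,
	 *       with a lower bound of one SDU per BIG event (see FIXME above).
	 */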
240 
241 	/* BN (Burst Count), Mandatory BN = 1 */
242 	bn = DIV_ROUND_UP(max_sdu, lll_adv_iso->max_pdu) * sdu_per_event;
243 	if (bn > PDU_BIG_BN_MAX) {
244 		/* Restrict each BIG event to maximum burst per BIG event */
245 		lll_adv_iso->bn = PDU_BIG_BN_MAX;
246 
247 		/* Ceil the required burst count per SDU to next maximum burst
248 		 * per BIG event.
249 		 */
250 		bn = DIV_ROUND_UP(bn, PDU_BIG_BN_MAX) * PDU_BIG_BN_MAX;
251 	} else {
252 		lll_adv_iso->bn = bn;
253 	}
254 
255 	/* Calculate ISO interval */
256 	/* iso_interval shall be at least SDU interval,
257 	 * or an integer multiple of the SDU interval for unframed PDUs
258 	 */
259 	iso_interval_us = ((sdu_interval * lll_adv_iso->bn * sdu_per_event) /
260 			   (bn * PERIODIC_INT_UNIT_US)) * PERIODIC_INT_UNIT_US;
261 	lll_adv_iso->iso_interval = iso_interval_us / PERIODIC_INT_UNIT_US;
262 
263 	/* Immediate Repetition Count (IRC), Mandatory IRC = 1 */
264 	lll_adv_iso->irc = rtn + 1U;
265 
266 	/* Calculate NSE (No. of Sub Events), Mandatory NSE = 1,
267 	 * without PTO added.
268 	 */
269 	lll_adv_iso->nse = lll_adv_iso->bn * lll_adv_iso->irc;
270 
271 	/* NOTE: Calculate sub_interval, if interleaved then it is Num_BIS x
272 	 *       BIS_Spacing (by BT Spec.)
273 	 *       else if sequential, then by our implementation, let's keep it
274 	 *       max_tx_time for Max_PDU + tMSS.
275 	 */
276 	lll_adv_iso->sub_interval = PDU_BIS_US(lll_adv_iso->max_pdu, encryption,
277 					       phy, lll_adv_iso->phy_flags) +
278 				    EVENT_MSS_US;
279 	ctrl_spacing = PDU_BIS_US(sizeof(struct pdu_big_ctrl), encryption, phy,
280 				  lll_adv_iso->phy_flags);
281 	latency_packing = lll_adv_iso->sub_interval * lll_adv_iso->nse *
282 			  lll_adv_iso->num_bis;
283 	event_spacing = latency_packing + ctrl_spacing +
284 			EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
285 
286 	/* Check if aux context allocated before we are creating ISO */
287 	if (adv->lll.aux) {
288 		aux = HDR_LLL2ULL(adv->lll.aux);
289 	} else {
290 		aux = NULL;
291 	}
292 
293 	/* Calculate overheads due to extended advertising. */
294 	if (aux && aux->is_started) {
295 		ticks_slot_aux = aux->ull.ticks_slot;
296 		if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
297 			ticks_slot_overhead = MAX(aux->ull.ticks_active_to_start,
298 						  aux->ull.ticks_prepare_to_start);
299 		} else {
300 			ticks_slot_overhead = 0U;
301 		}
302 		ticks_slot_aux += ticks_slot_overhead;
303 	} else {
304 		ticks_slot_aux = 0U;
305 	}
306 
307 	/* Calculate overheads due to periodic advertising. */
308 	sync = HDR_LLL2ULL(lll_adv_sync);
309 	if (sync->is_started) {
310 		ticks_slot_sync = sync->ull.ticks_slot;
311 		if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
312 			ticks_slot_overhead = MAX(sync->ull.ticks_active_to_start,
313 						  sync->ull.ticks_prepare_to_start);
314 		} else {
315 			ticks_slot_overhead = 0U;
316 		}
317 		ticks_slot_sync += ticks_slot_overhead;
318 	} else {
319 		ticks_slot_sync = 0U;
320 	}
321 
322 	/* Calculate total overheads due to extended and periodic advertising */
323 	if (CONFIG_BT_CTLR_ADV_AUX_SYNC_OFFSET > 0U) {
324 		ticks_slot_overhead = MAX(ticks_slot_aux, ticks_slot_sync);
325 	} else {
326 		ticks_slot_overhead = ticks_slot_aux + ticks_slot_sync;
327 	}
328 
329 	/* Calculate max available ISO event spacing */
330 	slot_overhead = HAL_TICKER_TICKS_TO_US(ticks_slot_overhead);
331 	if (slot_overhead < iso_interval_us) {
332 		event_spacing_max = iso_interval_us - slot_overhead;
333 	} else {
334 		event_spacing_max = 0U;
335 	}
336 
337 	/* Check if the ISO interval is too small to fit the calculated BIG event
338 	 * timing required for the supplied BIG create parameters.
339 	 */
340 	if (event_spacing > event_spacing_max) {
341 		/* Release allocated link buffers */
342 		ll_rx_link_release(link_cmplt);
343 		ll_rx_link_release(link_term);
344 
345 		return BT_HCI_ERR_INVALID_PARAM;
346 	}
347 
348 	/* Based on packing requested, sequential or interleaved */
349 	if (packing) {
350 		/* Interleaved Packing */
351 		lll_adv_iso->bis_spacing = lll_adv_iso->sub_interval;
352 		lll_adv_iso->ptc = ptc_calc(lll_adv_iso, event_spacing,
353 					    event_spacing_max);
354 		lll_adv_iso->nse += lll_adv_iso->ptc;
355 		lll_adv_iso->sub_interval = lll_adv_iso->bis_spacing *
356 					    lll_adv_iso->nse;
357 	} else {
358 		/* Sequential Packing */
359 		lll_adv_iso->ptc = ptc_calc(lll_adv_iso, event_spacing,
360 					    event_spacing_max);
361 		lll_adv_iso->nse += lll_adv_iso->ptc;
362 		lll_adv_iso->bis_spacing = lll_adv_iso->sub_interval *
363 					   lll_adv_iso->nse;
364 	}
365 
366 	/* Pre-Transmission Offset (PTO) */
367 	if (lll_adv_iso->ptc) {
368 		lll_adv_iso->pto = bn / lll_adv_iso->bn;
369 	} else {
370 		lll_adv_iso->pto = 0U;
371 	}
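	/* NOTE: When the burst required per SDU exceeded PDU_BIG_BN_MAX above,
	 *       pto is set to the ratio of the required burst count to the
	 *       per-event BN, i.e. the payloads of one SDU group span pto ISO
	 *       intervals and are candidates for pre-transmission.
	 */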
372 
373 	/* TODO: Group count, GC = NSE / BN; PTO = GC - IRC;
374 	 *       Is this required?
375 	 */
376 
377 	lll_adv_iso->sdu_interval = sdu_interval;
378 	lll_adv_iso->max_sdu = max_sdu;
379 
380 	res = util_saa_le32(lll_adv_iso->seed_access_addr, big_handle);
381 	LL_ASSERT(!res);
382 
383 	(void)lll_csrand_get(lll_adv_iso->base_crc_init,
384 			     sizeof(lll_adv_iso->base_crc_init));
385 	lll_adv_iso->data_chan_count =
386 		ull_chan_map_get(lll_adv_iso->data_chan_map);
387 	lll_adv_iso->payload_count = 0U;
388 	lll_adv_iso->latency_prepare = 0U;
389 	lll_adv_iso->latency_event = 0U;
390 	lll_adv_iso->term_req = 0U;
391 	lll_adv_iso->term_ack = 0U;
392 	lll_adv_iso->chm_req = 0U;
393 	lll_adv_iso->chm_ack = 0U;
394 	lll_adv_iso->ctrl_expire = 0U;
395 
396 	/* TODO: framing support */
397 	lll_adv_iso->framing = framing;
398 
399 	/* Allocate next PDU */
400 	err = ull_adv_sync_pdu_alloc(adv, ULL_ADV_PDU_EXTRA_DATA_ALLOC_IF_EXIST,
401 				     &pdu_prev, &pdu, NULL, NULL, &ter_idx);
402 	if (err) {
403 		/* Insufficient Advertising PDU buffers to allocate new PDU
404 		 * to add BIGInfo into the ACAD of the Periodic Advertising.
405 		 */
406 
407 		/* Release allocated link buffers */
408 		ll_rx_link_release(link_cmplt);
409 		ll_rx_link_release(link_term);
410 
411 		return err;
412 	}
413 
414 	/* Add ACAD to AUX_SYNC_IND */
415 	if (encryption) {
416 		pdu_big_info_size = PDU_BIG_INFO_ENCRYPTED_SIZE;
417 	} else {
418 		pdu_big_info_size = PDU_BIG_INFO_CLEARTEXT_SIZE;
419 	}
420 	hdr_data[0] = pdu_big_info_size + PDU_ADV_DATA_HEADER_SIZE;
421 	err = ull_adv_sync_pdu_set_clear(lll_adv_sync, pdu_prev, pdu,
422 					 ULL_ADV_PDU_HDR_FIELD_ACAD, 0U,
423 					 &hdr_data);
424 	if (err) {
425 		/* Failed to add BIGInfo into the ACAD of the Periodic
426 		 * Advertising.
427 		 */
428 
429 		/* Release allocated link buffers */
430 		ll_rx_link_release(link_cmplt);
431 		ll_rx_link_release(link_term);
432 
433 		return err;
434 	}
435 
436 	(void)memcpy(&acad, &hdr_data[1], sizeof(acad));
437 	acad[PDU_ADV_DATA_HEADER_LEN_OFFSET] =
438 		pdu_big_info_size + (PDU_ADV_DATA_HEADER_SIZE -
439 				     PDU_ADV_DATA_HEADER_LEN_SIZE);
440 	acad[PDU_ADV_DATA_HEADER_TYPE_OFFSET] = BT_DATA_BIG_INFO;
441 	big_info = (void *)&acad[PDU_ADV_DATA_HEADER_DATA_OFFSET];
442 
443 	/* big_info->offset, big_info->offset_units and
444 	 * big_info->payload_count_framing[] will be filled by periodic
445 	 * advertising event.
446 	 */
447 
448 	big_info->iso_interval =
449 		sys_cpu_to_le16(iso_interval_us / PERIODIC_INT_UNIT_US);
450 	big_info->num_bis = lll_adv_iso->num_bis;
451 	big_info->nse = lll_adv_iso->nse;
452 	big_info->bn = lll_adv_iso->bn;
453 	big_info->sub_interval = sys_cpu_to_le24(lll_adv_iso->sub_interval);
454 	big_info->pto = lll_adv_iso->pto;
455 	big_info->spacing = sys_cpu_to_le24(lll_adv_iso->bis_spacing);
456 	big_info->irc = lll_adv_iso->irc;
457 	big_info->max_pdu = lll_adv_iso->max_pdu;
458 	(void)memcpy(&big_info->seed_access_addr, lll_adv_iso->seed_access_addr,
459 		     sizeof(big_info->seed_access_addr));
460 	big_info->sdu_interval = sys_cpu_to_le24(sdu_interval);
461 	big_info->max_sdu = max_sdu;
462 	(void)memcpy(&big_info->base_crc_init, lll_adv_iso->base_crc_init,
463 		     sizeof(big_info->base_crc_init));
464 	pdu_big_info_chan_map_phy_set(big_info->chm_phy,
465 				      lll_adv_iso->data_chan_map,
466 				      phy);
467 	/* Assign the 39-bit payload count, and 1-bit framing */
468 	big_info->payload_count_framing[0] = lll_adv_iso->payload_count;
469 	big_info->payload_count_framing[1] = lll_adv_iso->payload_count >> 8;
470 	big_info->payload_count_framing[2] = lll_adv_iso->payload_count >> 16;
471 	big_info->payload_count_framing[3] = lll_adv_iso->payload_count >> 24;
472 	big_info->payload_count_framing[4] = lll_adv_iso->payload_count >> 32;
473 	big_info->payload_count_framing[4] &= ~BIT(7);
474 	big_info->payload_count_framing[4] |= ((framing & 0x01) << 7);
475 
476 	if (encryption) {
477 		const uint8_t BIG1[16] = {0x31, 0x47, 0x49, 0x42, };
478 		const uint8_t BIG2[4]  = {0x32, 0x47, 0x49, 0x42};
479 		const uint8_t BIG3[4]  = {0x33, 0x47, 0x49, 0x42};
480 		struct ccm *ccm_tx;
481 		uint8_t igltk[16];
482 		uint8_t gltk[16];
483 		uint8_t gsk[16];
484 
485 		/* Fill GIV and GSKD */
486 		(void)lll_csrand_get(lll_adv_iso->giv,
487 				     sizeof(lll_adv_iso->giv));
488 		(void)memcpy(big_info->giv, lll_adv_iso->giv,
489 			     sizeof(big_info->giv));
490 		(void)lll_csrand_get(big_info->gskd, sizeof(big_info->gskd));
491 
492 		/* Calculate GSK */
493 		err = bt_crypto_h7(BIG1, bcode, igltk);
494 		LL_ASSERT(!err);
495 		err = bt_crypto_h6(igltk, BIG2, gltk);
496 		LL_ASSERT(!err);
497 		err = bt_crypto_h8(gltk, big_info->gskd, BIG3, gsk);
498 		LL_ASSERT(!err);
499 
500 		/* Prepare the CCM parameters */
501 		ccm_tx = &lll_adv_iso->ccm_tx;
502 		ccm_tx->direction = 1U;
503 		(void)memcpy(&ccm_tx->iv[4], &lll_adv_iso->giv[4], 4U);
504 		(void)mem_rcopy(ccm_tx->key, gsk, sizeof(ccm_tx->key));
505 
506 		/* NOTE: counter is filled in LLL */
507 
508 		lll_adv_iso->enc = 1U;
509 	} else {
510 		lll_adv_iso->enc = 0U;
511 	}
512 
513 	/* Associate the ISO instance with an Extended Advertising instance */
514 	lll_adv_iso->adv = &adv->lll;
515 
516 	/* Store the link buffer for ISO create and terminate complete event */
517 	adv_iso->node_rx_complete.hdr.link = link_cmplt;
518 	adv_iso->node_rx_terminate.hdr.link = link_term;
519 
520 	/* Initialise LLL header members */
521 	lll_hdr_init(lll_adv_iso, adv_iso);
522 
523 	/* Start sending BIS empty data packet for each BIS */
524 	ret = adv_iso_start(adv_iso, iso_interval_us);
525 	if (ret) {
526 		/* Failed to schedule BIG events */
527 
528 		/* Reset the association of ISO instance with the Extended
529 		 * Advertising Instance
530 		 */
531 		lll_adv_iso->adv = NULL;
532 
533 		/* Release allocated link buffers */
534 		ll_rx_link_release(link_cmplt);
535 		ll_rx_link_release(link_term);
536 
537 		return BT_HCI_ERR_CMD_DISALLOWED;
538 	}
539 
540 	/* Associate the ISO instance with a Periodic Advertising */
541 	lll_adv_sync->iso = lll_adv_iso;
542 
543 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
544 	/* Notify the sync instance */
545 	ull_adv_sync_iso_created(HDR_LLL2ULL(lll_adv_sync));
546 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
547 
548 	/* Commit the BIGInfo in the ACAD field of Periodic Advertising */
549 	lll_adv_sync_data_enqueue(lll_adv_sync, ter_idx);
550 
551 	return BT_HCI_ERR_SUCCESS;
552 }
553 
554 uint8_t ll_big_test_create(uint8_t big_handle, uint8_t adv_handle,
555 			   uint8_t num_bis, uint32_t sdu_interval,
556 			   uint16_t iso_interval, uint8_t nse, uint16_t max_sdu,
557 			   uint16_t max_pdu, uint8_t phy, uint8_t packing,
558 			   uint8_t framing, uint8_t bn, uint8_t irc,
559 			   uint8_t pto, uint8_t encryption, uint8_t *bcode)
560 {
561 	/* TODO: Implement */
562 	ARG_UNUSED(big_handle);
563 	ARG_UNUSED(adv_handle);
564 	ARG_UNUSED(num_bis);
565 	ARG_UNUSED(sdu_interval);
566 	ARG_UNUSED(iso_interval);
567 	ARG_UNUSED(nse);
568 	ARG_UNUSED(max_sdu);
569 	ARG_UNUSED(max_pdu);
570 	ARG_UNUSED(phy);
571 	ARG_UNUSED(packing);
572 	ARG_UNUSED(framing);
573 	ARG_UNUSED(bn);
574 	ARG_UNUSED(irc);
575 	ARG_UNUSED(pto);
576 	ARG_UNUSED(encryption);
577 	ARG_UNUSED(bcode);
578 
579 	return BT_HCI_ERR_CMD_DISALLOWED;
580 }
581 
582 uint8_t ll_big_terminate(uint8_t big_handle, uint8_t reason)
583 {
584 	struct lll_adv_sync *lll_adv_sync;
585 	struct lll_adv_iso *lll_adv_iso;
586 	struct ll_adv_iso_set *adv_iso;
587 	struct pdu_adv *pdu_prev, *pdu;
588 	struct node_rx_pdu *node_rx;
589 	struct lll_adv *lll_adv;
590 	struct ll_adv_set *adv;
591 	uint16_t stream_handle;
592 	uint16_t handle;
593 	uint8_t num_bis;
594 	uint8_t ter_idx;
595 	uint8_t err;
596 
597 	adv_iso = adv_iso_get(big_handle);
598 	if (!adv_iso) {
599 		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
600 	}
601 
602 	lll_adv_iso = &adv_iso->lll;
603 	lll_adv = lll_adv_iso->adv;
604 	if (!lll_adv) {
605 		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
606 	}
607 
608 	if (lll_adv_iso->term_req) {
609 		return BT_HCI_ERR_CMD_DISALLOWED;
610 	}
611 
612 	/* Remove ISO data path, keeping data from entering Tx pipeline */
613 	num_bis = lll_adv_iso->num_bis;
614 	while (num_bis--) {
615 		stream_handle = lll_adv_iso->stream_handle[num_bis];
616 		handle = LL_BIS_ADV_HANDLE_FROM_IDX(stream_handle);
617 		err = ll_remove_iso_path(handle,
618 					 BIT(BT_HCI_DATAPATH_DIR_HOST_TO_CTLR));
619 		if (err) {
620 			return err;
621 		}
622 	}
623 
624 	lll_adv_sync = lll_adv->sync;
625 	adv = HDR_LLL2ULL(lll_adv);
626 
627 	/* Allocate next PDU */
628 	err = ull_adv_sync_pdu_alloc(adv, ULL_ADV_PDU_EXTRA_DATA_ALLOC_IF_EXIST,
629 				     &pdu_prev, &pdu, NULL, NULL, &ter_idx);
630 	if (err) {
631 		return err;
632 	}
633 
634 	/* Remove ACAD from AUX_SYNC_IND */
635 	err = ull_adv_sync_pdu_set_clear(lll_adv_sync, pdu_prev, pdu,
636 					 0U, ULL_ADV_PDU_HDR_FIELD_ACAD, NULL);
637 	if (err) {
638 		return err;
639 	}
640 
641 	lll_adv_sync_data_enqueue(lll_adv_sync, ter_idx);
642 
643 	/* Prepare BIG terminate event, will be enqueued after tx flush */
644 	node_rx = (void *)&adv_iso->node_rx_terminate;
645 	node_rx->hdr.type = NODE_RX_TYPE_BIG_TERMINATE;
646 	node_rx->hdr.handle = big_handle;
647 	node_rx->hdr.rx_ftr.param = adv_iso;
648 
649 	if (reason == BT_HCI_ERR_REMOTE_USER_TERM_CONN) {
650 		*((uint8_t *)node_rx->pdu) = BT_HCI_ERR_LOCALHOST_TERM_CONN;
651 	} else {
652 		*((uint8_t *)node_rx->pdu) = reason;
653 	}
654 
655 	/* Request terminate procedure */
656 	lll_adv_iso->term_reason = reason;
657 	lll_adv_iso->term_req = 1U;
658 
659 	return BT_HCI_ERR_SUCCESS;
660 }
661 
662 int ull_adv_iso_init(void)
663 {
664 	int err;
665 
666 	err = init_reset();
667 	if (err) {
668 		return err;
669 	}
670 
671 	return 0;
672 }
673 
674 int ull_adv_iso_reset(void)
675 {
676 	int err;
677 
678 	err = init_reset();
679 	if (err) {
680 		return err;
681 	}
682 
683 	return 0;
684 }
685 
686 struct ll_adv_iso_set *ull_adv_iso_get(uint8_t handle)
687 {
688 	return adv_iso_get(handle);
689 }
690 
691 uint8_t ull_adv_iso_chm_update(void)
692 {
693 	uint8_t handle;
694 
695 	handle = CONFIG_BT_CTLR_ADV_ISO_SET;
696 	while (handle--) {
697 		(void)adv_iso_chm_update(handle);
698 	}
699 
700 	/* TODO: Should failure due to Channel Map Update being already in
701 	 *       progress be returned to caller?
702 	 */
703 	return 0;
704 }
705 
706 void ull_adv_iso_chm_complete(struct node_rx_hdr *rx)
707 {
708 	struct lll_adv_sync *sync_lll;
709 	struct lll_adv_iso *iso_lll;
710 	struct lll_adv *adv_lll;
711 
712 	iso_lll = rx->rx_ftr.param;
713 	adv_lll = iso_lll->adv;
714 	sync_lll = adv_lll->sync;
715 
716 	/* Update Channel Map in BIGInfo in the Periodic Advertising PDU */
717 	while (sync_lll->iso_chm_done_req != sync_lll->iso_chm_done_ack) {
718 		sync_lll->iso_chm_done_ack = sync_lll->iso_chm_done_req;
719 
720 		adv_iso_chm_complete_commit(iso_lll);
721 	}
722 }
723 
724 #if defined(CONFIG_BT_CTLR_HCI_ADV_HANDLE_MAPPING)
725 uint8_t ll_adv_iso_by_hci_handle_get(uint8_t hci_handle, uint8_t *handle)
726 {
727 	struct ll_adv_iso_set *adv_iso;
728 	uint8_t idx;
729 
730 	adv_iso = &ll_adv_iso[0];
731 
732 	for (idx = 0U; idx < CONFIG_BT_CTLR_ADV_ISO_SET; idx++, adv_iso++) {
733 		if (adv_iso->lll.adv &&
734 		    (adv_iso->hci_handle == hci_handle)) {
735 			*handle = idx;
736 			return 0U;
737 		}
738 	}
739 
740 	return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
741 }
742 
743 uint8_t ll_adv_iso_by_hci_handle_new(uint8_t hci_handle, uint8_t *handle)
744 {
745 	struct ll_adv_iso_set *adv_iso, *adv_iso_empty;
746 	uint8_t idx;
747 
748 	adv_iso = &ll_adv_iso[0];
749 	adv_iso_empty = NULL;
750 
751 	for (idx = 0U; idx < CONFIG_BT_CTLR_ADV_ISO_SET; idx++, adv_iso++) {
752 		if (adv_iso->lll.adv) {
753 			if (adv_iso->hci_handle == hci_handle) {
754 				return BT_HCI_ERR_CMD_DISALLOWED;
755 			}
756 		} else if (!adv_iso_empty) {
757 			adv_iso_empty = adv_iso;
758 			*handle = idx;
759 		}
760 	}
761 
762 	if (adv_iso_empty) {
763 		memset(adv_iso_empty, 0U, sizeof(*adv_iso_empty));
764 		adv_iso_empty->hci_handle = hci_handle;
765 		return 0U;
766 	}
767 
768 	return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
769 }
770 #endif /* CONFIG_BT_CTLR_HCI_ADV_HANDLE_MAPPING */
771 
772 void ull_adv_iso_offset_get(struct ll_adv_sync_set *sync)
773 {
774 	static memq_link_t link;
775 	static struct mayfly mfy = {0U, 0U, &link, NULL, mfy_iso_offset_get};
776 	uint32_t ret;
777 
778 	mfy.param = sync;
779 	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1,
780 			     &mfy);
781 	LL_ASSERT(!ret);
782 }
783 
784 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
785 void ull_adv_iso_lll_biginfo_fill(struct pdu_adv *pdu, struct lll_adv_sync *lll_sync)
786 {
787 	struct lll_adv_iso *lll_iso;
788 	uint16_t latency_prepare;
789 	struct pdu_big_info *bi;
790 	uint64_t payload_count;
791 
792 	lll_iso = lll_sync->iso;
793 
794 	/* Calculate current payload count. If refcount is non-zero, we have called
795 	 * prepare and the LLL implementation has incremented latency_prepare already.
796 	 * In this case we need to subtract lazy + 1 from latency_prepare
797 	 */
798 	latency_prepare = lll_iso->latency_prepare;
799 	if (ull_ref_get(HDR_LLL2ULL(lll_iso))) {
800 		/* We are in post-prepare. latency_prepare is already
801 		 * incremented by lazy + 1 for next event
802 		 */
803 		latency_prepare -= lll_iso->iso_lazy + 1;
804 	}
805 
806 	payload_count = lll_iso->payload_count + ((latency_prepare +
807 						   lll_iso->iso_lazy) * lll_iso->bn);
808 
809 	bi = big_info_get(pdu);
810 	big_info_offset_fill(bi, lll_iso->ticks_sync_pdu_offset, 0U);
811 	/* Assign the 39-bit payload count, retaining the 1 MS bit framing value */
812 	bi->payload_count_framing[0] = payload_count;
813 	bi->payload_count_framing[1] = payload_count >> 8;
814 	bi->payload_count_framing[2] = payload_count >> 16;
815 	bi->payload_count_framing[3] = payload_count >> 24;
816 	bi->payload_count_framing[4] &= ~0x7F;
817 	bi->payload_count_framing[4] |= (payload_count >> 32) & 0x7F;
818 
819 	/* Update Channel Map in the BIGInfo until Thread context gets a
820 	 * chance to update the PDU with new Channel Map.
821 	 */
822 	if (lll_sync->iso_chm_done_req != lll_sync->iso_chm_done_ack) {
823 		pdu_big_info_chan_map_phy_set(bi->chm_phy,
824 					      lll_iso->data_chan_map,
825 					      lll_iso->phy);
826 	}
827 }
828 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
829 
830 void ull_adv_iso_done_complete(struct node_rx_event_done *done)
831 {
832 	struct ll_adv_iso_set *adv_iso;
833 	struct lll_adv_iso *lll;
834 	struct node_rx_hdr *rx;
835 	memq_link_t *link;
836 
837 	/* switch to normal prepare */
838 	mfy_lll_prepare.fp = lll_adv_iso_prepare;
839 
840 	/* Get reference to ULL context */
841 	adv_iso = CONTAINER_OF(done->param, struct ll_adv_iso_set, ull);
842 	lll = &adv_iso->lll;
843 
844 	/* Prepare BIG complete event */
845 	rx = (void *)&adv_iso->node_rx_complete;
846 	link = rx->link;
847 	if (!link) {
848 		/* NOTE: When BIS events have overlapping prepares placed
849 		 *       in the pipeline, more than one done complete event
850 		 *       will be generated; let's ignore the additional done
851 		 *       events.
852 		 */
853 		return;
854 	}
855 	rx->link = NULL;
856 
857 	rx->type = NODE_RX_TYPE_BIG_COMPLETE;
858 	rx->handle = lll->handle;
859 	rx->rx_ftr.param = adv_iso;
860 
861 	ll_rx_put_sched(link, rx);
862 }
863 
864 void ull_adv_iso_done_terminate(struct node_rx_event_done *done)
865 {
866 	struct ll_adv_iso_set *adv_iso;
867 	struct lll_adv_iso *lll;
868 	uint32_t ret;
869 
870 	/* Get reference to ULL context */
871 	adv_iso = CONTAINER_OF(done->param, struct ll_adv_iso_set, ull);
872 	lll = &adv_iso->lll;
873 
874 	/* Skip if already terminated (we come here if the pipeline is being flushed) */
875 	if (unlikely(lll->handle == LLL_ADV_HANDLE_INVALID)) {
876 		return;
877 	}
878 
879 	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
880 			  (TICKER_ID_ADV_ISO_BASE + lll->handle),
881 			  ticker_stop_op_cb, adv_iso);
882 	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
883 		  (ret == TICKER_STATUS_BUSY));
884 
885 	/* Invalidate the handle */
886 	lll->handle = LLL_ADV_HANDLE_INVALID;
887 }
888 
889 struct ll_adv_iso_set *ull_adv_iso_by_stream_get(uint16_t handle)
890 {
891 	if (handle >= CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT) {
892 		return NULL;
893 	}
894 
895 	return adv_iso_get(stream_pool[handle].big_handle);
896 }
897 
898 struct lll_adv_iso_stream *ull_adv_iso_stream_get(uint16_t handle)
899 {
900 	if (handle >= CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT) {
901 		return NULL;
902 	}
903 
904 	return &stream_pool[handle];
905 }
906 
907 struct lll_adv_iso_stream *ull_adv_iso_lll_stream_get(uint16_t handle)
908 {
909 	return ull_adv_iso_stream_get(handle);
910 }
911 
912 void ull_adv_iso_stream_release(struct ll_adv_iso_set *adv_iso)
913 {
914 	struct lll_adv_iso *lll;
915 
916 	lll = &adv_iso->lll;
917 	while (lll->num_bis--) {
918 		struct lll_adv_iso_stream *stream;
919 		struct ll_iso_datapath *dp;
920 		uint16_t stream_handle;
921 		memq_link_t *link;
922 
923 		stream_handle = lll->stream_handle[lll->num_bis];
924 		stream = ull_adv_iso_stream_get(stream_handle);
925 
926 		LL_ASSERT(!stream->link_tx_free);
927 		link = memq_deinit(&stream->memq_tx.head,
928 				   &stream->memq_tx.tail);
929 		LL_ASSERT(link);
930 		stream->link_tx_free = link;
931 
932 		dp = stream->dp;
933 		if (dp) {
934 			stream->dp = NULL;
935 			isoal_source_destroy(dp->source_hdl);
936 			ull_iso_datapath_release(dp);
937 		}
938 
939 		mem_release(stream, &stream_free);
940 	}
941 
942 	/* Remove Periodic Advertising association */
943 	lll->adv->sync->iso = NULL;
944 
945 	/* Remove Extended Advertising association */
946 	lll->adv = NULL;
947 }
948 
949 uint32_t ull_adv_iso_max_time_get(const struct ll_adv_iso_set *adv_iso)
950 {
951 	return adv_iso_time_get(adv_iso, true);
952 }
953 
954 static int init_reset(void)
955 {
956 	/* Add initializations common to power up initialization and HCI reset
957 	 * initialization.
958 	 */
959 
960 	mem_init((void *)stream_pool, sizeof(struct lll_adv_iso_stream),
961 		 CONFIG_BT_CTLR_ADV_ISO_STREAM_COUNT, &stream_free);
962 
963 	return 0;
964 }
965 
966 static struct ll_adv_iso_set *adv_iso_get(uint8_t handle)
967 {
968 	if (handle >= CONFIG_BT_CTLR_ADV_SET) {
969 		return NULL;
970 	}
971 
972 	return &ll_adv_iso[handle];
973 }
974 
975 static struct stream *adv_iso_stream_acquire(void)
976 {
977 	return mem_acquire(&stream_free);
978 }
979 
980 static uint16_t adv_iso_stream_handle_get(struct lll_adv_iso_stream *stream)
981 {
982 	return mem_index_get(stream, stream_pool, sizeof(*stream));
983 }
984 
985 static uint8_t ptc_calc(const struct lll_adv_iso *lll, uint32_t event_spacing,
986 			uint32_t event_spacing_max)
987 {
988 	if (event_spacing < event_spacing_max) {
989 		uint8_t ptc;
990 
991 		/* Possible maximum Pre-transmission Subevents per BIS */
992 		ptc = ((event_spacing_max - event_spacing) /
993 		       (lll->sub_interval * lll->bn * lll->num_bis)) *
994 		      lll->bn;
995 
996 		/* FIXME: Here we restrict to a maximum of BN Pre-Transmission
997 		 * subevents per BIS
998 		 */
999 		ptc = MIN(ptc, lll->bn);
1000 
1001 		return ptc;
1002 	}
1003 
1004 	return 0U;
1005 }
1006 
1007 static uint32_t adv_iso_time_get(const struct ll_adv_iso_set *adv_iso, bool max)
1008 {
1009 	const struct lll_adv_iso *lll_iso;
1010 	uint32_t ctrl_spacing;
1011 	uint32_t pdu_spacing;
1012 	uint32_t time_us;
1013 
1014 	lll_iso = &adv_iso->lll;
1015 
1016 	pdu_spacing = PDU_BIS_US(lll_iso->max_pdu, lll_iso->enc, lll_iso->phy,
1017 				 lll_iso->phy_flags) +
1018 		      EVENT_MSS_US;
1019 	ctrl_spacing = PDU_BIS_US(sizeof(struct pdu_big_ctrl), lll_iso->enc,
1020 				  lll_iso->phy, lll_iso->phy_flags);
1021 
1022 	/* 1. Maximum PDU transmission time in 1M/2M/S8 PHY is 17040 us, or
1023 	 * represented in 15-bits.
1024 	 * 2. NSE in the range 1 to 31 is represented in 5-bits
1025 	 * 3. num_bis in the range 1 to 31 is represented in 5-bits
1026 	 *
1027 	 * Hence, worst case event time can be represented in 25-bits plus
1028 	 * one bit each for the added ctrl_spacing and radio event overheads, i.e.
1029 	 * 27 bits are required and sufficiently covered by using a 32-bit data type
1030 	 * for time_us.
1031 	 */
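	/* For example, 17040 us x 31 NSE x 31 BISes is roughly 16.4 seconds,
	 * which still fits comfortably in the 32-bit microsecond value used
	 * below.
	 */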
1032 
1033 	if (IS_ENABLED(CONFIG_BT_CTLR_ADV_ISO_RESERVE_MAX) || max) {
1034 		time_us = (pdu_spacing * lll_iso->nse * lll_iso->num_bis) +
1035 			  ctrl_spacing;
1036 	} else {
1037 		time_us = pdu_spacing * ((lll_iso->nse * lll_iso->num_bis) -
1038 					 lll_iso->ptc);
1039 	}
1040 
1041 	/* Add implementation defined radio event overheads */
1042 	time_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
1043 
1044 	return time_us;
1045 }
1046 
1047 static uint32_t adv_iso_start(struct ll_adv_iso_set *adv_iso,
1048 			      uint32_t iso_interval_us)
1049 {
1050 	uint32_t ticks_slot_overhead;
1051 	uint32_t ticks_slot_offset;
1052 	volatile uint32_t ret_cb;
1053 	uint32_t ticks_anchor;
1054 	uint32_t ticks_slot;
1055 	uint32_t slot_us;
1056 	uint32_t ret;
1057 	int err;
1058 
1059 	ull_hdr_init(&adv_iso->ull);
1060 
1061 	slot_us = adv_iso_time_get(adv_iso, false);
1062 
1063 	adv_iso->ull.ticks_active_to_start = 0U;
1064 	adv_iso->ull.ticks_prepare_to_start =
1065 		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
1066 	adv_iso->ull.ticks_preempt_to_start =
1067 		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
1068 	adv_iso->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);
1069 
1070 	ticks_slot_offset = MAX(adv_iso->ull.ticks_active_to_start,
1071 				adv_iso->ull.ticks_prepare_to_start);
1072 	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
1073 		ticks_slot_overhead = ticks_slot_offset;
1074 	} else {
1075 		ticks_slot_overhead = 0U;
1076 	}
1077 	ticks_slot = adv_iso->ull.ticks_slot + ticks_slot_overhead;
1078 
1079 	/* Find the slot after the Periodic Advertising events */
1080 	ticks_anchor = ticker_ticks_now_get() +
1081 		       HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);
1082 	err = ull_sched_adv_aux_sync_free_anchor_get(ticks_slot, &ticks_anchor);
1083 	if (!err) {
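		/* NOTE: Advance the anchor so that the BIG event keeps at
		 *       least the minimum AUX frame space from the found free
		 *       slot, plus ticker resolution margin.
		 */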
1084 		ticks_anchor += HAL_TICKER_US_TO_TICKS(
1085 					MAX(EVENT_MAFS_US,
1086 					    EVENT_OVERHEAD_START_US) -
1087 					EVENT_OVERHEAD_START_US +
1088 					(EVENT_TICKER_RES_MARGIN_US << 1));
1089 	}
1090 
1091 	/* setup to use ISO create prepare function for first radio event */
1092 	mfy_lll_prepare.fp = lll_adv_iso_create_prepare;
1093 
1094 	ret_cb = TICKER_STATUS_BUSY;
1095 	ret = ticker_start(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_THREAD,
1096 			   (TICKER_ID_ADV_ISO_BASE + adv_iso->lll.handle),
1097 			   ticks_anchor, 0U,
1098 			   HAL_TICKER_US_TO_TICKS(iso_interval_us),
1099 			   HAL_TICKER_REMAINDER(iso_interval_us),
1100 			   TICKER_NULL_LAZY, ticks_slot, ticker_cb, adv_iso,
1101 			   ull_ticker_status_give, (void *)&ret_cb);
1102 	ret = ull_ticker_status_take(ret, &ret_cb);
1103 
1104 	return ret;
1105 }
1106 
1107 static uint8_t adv_iso_chm_update(uint8_t big_handle)
1108 {
1109 	struct ll_adv_iso_set *adv_iso;
1110 	struct lll_adv_iso *lll_iso;
1111 
1112 	adv_iso = adv_iso_get(big_handle);
1113 	if (!adv_iso) {
1114 		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
1115 	}
1116 
1117 	lll_iso = &adv_iso->lll;
1118 	if (lll_iso->term_req ||
1119 	    (lll_iso->chm_req != lll_iso->chm_ack)) {
1120 		return BT_HCI_ERR_CMD_DISALLOWED;
1121 	}
1122 
1123 	/* Request channel map update procedure */
1124 	lll_iso->chm_chan_count = ull_chan_map_get(lll_iso->chm_chan_map);
1125 	lll_iso->chm_req++;
1126 
1127 	return BT_HCI_ERR_SUCCESS;
1128 }
1129 
1130 static void adv_iso_chm_complete_commit(struct lll_adv_iso *lll_iso)
1131 {
1132 	uint8_t hdr_data[ULL_ADV_HDR_DATA_LEN_SIZE +
1133 			 ULL_ADV_HDR_DATA_ACAD_PTR_SIZE];
1134 	struct pdu_adv *pdu_prev, *pdu;
1135 	struct lll_adv_sync *lll_sync;
1136 	struct pdu_big_info *bi;
1137 	struct ll_adv_set *adv;
1138 	uint8_t acad_len;
1139 	uint8_t ter_idx;
1140 	uint8_t ad_len;
1141 	uint8_t *acad;
1142 	uint8_t *ad;
1143 	uint8_t len;
1144 	uint8_t err;
1145 
1146 	/* Allocate next PDU */
1147 	adv = HDR_LLL2ULL(lll_iso->adv);
1148 	err = ull_adv_sync_pdu_alloc(adv, ULL_ADV_PDU_EXTRA_DATA_ALLOC_IF_EXIST,
1149 				     &pdu_prev, &pdu, NULL, NULL, &ter_idx);
1150 	LL_ASSERT(!err);
1151 
1152 	/* Get the size of the current ACAD; the first octet returns the old
1153 	 * length, followed by a pointer to the previous ACAD offset in the PDU.
1154 	 */
1155 	lll_sync = adv->lll.sync;
1156 	hdr_data[ULL_ADV_HDR_DATA_LEN_OFFSET] = 0U;
1157 	err = ull_adv_sync_pdu_set_clear(lll_sync, pdu_prev, pdu,
1158 					 ULL_ADV_PDU_HDR_FIELD_ACAD, 0U,
1159 					 &hdr_data);
1160 	LL_ASSERT(!err);
1161 
1162 	/* Dev assert if ACAD empty */
1163 	LL_ASSERT(hdr_data[ULL_ADV_HDR_DATA_LEN_OFFSET]);
1164 
1165 	/* Get the pointer, prev content and size of current ACAD */
1166 	err = ull_adv_sync_pdu_set_clear(lll_sync, pdu_prev, pdu,
1167 					 ULL_ADV_PDU_HDR_FIELD_ACAD, 0U,
1168 					 &hdr_data);
1169 	LL_ASSERT(!err);
1170 
1171 	/* Find the BIGInfo */
1172 	acad_len = hdr_data[ULL_ADV_HDR_DATA_LEN_OFFSET];
1173 	len = acad_len;
1174 	(void)memcpy(&acad, &hdr_data[ULL_ADV_HDR_DATA_ACAD_PTR_OFFSET],
1175 		     sizeof(acad));
1176 	ad = acad;
1177 	do {
1178 		ad_len = ad[PDU_ADV_DATA_HEADER_LEN_OFFSET];
1179 		if (ad_len &&
1180 		    (ad[PDU_ADV_DATA_HEADER_TYPE_OFFSET] == BT_DATA_BIG_INFO)) {
1181 			break;
1182 		}
1183 
1184 		ad_len += 1U;
1185 
1186 		LL_ASSERT(ad_len <= len);
1187 
1188 		ad += ad_len;
1189 		len -= ad_len;
1190 	} while (len);
1191 	LL_ASSERT(len);
1192 
1193 	/* Get reference to BIGInfo */
1194 	bi = (void *)&ad[PDU_ADV_DATA_HEADER_DATA_OFFSET];
1195 
1196 	/* Copy the new/current Channel Map */
1197 	pdu_big_info_chan_map_phy_set(bi->chm_phy, lll_iso->data_chan_map,
1198 				      lll_iso->phy);
1199 
1200 	/* Commit the new PDU Buffer */
1201 	lll_adv_sync_data_enqueue(lll_sync, ter_idx);
1202 }
1203 
1204 static void mfy_iso_offset_get(void *param)
1205 {
1206 	struct lll_adv_sync *lll_sync;
1207 	struct ll_adv_sync_set *sync;
1208 	struct lll_adv_iso *lll_iso;
1209 	uint32_t ticks_to_expire;
1210 	struct pdu_big_info *bi;
1211 	uint32_t ticks_current;
1212 	uint64_t payload_count;
1213 	struct pdu_adv *pdu;
1214 	uint8_t ticker_id;
1215 	uint16_t lazy;
1216 	uint8_t retry;
1217 	uint8_t id;
1218 
1219 	sync = param;
1220 	lll_sync = &sync->lll;
1221 	lll_iso = lll_sync->iso;
1222 	ticker_id = TICKER_ID_ADV_ISO_BASE + lll_iso->handle;
1223 
1224 	id = TICKER_NULL;
1225 	ticks_to_expire = 0U;
1226 	ticks_current = 0U;
1227 	retry = 4U;
1228 	do {
1229 		uint32_t volatile ret_cb;
1230 		uint32_t ticks_previous;
1231 		uint32_t ret;
1232 		bool success;
1233 
1234 		ticks_previous = ticks_current;
1235 
1236 		ret_cb = TICKER_STATUS_BUSY;
1237 		ret = ticker_next_slot_get_ext(TICKER_INSTANCE_ID_CTLR,
1238 					       TICKER_USER_ID_ULL_LOW,
1239 					       &id, &ticks_current,
1240 					       &ticks_to_expire, NULL, &lazy,
1241 					       NULL, NULL,
1242 					       ticker_op_cb, (void *)&ret_cb);
1243 		if (ret == TICKER_STATUS_BUSY) {
1244 			/* Busy wait until Ticker Job is enabled after any Radio
1245 			 * event is done using the Radio hardware. Ticker Job
1246 			 * ISR is disabled during Radio events in LOW_LAT
1247 			 * feature to avoid Radio ISR latencies.
1248 			 */
1249 			while (ret_cb == TICKER_STATUS_BUSY) {
1250 				ticker_job_sched(TICKER_INSTANCE_ID_CTLR,
1251 						 TICKER_USER_ID_ULL_LOW);
1252 			}
1253 		}
1254 
1255 		success = (ret_cb == TICKER_STATUS_SUCCESS);
1256 		LL_ASSERT(success);
1257 
1258 		LL_ASSERT((ticks_current == ticks_previous) || retry--);
1259 
1260 		LL_ASSERT(id != TICKER_NULL);
1261 	} while (id != ticker_id);
1262 
1263 	payload_count = lll_iso->payload_count +
1264 			(((uint64_t)lll_iso->latency_prepare + lazy) * lll_iso->bn);
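	/* NOTE: The stored payload_count is projected forward by the number of
	 *       BIG events that will elapse before the referenced event
	 *       (prepare latency plus the ticker lazy count), BN new payloads
	 *       per event.
	 */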
1265 
1266 	pdu = lll_adv_sync_data_latest_peek(lll_sync);
1267 	bi = big_info_get(pdu);
1268 	big_info_offset_fill(bi, ticks_to_expire, 0U);
1269 	/* Assign the 39-bit payload count, retaining the 1 MS bit framing value */
1270 	bi->payload_count_framing[0] = payload_count;
1271 	bi->payload_count_framing[1] = payload_count >> 8;
1272 	bi->payload_count_framing[2] = payload_count >> 16;
1273 	bi->payload_count_framing[3] = payload_count >> 24;
1274 	bi->payload_count_framing[4] &= ~0x7F;
1275 	bi->payload_count_framing[4] |= (payload_count >> 32) & 0x7F;
1276 
1277 	/* Update Channel Map in the BIGInfo until Thread context gets a
1278 	 * chance to update the PDU with new Channel Map.
1279 	 */
1280 	if (lll_sync->iso_chm_done_req != lll_sync->iso_chm_done_ack) {
1281 		pdu_big_info_chan_map_phy_set(bi->chm_phy,
1282 					      lll_iso->data_chan_map,
1283 					      lll_iso->phy);
1284 	}
1285 }
1286 
1287 static void pdu_big_info_chan_map_phy_set(uint8_t *chm_phy, uint8_t *chan_map,
1288 					  uint8_t phy)
1289 {
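	/* BIGInfo ChM_PHY field: 37-bit channel map in the least significant
	 * bits, PHY value encoded in the 3 most significant bits of the last
	 * octet.
	 */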
1290 	(void)memcpy(chm_phy, chan_map, PDU_CHANNEL_MAP_SIZE);
1291 	chm_phy[4] &= 0x1F;
1292 	chm_phy[4] |= ((find_lsb_set(phy) - 1U) << 5);
1293 }
1294 
1295 static inline struct pdu_big_info *big_info_get(struct pdu_adv *pdu)
1296 {
1297 	struct pdu_adv_com_ext_adv *p;
1298 	struct pdu_adv_ext_hdr *h;
1299 	uint8_t *ptr;
1300 
1301 	p = (void *)&pdu->adv_ext_ind;
1302 	h = (void *)p->ext_hdr_adv_data;
1303 	ptr = h->data;
1304 
1305 	/* No AdvA and TargetA */
1306 
1307 	/* traverse through CTE Info, if present */
1308 	if (h->cte_info) {
1309 		ptr += sizeof(struct pdu_cte_info);
1310 	}
1311 
1312 	/* traverse through ADI, if present */
1313 	if (h->adi) {
1314 		ptr += sizeof(struct pdu_adv_adi);
1315 	}
1316 
1317 	/* traverse through aux ptr, if present */
1318 	if (h->aux_ptr) {
1319 		ptr += sizeof(struct pdu_adv_aux_ptr);
1320 	}
1321 
1322 	/* No SyncInfo */
1323 
1324 	/* traverse through Tx Power, if present */
1325 	if (h->tx_pwr) {
1326 		ptr++;
1327 	}
1328 
1329 	/* FIXME: Parse and find the Length encoded AD Format */
1330 	ptr += 2;
1331 
1332 	return (void *)ptr;
1333 }
1334 
1335 static inline void big_info_offset_fill(struct pdu_big_info *bi,
1336 					uint32_t ticks_offset,
1337 					uint32_t start_us)
1338 {
1339 	uint32_t offs;
1340 
1341 	offs = HAL_TICKER_TICKS_TO_US(ticks_offset) - start_us;
1342 	offs = offs / OFFS_UNIT_30_US;
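	/* Use 30 us offset units when the value fits the offset field width,
	 * otherwise fall back to the coarser 300 us units.
	 */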
1343 	if (!!(offs >> OFFS_UNIT_BITS)) {
1344 		bi->offs = sys_cpu_to_le16(offs / (OFFS_UNIT_300_US /
1345 						   OFFS_UNIT_30_US));
1346 		bi->offs_units = 1U;
1347 	} else {
1348 		bi->offs = sys_cpu_to_le16(offs);
1349 		bi->offs_units = 0U;
1350 	}
1351 }
1352 
1353 static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
1354 		      uint32_t remainder, uint16_t lazy, uint8_t force,
1355 		      void *param)
1356 {
1357 	static struct lll_prepare_param p;
1358 	struct ll_adv_iso_set *adv_iso = param;
1359 	uint32_t remainder_us;
1360 	uint32_t ret;
1361 	uint8_t ref;
1362 
1363 	DEBUG_RADIO_PREPARE_A(1);
1364 
1365 	/* Increment prepare reference count */
1366 	ref = ull_ref_inc(&adv_iso->ull);
1367 	LL_ASSERT(ref);
1368 
1369 	/* Append timing parameters */
1370 	p.ticks_at_expire = ticks_at_expire;
1371 	p.remainder = remainder;
1372 	p.lazy = lazy;
1373 	p.force = force;
1374 	p.param = &adv_iso->lll;
1375 	mfy_lll_prepare.param = &p;
1376 
1377 	/* Kick LLL prepare */
1378 	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, 0,
1379 			     &mfy_lll_prepare);
1380 	LL_ASSERT(!ret);
1381 
1382 	/* Calculate the BIG reference point of current BIG event */
1383 	remainder_us = remainder;
1384 	hal_ticker_remove_jitter(&ticks_at_expire, &remainder_us);
1385 	ticks_at_expire &= HAL_TICKER_CNTR_MASK;
1386 	adv_iso->big_ref_point = isoal_get_wrapped_time_us(HAL_TICKER_TICKS_TO_US(ticks_at_expire),
1387 							   (remainder_us +
1388 							    EVENT_OVERHEAD_START_US));
1389 
1390 	DEBUG_RADIO_PREPARE_A(1);
1391 }
1392 
1393 static void ticker_op_cb(uint32_t status, void *param)
1394 {
1395 	*((uint32_t volatile *)param) = status;
1396 }
1397 
1398 static void ticker_stop_op_cb(uint32_t status, void *param)
1399 {
1400 	static memq_link_t link;
1401 	static struct mayfly mfy = {0U, 0U, &link, NULL, adv_iso_disable};
1402 	uint32_t ret;
1403 
1404 	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
1405 
1406 	/* Check if there are any pending LLL events that need to be aborted */
1407 	mfy.param = param;
1408 	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
1409 			     TICKER_USER_ID_ULL_HIGH, 0U, &mfy);
1410 	LL_ASSERT(!ret);
1411 }
1412 
1413 static void adv_iso_disable(void *param)
1414 {
1415 	struct ll_adv_iso_set *adv_iso;
1416 	struct ull_hdr *hdr;
1417 
1418 	/* Check ref count to determine if there are any pending LLL events in the pipeline */
1419 	adv_iso = param;
1420 	hdr = &adv_iso->ull;
1421 	if (ull_ref_get(hdr)) {
1422 		static memq_link_t link;
1423 		static struct mayfly mfy = {0U, 0U, &link, NULL, lll_disable};
1424 		uint32_t ret;
1425 
1426 		mfy.param = &adv_iso->lll;
1427 
1428 		/* Setup disabled callback to be called when ref count
1429 		 * returns to zero.
1430 		 */
1431 		LL_ASSERT(!hdr->disabled_cb);
1432 		hdr->disabled_param = mfy.param;
1433 		hdr->disabled_cb = disabled_cb;
1434 
1435 		/* Trigger LLL disable */
1436 		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
1437 				     TICKER_USER_ID_LLL, 0U, &mfy);
1438 		LL_ASSERT(!ret);
1439 	} else {
1440 		/* No pending LLL events */
1441 		disabled_cb(&adv_iso->lll);
1442 	}
1443 }
1444 
1445 static void disabled_cb(void *param)
1446 {
1447 	static memq_link_t link;
1448 	static struct mayfly mfy = {0U, 0U, &link, NULL, tx_lll_flush};
1449 	uint32_t ret;
1450 
1451 	mfy.param = param;
1452 	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
1453 			     TICKER_USER_ID_LLL, 0U, &mfy);
1454 	LL_ASSERT(!ret);
1455 }
1456 
1457 static void tx_lll_flush(void *param)
1458 {
1459 	struct ll_adv_iso_set *adv_iso;
1460 	struct lll_adv_iso *lll;
1461 	struct node_rx_pdu *rx;
1462 	memq_link_t *link;
1463 	uint8_t num_bis;
1464 
1465 	/* Get reference to LLL context */
1466 	lll = param;
1467 
1468 	/* Flush TX */
1469 	num_bis = lll->num_bis;
1470 	while (num_bis--) {
1471 		struct lll_adv_iso_stream *stream;
1472 		struct node_tx_iso *tx;
1473 		uint16_t stream_handle;
1474 		memq_link_t *link2;
1475 		uint16_t handle;
1476 
1477 		stream_handle = lll->stream_handle[num_bis];
1478 		handle = LL_BIS_ADV_HANDLE_FROM_IDX(stream_handle);
1479 		stream = ull_adv_iso_stream_get(stream_handle);
1480 
1481 		link2 = memq_dequeue(stream->memq_tx.tail, &stream->memq_tx.head,
1482 				     (void **)&tx);
1483 		while (link2) {
1484 			tx->next = link2;
1485 			ull_iso_lll_ack_enqueue(handle, tx);
1486 
1487 			link2 = memq_dequeue(stream->memq_tx.tail,
1488 					    &stream->memq_tx.head,
1489 					    (void **)&tx);
1490 		}
1491 	}
1492 
1493 	/* Get the terminate structure reserved in the ISO context.
1494 	 * The terminate reason and connection handle should already be
1495 	 * populated before this mayfly function was scheduled.
1496 	 */
1497 	adv_iso = HDR_LLL2ULL(lll);
1498 	rx = (void *)&adv_iso->node_rx_terminate;
1499 	link = rx->hdr.link;
1500 	LL_ASSERT(link);
1501 	rx->hdr.link = NULL;
1502 
1503 	/* Enqueue the terminate towards ULL context */
1504 	ull_rx_put_sched(link, rx);
1505 }
1506