1 /*
2  * Copyright (c) 2020 Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/kernel.h>
8 #include <zephyr/sys/byteorder.h>
9 #include <zephyr/bluetooth/hci_types.h>
10 
11 #include "util/util.h"
12 #include "util/mem.h"
13 #include "util/memq.h"
14 #include "util/mayfly.h"
15 #include "util/dbuf.h"
16 
17 #include "hal/ccm.h"
18 #include "hal/radio.h"
19 #include "hal/ticker.h"
20 
21 #include "ticker/ticker.h"
22 
23 #include "pdu_df.h"
24 #include "lll/pdu_vendor.h"
25 #include "pdu.h"
26 
27 #include "lll.h"
28 #include "lll/lll_vendor.h"
29 #include "lll_clock.h"
30 #include "lll_scan.h"
31 #include "lll/lll_df_types.h"
32 #include "lll_sync.h"
33 #include "lll_sync_iso.h"
34 #include "lll_conn.h"
35 #include "lll_conn_iso.h"
36 
37 #include "isoal.h"
38 
39 #include "ull_tx_queue.h"
40 
41 #include "ull_scan_types.h"
42 #include "ull_sync_types.h"
43 #include "ull_iso_types.h"
44 #include "ull_conn_types.h"
45 #include "ull_conn_iso_types.h"
46 
47 #include "ull_internal.h"
48 #include "ull_scan_internal.h"
49 #include "ull_sync_internal.h"
50 #include "ull_iso_internal.h"
51 #include "ull_sync_iso_internal.h"
52 #include "ull_conn_internal.h"
53 #include "ull_conn_iso_internal.h"
54 
55 #include "ll.h"
56 
57 #include "bt_crypto.h"
58 
59 #include "hal/debug.h"
60 
61 static int init_reset(void);
62 static struct ll_sync_iso_set *sync_iso_get(uint8_t handle);
63 static struct ll_sync_iso_set *sync_iso_alloc(uint8_t handle);
64 static uint8_t sync_iso_handle_get(struct ll_sync_iso_set *sync);
65 static uint8_t sync_iso_handle_to_index(uint8_t handle);
66 static struct stream *sync_iso_stream_acquire(void);
67 static uint16_t sync_iso_stream_handle_get(struct lll_sync_iso_stream *stream);
68 static void timeout_cleanup(struct ll_sync_iso_set *sync_iso);
69 static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
70 		      uint32_t remainder, uint16_t lazy, uint8_t force,
71 		      void *param);
72 static void ticker_start_op_cb(uint32_t status, void *param);
73 static void ticker_update_op_cb(uint32_t status, void *param);
74 static void ticker_stop_op_cb(uint32_t status, void *param);
75 static void sync_iso_disable(void *param);
76 static void disabled_cb(void *param);
77 static void lll_flush(void *param);
78 static void stop_ticker(struct ll_sync_iso_set *sync_iso, ticker_op_func fp_op_func);
79 
80 static memq_link_t link_lll_prepare;
81 static struct mayfly mfy_lll_prepare = {0U, 0U, &link_lll_prepare, NULL, NULL};
82 
83 static struct ll_sync_iso_set ll_sync_iso[CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET];
84 static struct lll_sync_iso_stream
85 			stream_pool[CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT];
86 static struct ll_iso_rx_test_mode
87 			test_mode[CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT];
88 static void *stream_free;
89 
/* HCI LE BIG Create Sync implementation.
 *
 * Allocates and initializes a BIG sync context bound to an established
 * periodic advertising sync, and flags that sync so that ISO sync is
 * established on the next BIGInfo reception (see ull_sync_iso_setup).
 *
 * big_handle   - host-assigned BIG handle, must not already be in use
 * sync_handle  - handle of the enabled periodic advertising sync
 * encryption   - non-zero if the BIG is encrypted (bcode then valid)
 * bcode        - 16-byte Broadcast Code used to derive the GLTK
 * mse          - maximum subevents to listen to (currently unused, TODO)
 * sync_timeout - BIG sync timeout, in 10 ms units
 * num_bis      - number of entries in bis[]
 * bis          - BIS indices to sync to, 1-based, strictly ascending
 *
 * Returns BT_HCI_ERR_SUCCESS or an HCI error code.
 */
uint8_t ll_big_sync_create(uint8_t big_handle, uint16_t sync_handle,
			   uint8_t encryption, uint8_t *bcode, uint8_t mse,
			   uint16_t sync_timeout, uint8_t num_bis,
			   uint8_t *bis)
{
	struct ll_sync_iso_set *sync_iso;
	memq_link_t *link_sync_estab;
	memq_link_t *link_sync_lost;
	struct node_rx_pdu *node_rx;
	struct ll_sync_set *sync;
	struct lll_sync_iso *lll;
	int8_t last_index;

	/* The periodic advertising sync must exist and be enabled */
	sync = ull_sync_is_enabled_get(sync_handle);
	if (!sync) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	/* A BIG sync create is already pending on this periodic sync */
	if (sync->iso.sync_iso) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	sync_iso = sync_iso_get(big_handle);
	if (sync_iso) {
		/* BIG handle already in use */
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* NOTE: the allocated instance only becomes "in use" once
	 *       sync_iso->sync is assigned below; the early error returns
	 *       that follow leave the instance free for re-allocation.
	 */
	sync_iso = sync_iso_alloc(big_handle);
	if (!sync_iso) {
		return BT_HCI_ERR_INSUFFICIENT_RESOURCES;
	}

	/* TODO: Check remaining parameters */

	/* Check BIS indices */
	last_index = -1;
	for (uint8_t i = 0U; i < num_bis; i++) {
		/* Stream index must be in valid range and in ascending order */
		if (!IN_RANGE(bis[i], 0x01, 0x1F) || (bis[i] <= last_index)) {
			return BT_HCI_ERR_INVALID_PARAM;

		} else if (bis[i] > sync->num_bis) {
			/* Requested BIS not advertised by the BIG source */
			return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
		}
		last_index = bis[i];
	}

	/* Check if encryption supported */
	if (!IS_ENABLED(CONFIG_BT_CTLR_BROADCAST_ISO_ENC) &&
	    encryption) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	};

	/* Check if requested encryption matches */
	if (encryption != sync->enc) {
		return BT_HCI_ERR_ENC_MODE_NOT_ACCEPTABLE;
	}

	/* Check if free BISes available */
	if ((num_bis > BT_CTLR_SYNC_ISO_STREAM_MAX) ||
	    (mem_free_count_get(stream_free) < num_bis)) {
		return BT_HCI_ERR_INSUFFICIENT_RESOURCES;
	}

	/* Reserve rx links and the node used for the BIG Sync Established
	 * notification; released in reverse order on failure.
	 */
	link_sync_estab = ll_rx_link_alloc();
	if (!link_sync_estab) {
		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	link_sync_lost = ll_rx_link_alloc();
	if (!link_sync_lost) {
		ll_rx_link_release(link_sync_estab);

		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	node_rx = ll_rx_alloc();
	if (!node_rx) {
		ll_rx_link_release(link_sync_lost);
		ll_rx_link_release(link_sync_estab);

		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	/* Initialize the ISO sync ULL context */
	sync_iso->sync = sync;
	sync_iso->timeout = sync_timeout;
	sync_iso->timeout_reload = 0U;
	sync_iso->timeout_expire = 0U;

	/* Setup the periodic sync to establish ISO sync */
	node_rx->hdr.link = link_sync_estab;
	sync->iso.node_rx_estab = node_rx;
	sync_iso->node_rx_lost.rx.hdr.link = link_sync_lost;

	/* Initialize sync LLL context */
	lll = &sync_iso->lll;
	lll->latency_prepare = 0U;
	lll->latency_event = 0U;
	lll->window_widening_prepare_us = 0U;
	lll->window_widening_event_us = 0U;
	lll->ctrl = 0U;
	lll->cssn_curr = 0U;
	lll->cssn_next = 0U;
	lll->term_reason = 0U;

	if (IS_ENABLED(CONFIG_BT_CTLR_BROADCAST_ISO_ENC) && encryption) {
		/* Key-derivation salts "BIG1" and "BIG2" (little-endian) */
		const uint8_t BIG1[16] = {0x31, 0x47, 0x49, 0x42, };
		const uint8_t BIG2[4]  = {0x32, 0x47, 0x49, 0x42};
		uint8_t igltk[16];
		int err;

		/* Calculate GLTK: h7(BIG1, Broadcast Code), then h6 with
		 * "BIG2"; the GSK is derived later when the GSKD is known
		 * from BIGInfo.
		 */
		err = bt_crypto_h7(BIG1, bcode, igltk);
		LL_ASSERT(!err);
		err = bt_crypto_h6(igltk, BIG2, sync_iso->gltk);
		LL_ASSERT(!err);

		lll->enc = 1U;
	} else {
		lll->enc = 0U;
	}

	/* TODO: Implement usage of MSE to limit listening to subevents */

	/* Allocate streams */
	lll->stream_count = num_bis;
	for (uint8_t i = 0U; i < num_bis; i++) {
		struct lll_sync_iso_stream *stream;

		/* NOTE: acquisition cannot fail here; free count was
		 *       checked against num_bis above.
		 */
		stream = (void *)sync_iso_stream_acquire();
		stream->big_handle = big_handle;
		stream->bis_index = bis[i];
		stream->dp = NULL;
		stream->test_mode = &test_mode[i];
		memset(stream->test_mode, 0, sizeof(struct ll_iso_rx_test_mode));
		lll->stream_handle[i] = sync_iso_stream_handle_get(stream);
	}

	/* Initialize ULL and LLL headers */
	ull_hdr_init(&sync_iso->ull);
	lll_hdr_init(lll, sync_iso);

	/* Enable periodic advertising to establish ISO sync */
	sync->iso.sync_iso = sync_iso;

	return BT_HCI_ERR_SUCCESS;
}
239 
/* HCI LE BIG Terminate Sync implementation.
 *
 * Terminates a pending or established BIG sync identified by big_handle.
 * If establishment is still pending, the create request is revoked and a
 * cancelled-by-host notification node is returned through *rx. Otherwise
 * the event ticker is stopped, outstanding LLL work is flushed with a
 * blocking mayfly call, and stream resources are released.
 *
 * Returns BT_HCI_ERR_SUCCESS or an HCI error code.
 */
uint8_t ll_big_sync_terminate(uint8_t big_handle, void **rx)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_flush};

	struct ll_sync_iso_set *sync_iso;
	memq_link_t *link_sync_estab;
	struct node_rx_pdu *node_rx;
	memq_link_t *link_sync_lost;
	struct ll_sync_set *sync;
	struct k_sem sem;
	uint32_t ret;
	int err;

	sync_iso = sync_iso_get(big_handle);
	if (!sync_iso) {
		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
	}

	sync = sync_iso->sync;
	if (sync && sync->iso.sync_iso) {
		/* BIG sync not yet established: cancel the pending create */
		struct node_rx_sync_iso *se;

		if (sync->iso.sync_iso != sync_iso) {
			/* The pending create belongs to another instance */
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
		sync->iso.sync_iso = NULL;

		/* Release the resources reserved at create time */
		node_rx = sync->iso.node_rx_estab;
		link_sync_estab = node_rx->hdr.link;
		link_sync_lost = sync_iso->node_rx_lost.rx.hdr.link;

		ll_rx_link_release(link_sync_lost);
		ll_rx_link_release(link_sync_estab);
		ll_rx_release(node_rx);

		node_rx = (void *)&sync_iso->node_rx_lost;
		node_rx->hdr.type = NODE_RX_TYPE_SYNC_ISO;
		node_rx->hdr.handle = big_handle;

		/* NOTE: Since NODE_RX_TYPE_SYNC_ISO is only generated from ULL
		 *       context, pass ULL context as parameter.
		 */
		node_rx->rx_ftr.param = sync_iso;

		/* NOTE: struct node_rx_lost has uint8_t member store the reason.
		 */
		se = (void *)node_rx->pdu;
		se->status = BT_HCI_ERR_OP_CANCELLED_BY_HOST;

		*rx = node_rx;

		return BT_HCI_ERR_SUCCESS;
	}

	/* Stop the BIG sync event ticker; blocking, marks the instance so
	 * concurrent ULL operations can detect the ongoing stop.
	 */
	err = ull_ticker_stop_with_mark((TICKER_ID_SCAN_SYNC_ISO_BASE +
					 sync_iso_handle_to_index(big_handle)),
					 sync_iso, &sync_iso->lll);
	LL_ASSERT_INFO2(err == 0 || err == -EALREADY, big_handle, err);
	if (err) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Do a blocking mayfly call to LLL context for flushing any outstanding
	 * operations.
	 */
	sync_iso->flush_sem = &sem;
	k_sem_init(&sem, 0, 1);
	mfy.param = &sync_iso->lll;

	ret = mayfly_enqueue(TICKER_USER_ID_THREAD, TICKER_USER_ID_LLL, 0, &mfy);
	LL_ASSERT(!ret);
	k_sem_take(&sem, K_FOREVER);
	sync_iso->flush_sem = NULL;

	/* Release resources */
	ull_sync_iso_stream_release(sync_iso);

	link_sync_lost = sync_iso->node_rx_lost.rx.hdr.link;
	ll_rx_link_release(link_sync_lost);

	return BT_HCI_ERR_SUCCESS;
}
323 
/* One-time initialization of the BIG sync ULL module.
 *
 * Returns 0 on success, else an error code from init_reset().
 */
int ull_sync_iso_init(void)
{
	/* All module state setup is shared with reset handling */
	return init_reset();
}
335 
/* Reset of the BIG sync ULL module, disabling any active instances.
 *
 * Returns 0 on success, else an error code from init_reset().
 */
int ull_sync_iso_reset(void)
{
	/* Reset re-runs the same teardown/initialization as init */
	return init_reset();
}
347 
ull_sync_iso_lll_index_get(struct lll_sync_iso * lll)348 uint8_t ull_sync_iso_lll_index_get(struct lll_sync_iso *lll)
349 {
350 	return ARRAY_INDEX(ll_sync_iso, HDR_LLL2ULL(lll));
351 }
352 
/* Look up the BIG sync instance that owns the given BIS stream handle.
 *
 * Returns NULL when the handle is out of range or no in-use instance
 * matches the stream's BIG handle.
 */
struct ll_sync_iso_set *ull_sync_iso_by_stream_get(uint16_t handle)
{
	if (handle < CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT) {
		return sync_iso_get(stream_pool[handle].big_handle);
	}

	/* Stream handle outside the pool cannot map to a BIG sync */
	return NULL;
}
361 
/* Return the BIS stream context for the given stream handle, or NULL
 * when the owning BIG sync does not exist or has been terminated.
 */
struct lll_sync_iso_stream *ull_sync_iso_stream_get(uint16_t handle)
{
	struct ll_sync_iso_set *sync_iso;

	/* Only hand out streams that belong to a live BIG sync context */
	sync_iso = ull_sync_iso_by_stream_get(handle);
	if (sync_iso && sync_iso->sync) {
		return &stream_pool[handle];
	}

	return NULL;
}
374 
/* LLL-facing accessor for a BIS stream context; identical lookup to the
 * ULL accessor, provided as a separate entry point for the LLL layer.
 */
struct lll_sync_iso_stream *ull_sync_iso_lll_stream_get(uint16_t handle)
{
	return ull_sync_iso_stream_get(handle);
}
379 
/* Release all BIS stream contexts of a BIG sync instance back to the
 * stream pool, destroying any ISOAL sinks and datapaths attached to
 * them, then mark the instance free by clearing its sync reference.
 */
void ull_sync_iso_stream_release(struct ll_sync_iso_set *sync_iso)
{
	struct lll_sync_iso *lll;

	lll = &sync_iso->lll;
	/* NOTE: post-decrement leaves stream_count wrapped below zero on
	 *       loop exit; presumably benign since it is re-assigned in
	 *       ll_big_sync_create before next use — TODO confirm.
	 */
	while (lll->stream_count--) {
		struct lll_sync_iso_stream *stream;
		struct ll_iso_datapath *dp;
		uint16_t stream_handle;

		/* Release in reverse order of allocation */
		stream_handle = lll->stream_handle[lll->stream_count];
		stream = ull_sync_iso_stream_get(stream_handle);
		LL_ASSERT(stream);

		/* Tear down any datapath attached to the stream */
		dp = stream->dp;
		if (dp) {
			stream->dp = NULL;
			isoal_sink_destroy(dp->sink_hdl);
			ull_iso_datapath_release(dp);
		}

		mem_release(stream, &stream_free);
	}

	/* Mark the instance as free for re-allocation */
	sync_iso->sync = NULL;
}
406 
/* Parse a BIGInfo AD structure from the ACAD of a received periodic
 * advertising PDU, populate the BIG sync LLL context from it, compute
 * the first BIG event timing, and start the BIG sync event ticker.
 *
 * sync_iso - BIG sync context created by ll_big_sync_create
 * node_rx  - rx node of the periodic advertising PDU carrying the ACAD
 * acad     - pointer to the Additional Controller Advertising Data
 * acad_len - length of the ACAD in bytes
 *
 * Returns silently (no error) when no valid BIGInfo is found or its
 * content is unusable; sync establishment then awaits a later PDU.
 */
void ull_sync_iso_setup(struct ll_sync_iso_set *sync_iso,
			struct node_rx_pdu *node_rx,
			uint8_t *acad, uint8_t acad_len)
{
	struct lll_sync_iso_stream *stream;
	uint32_t ticks_slot_overhead;
	uint32_t sync_iso_offset_us;
	uint32_t ticks_slot_offset;
	uint32_t ticks_threshold;
	struct lll_sync_iso *lll;
	struct node_rx_ftr *ftr;
	struct pdu_big_info *bi;
	uint32_t ready_delay_us;
	uint32_t ticks_expire;
	uint32_t interval_us;
	uint32_t ticks_diff;
	struct pdu_adv *pdu;
	uint32_t slot_us;
	uint8_t num_bis;
	uint8_t bi_size;
	uint8_t handle;
	uint32_t ret;
	uint8_t sca;

	/* Walk the ACAD AD structures looking for a BIGInfo entry */
	while (acad_len) {
		const uint8_t hdr_len = acad[PDU_ADV_DATA_HEADER_LEN_OFFSET];

		if ((hdr_len >= PDU_ADV_DATA_HEADER_TYPE_SIZE) &&
		    (acad[PDU_ADV_DATA_HEADER_TYPE_OFFSET] ==
		     BT_DATA_BIG_INFO)) {
			break;
		}

		/* Malformed ACAD: AD structure overruns remaining length */
		if (acad_len < (hdr_len + PDU_ADV_DATA_HEADER_LEN_SIZE)) {
			return;
		}

		/* Skip to the next AD structure */
		acad_len -= hdr_len + PDU_ADV_DATA_HEADER_LEN_SIZE;
		acad += hdr_len + PDU_ADV_DATA_HEADER_LEN_SIZE;
	}

	/* Accept only the two valid BIGInfo lengths: cleartext (unencrypted
	 * BIG) or the longer encrypted variant carrying GIV and GSKD.
	 */
	if ((acad_len < (PDU_BIG_INFO_CLEARTEXT_SIZE +
			 PDU_ADV_DATA_HEADER_SIZE)) ||
	    ((acad[PDU_ADV_DATA_HEADER_LEN_OFFSET] !=
	      (PDU_ADV_DATA_HEADER_TYPE_SIZE + PDU_BIG_INFO_CLEARTEXT_SIZE)) &&
	     (acad[PDU_ADV_DATA_HEADER_LEN_OFFSET] !=
	      (PDU_ADV_DATA_HEADER_TYPE_SIZE + PDU_BIG_INFO_ENCRYPTED_SIZE)))) {
		return;
	}

	bi_size = acad[PDU_ADV_DATA_HEADER_LEN_OFFSET] -
		  PDU_ADV_DATA_HEADER_TYPE_SIZE;
	bi = (void *)&acad[PDU_ADV_DATA_HEADER_DATA_OFFSET];

	/* Copy access address seed and CRC init from BIGInfo */
	lll = &sync_iso->lll;
	(void)memcpy(lll->seed_access_addr, &bi->seed_access_addr,
		     sizeof(lll->seed_access_addr));
	(void)memcpy(lll->base_crc_init, &bi->base_crc_init,
		     sizeof(lll->base_crc_init));

	/* Channel map occupies 37 bits; mask the unused top bits of the
	 * last byte which also carry the PHY field.
	 */
	(void)memcpy(lll->data_chan_map, bi->chm_phy,
		     sizeof(lll->data_chan_map));
	lll->data_chan_map[4] &= 0x1F;
	lll->data_chan_count = util_ones_count_get(lll->data_chan_map,
						   sizeof(lll->data_chan_map));
	if (lll->data_chan_count < CHM_USED_COUNT_MIN) {
		return;
	}

	/* Reset ISO create BIG flag in the periodic advertising context */
	sync_iso->sync->iso.sync_iso = NULL;

	/* PHY is encoded in the top 3 bits of the last chm_phy byte */
	lll->phy = BIT(bi->chm_phy[4] >> 5);

	/* Extract the BIG parameters from BIGInfo */
	lll->num_bis = PDU_BIG_INFO_NUM_BIS_GET(bi);
	lll->bn = PDU_BIG_INFO_BN_GET(bi);
	lll->nse = PDU_BIG_INFO_NSE_GET(bi);
	lll->sub_interval = PDU_BIG_INFO_SUB_INTERVAL_GET(bi);
	lll->max_pdu = bi->max_pdu;
	lll->pto = PDU_BIG_INFO_PTO_GET(bi);
	lll->bis_spacing = PDU_BIG_INFO_SPACING_GET(bi);
	lll->irc = PDU_BIG_INFO_IRC_GET(bi);
	if (lll->pto) {
		uint8_t nse;

		nse = lll->irc * lll->bn; /* 4 bits * 3 bits, total 7 bits */
		if (nse >= lll->nse) {
			return;
		}

		lll->ptc = lll->nse - nse;

		/* FIXME: Do not remember why ptc is 4 bits, it should be 5 bits as ptc is a
		 *        running buffer offset related to nse.
		 *        Fix ptc and ptc_curr definitions, until then we keep an assertion check
		 *        here.
		 */
		LL_ASSERT(lll->ptc <= BIT_MASK(4));
	} else {
		lll->ptc = 0U;
	}
	lll->sdu_interval = PDU_BIG_INFO_SDU_INTERVAL_GET(bi);

	/* Pick the 39-bit payload count, 1 MSb is framing bit */
	lll->payload_count = (uint64_t)bi->payload_count_framing[0];
	lll->payload_count |= (uint64_t)bi->payload_count_framing[1] << 8;
	lll->payload_count |= (uint64_t)bi->payload_count_framing[2] << 16;
	lll->payload_count |= (uint64_t)bi->payload_count_framing[3] << 24;
	lll->payload_count |= (uint64_t)(bi->payload_count_framing[4] & 0x7f) << 32;
	lll->framing = (bi->payload_count_framing[4] & 0x80) >> 7;

	/* Set establishment event countdown */
	lll->establish_events = CONN_ESTAB_COUNTDOWN;

	if (IS_ENABLED(CONFIG_BT_CTLR_BROADCAST_ISO_ENC) &&
	    lll->enc && (bi_size == PDU_BIG_INFO_ENCRYPTED_SIZE)) {
		/* Key-derivation salt "BIG3" (little-endian) */
		const uint8_t BIG3[4]  = {0x33, 0x47, 0x49, 0x42};
		struct ccm *ccm_rx;
		uint8_t gsk[16];
		int err;

		/* Copy the GIV in BIGInfo */
		(void)memcpy(lll->giv, bi->giv, sizeof(lll->giv));

		/* Calculate GSK */
		err = bt_crypto_h8(sync_iso->gltk, bi->gskd, BIG3, gsk);
		LL_ASSERT(!err);

		/* Prepare the CCM parameters */
		ccm_rx = &lll->ccm_rx;
		ccm_rx->direction = 1U;
		(void)memcpy(&ccm_rx->iv[4], &lll->giv[4], 4U);
		(void)mem_rcopy(ccm_rx->key, gsk, sizeof(ccm_rx->key));

		/* NOTE: counter is filled in LLL */
	} else {
		lll->enc = 0U;
	}

	/* Initialize payload pointers */
	lll->payload_count_max = PDU_BIG_PAYLOAD_COUNT_MAX;
	lll->payload_tail = 0U;
	for (int i = 0; i < CONFIG_BT_CTLR_SYNC_ISO_STREAM_MAX; i++) {
		for (int j = 0; j < lll->payload_count_max; j++) {
			lll->payload[i][j] = NULL;
		}
	}

	lll->iso_interval = PDU_BIG_INFO_ISO_INTERVAL_GET(bi);
	interval_us = lll->iso_interval * PERIODIC_INT_UNIT_US;

	/* Convert the sync timeout (10 ms units) into a count of BIG events */
	sync_iso->timeout_reload =
		RADIO_SYNC_EVENTS((sync_iso->timeout * 10U * USEC_PER_MSEC),
				  interval_us);

	/* Window widening derived from combined local and peer clock
	 * accuracy over one ISO interval.
	 */
	sca = sync_iso->sync->lll.sca;
	lll->window_widening_periodic_us =
		DIV_ROUND_UP(((lll_clock_ppm_local_get() +
				   lll_clock_ppm_get(sca)) *
				 interval_us), USEC_PER_SEC);
	lll->window_widening_max_us = (interval_us >> 1) - EVENT_IFS_US;
	if (PDU_BIG_INFO_OFFS_UNITS_GET(bi)) {
		lll->window_size_event_us = OFFS_UNIT_300_US;
	} else {
		lll->window_size_event_us = OFFS_UNIT_30_US;
	}

	ftr = &node_rx->rx_ftr;
	pdu = (void *)((struct node_rx_pdu *)node_rx)->pdu;

	ready_delay_us = lll_radio_rx_ready_delay_get(lll->phy, PHY_FLAGS_S8);

	/* Calculate the BIG Offset in microseconds */
	sync_iso_offset_us = ftr->radio_end_us;
	sync_iso_offset_us += PDU_BIG_INFO_OFFS_GET(bi) *
			      lll->window_size_event_us;
	/* Skip to first selected BIS subevent */
	stream = ull_sync_iso_stream_get(lll->stream_handle[0]);
	if (lll->bis_spacing >= (lll->sub_interval * lll->nse)) {
		/* Sequential packing: full sub-event trains per BIS */
		sync_iso_offset_us += (stream->bis_index - 1U) *
				      lll->sub_interval *
				      ((lll->irc * lll->bn) + lll->ptc);
	} else {
		/* Interleaved packing: BISes spaced by bis_spacing */
		sync_iso_offset_us += (stream->bis_index - 1U) *
				      lll->bis_spacing;
	}
	/* Back off by the PDU air time, scheduling margins and radio
	 * ready delay so the receiver is listening before the BIS
	 * subevent starts.
	 */
	sync_iso_offset_us -= PDU_AC_US(pdu->len, sync_iso->sync->lll.phy,
					ftr->phy_flags);
	sync_iso_offset_us -= EVENT_TICKER_RES_MARGIN_US;
	sync_iso_offset_us -= EVENT_JITTER_US;
	sync_iso_offset_us -= ready_delay_us;

	interval_us -= lll->window_widening_periodic_us;

	/* Calculate ISO Receiver BIG event timings */

	/* Number of maximum BISes to sync from the first BIS to sync */
	/* NOTE: When ULL scheduling is implemented for subevents, then update
	 * the time reservation as required.
	 */
	num_bis = lll->num_bis - (stream->bis_index - 1U);

	/* 1. Maximum PDU transmission time in 1M/2M/S8 PHY is 17040 us, or
	 * represented in 15-bits.
	 * 2. NSE in the range 1 to 31 is represented in 5-bits
	 * 3. num_bis in the range 1 to 31 is represented in 5-bits
	 *
	 * Hence, worst case event time can be represented in 25-bits plus
	 * one each bit for added ctrl_spacing and radio event overheads. I.e.
	 * 27-bits required and sufficiently covered by using 32-bit data type
	 * for time_us.
	 */

	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_ISO_RESERVE_MAX)) {
		uint32_t ctrl_spacing_us;

		/* Maximum time reservation for sequential and interleaved
		 * packing.
		 */
		if (lll->bis_spacing >= (lll->sub_interval * lll->nse)) {
			slot_us = lll->sub_interval * lll->nse * num_bis;
		} else {
			slot_us = lll->bis_spacing * lll->nse * num_bis;
		}

		ctrl_spacing_us = PDU_BIS_US(sizeof(struct pdu_big_ctrl),
					     lll->enc, lll->phy, PHY_FLAGS_S8);
		slot_us += ctrl_spacing_us;

	} else if (lll->bis_spacing >= (lll->sub_interval * lll->nse)) {
		/* Time reservation omitting PTC subevents in sequential
		 * packing.
		 */
		slot_us = lll->sub_interval * ((lll->nse * num_bis) - lll->ptc);

	} else {
		/* Time reservation omitting PTC subevents in interleaved
		 * packing.
		 */
		slot_us = lll->bis_spacing * ((lll->nse - lll->ptc) * num_bis);
	}

	/* Add radio ready delay */
	slot_us += ready_delay_us;
	slot_us += lll->window_widening_periodic_us << 1U;
	slot_us += EVENT_JITTER_US << 1U;
	slot_us += EVENT_TICKER_RES_MARGIN_US << 2U;

	/* Add implementation defined radio event overheads */
	if (IS_ENABLED(CONFIG_BT_CTLR_EVENT_OVERHEAD_RESERVE_MAX)) {
		slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
	}

	/* TODO: active_to_start feature port */
	sync_iso->ull.ticks_active_to_start = 0U;
	sync_iso->ull.ticks_prepare_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	sync_iso->ull.ticks_preempt_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
	sync_iso->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);

	ticks_slot_offset = MAX(sync_iso->ull.ticks_active_to_start,
				sync_iso->ull.ticks_prepare_to_start);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}
	ticks_slot_offset += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

	/* Check and skip to next interval if CPU processing introduces latency
	 * that can delay scheduling the first ISO event.
	 */
	ticks_expire = ftr->ticks_anchor - ticks_slot_offset +
		       HAL_TICKER_US_TO_TICKS(sync_iso_offset_us);
	ticks_threshold = ticker_ticks_now_get() +
			  HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);
	ticks_diff = ticker_ticks_diff_get(ticks_expire, ticks_threshold);
	if (ticks_diff & BIT(HAL_TICKER_CNTR_MSBIT)) {
		/* First expiry already in the past: defer by one interval
		 * and advance the expected payload count accordingly.
		 */
		sync_iso_offset_us += interval_us -
			lll->window_widening_periodic_us;
		lll->window_widening_event_us +=
			lll->window_widening_periodic_us;
		lll->payload_count += lll->bn;
	}

	/* setup to use ISO create prepare function until sync established */
	mfy_lll_prepare.fp = lll_sync_iso_create_prepare;

	handle = sync_iso_handle_get(sync_iso);
	ret = ticker_start(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			   (TICKER_ID_SCAN_SYNC_ISO_BASE +
			    sync_iso_handle_to_index(handle)),
			   ftr->ticks_anchor - ticks_slot_offset,
			   HAL_TICKER_US_TO_TICKS(sync_iso_offset_us),
			   HAL_TICKER_US_TO_TICKS(interval_us),
			   HAL_TICKER_REMAINDER(interval_us),
#if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
	!defined(CONFIG_BT_CTLR_LOW_LAT)
			   TICKER_LAZY_MUST_EXPIRE,
#else
			   TICKER_NULL_LAZY,
#endif /* !CONFIG_BT_TICKER_LOW_LAT && !CONFIG_BT_CTLR_LOW_LAT */
			   (sync_iso->ull.ticks_slot + ticks_slot_overhead),
			   ticker_cb, sync_iso,
			   ticker_start_op_cb, (void *)__LINE__);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}
716 
ull_sync_iso_estab_done(struct node_rx_event_done * done)717 void ull_sync_iso_estab_done(struct node_rx_event_done *done)
718 {
719 	struct ll_sync_iso_set *sync_iso;
720 	struct node_rx_sync_iso *se;
721 	struct node_rx_pdu *rx;
722 
723 	if (done->extra.trx_cnt || done->extra.estab_failed) {
724 		/* Switch to normal prepare */
725 		mfy_lll_prepare.fp = lll_sync_iso_prepare;
726 
727 		/* Get reference to ULL context */
728 		sync_iso = CONTAINER_OF(done->param, struct ll_sync_iso_set, ull);
729 
730 		/* Prepare BIG Sync Established */
731 		rx = (void *)sync_iso->sync->iso.node_rx_estab;
732 		rx->hdr.type = NODE_RX_TYPE_SYNC_ISO;
733 		rx->hdr.handle = sync_iso_handle_get(sync_iso);
734 		rx->rx_ftr.param = sync_iso;
735 
736 		/* status value is stored in the PDU member of the node rx */
737 		se = (void *)rx->pdu;
738 		if (done->extra.estab_failed) {
739 			if (sync_iso->lll.term_reason != BT_HCI_ERR_SUCCESS) {
740 				se->status = sync_iso->lll.term_reason;
741 			} else {
742 				se->status = BT_HCI_ERR_CONN_FAIL_TO_ESTAB;
743 			}
744 		} else {
745 			se->status = BT_HCI_ERR_SUCCESS;
746 		}
747 
748 		ll_rx_put_sched(rx->hdr.link, rx);
749 	}
750 
751 	ull_sync_iso_done(done);
752 }
753 
/* Regular BIG sync event-done processing.
 *
 * Performs drift compensation based on the anchor point reception,
 * maintains the sync supervision timeout countdown, and updates the
 * event ticker with drift, lazy and force values as needed. Stops the
 * ticker on establishment failure or supervision timeout.
 */
void ull_sync_iso_done(struct node_rx_event_done *done)
{
	struct ll_sync_iso_set *sync_iso;
	uint32_t ticks_drift_minus;
	uint32_t ticks_drift_plus;
	struct lll_sync_iso *lll;
	uint16_t elapsed_event;
	uint16_t latency_event;
	uint16_t lazy;
	uint8_t force;

	/* Get reference to ULL context */
	sync_iso = CONTAINER_OF(done->param, struct ll_sync_iso_set, ull);
	lll = &sync_iso->lll;

	/* Events elapsed used in timeout checks below */
	latency_event = lll->latency_event;
	if (lll->latency_prepare) {
		elapsed_event = latency_event + lll->latency_prepare;
	} else {
		elapsed_event = latency_event + 1U;
	}

	/* Check for establishmet failure */
	if (done->extra.estab_failed) {
		/* Stop Sync ISO Ticker directly. Establishment failure has been
		 * notified.
		 */
		stop_ticker(sync_iso, NULL);
		return;
	}

	/* Sync drift compensation and new skip calculation
	 */
	ticks_drift_plus = 0U;
	ticks_drift_minus = 0U;
	if (done->extra.trx_cnt) {
		/* Calculate drift in ticks unit */
		ull_drift_ticks_get(done, &ticks_drift_plus,
				    &ticks_drift_minus);

		/* Reset latency */
		lll->latency_event = 0U;
	}

	/* Reset supervision countdown */
	if (done->extra.crc_valid) {
		sync_iso->timeout_expire = 0U;
	} else {
		/* if anchor point not sync-ed, start timeout countdown */
		if (!sync_iso->timeout_expire) {
			sync_iso->timeout_expire = sync_iso->timeout_reload;
		}
	}

	/* check timeout */
	force = 0U;
	if (sync_iso->timeout_expire) {
		if (sync_iso->timeout_expire > elapsed_event) {
			sync_iso->timeout_expire -= elapsed_event;

			/* break skip */
			lll->latency_event = 0U;

			if (latency_event) {
				force = 1U;
			}
		} else {
			/* Supervision timeout: notify loss and stop */
			timeout_cleanup(sync_iso);

			return;
		}
	}

	/* check if skip needs update */
	lazy = 0U;
	if (force || (latency_event != lll->latency_event)) {
		lazy = lll->latency_event + 1U;
	}

	/* Update Sync ticker instance */
	if (ticks_drift_plus || ticks_drift_minus || lazy || force) {
		uint8_t handle = sync_iso_handle_get(sync_iso);
		uint32_t ticker_status;

		/* Call to ticker_update can fail under the race
		 * condition where in the periodic sync role is being stopped
		 * but at the same time it is preempted by periodic sync event
		 * that gets into close state. Accept failure when periodic sync
		 * role is being stopped.
		 */
		ticker_status = ticker_update(TICKER_INSTANCE_ID_CTLR,
					      TICKER_USER_ID_ULL_HIGH,
					      (TICKER_ID_SCAN_SYNC_ISO_BASE +
					       sync_iso_handle_to_index(handle)),
					      ticks_drift_plus,
					      ticks_drift_minus, 0U, 0U,
					      lazy, force,
					      ticker_update_op_cb,
					      sync_iso);
		LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
			  (ticker_status == TICKER_STATUS_BUSY) ||
			  ((void *)sync_iso == ull_disable_mark_get()));
	}
}
859 
ull_sync_iso_done_terminate(struct node_rx_event_done * done)860 void ull_sync_iso_done_terminate(struct node_rx_event_done *done)
861 {
862 	struct ll_sync_iso_set *sync_iso;
863 	struct lll_sync_iso *lll;
864 	struct node_rx_pdu *rx;
865 
866 	/* Get reference to ULL context */
867 	sync_iso = CONTAINER_OF(done->param, struct ll_sync_iso_set, ull);
868 	lll = &sync_iso->lll;
869 
870 	/* Populate the Sync Lost which will be enqueued in disabled_cb */
871 	rx = (void *)&sync_iso->node_rx_lost;
872 	rx->hdr.handle = sync_iso_handle_get(sync_iso);
873 	rx->hdr.type = NODE_RX_TYPE_SYNC_ISO_LOST;
874 	rx->rx_ftr.param = sync_iso;
875 	*((uint8_t *)rx->pdu) = lll->term_reason;
876 
877 	/* Stop Sync ISO Ticker */
878 	stop_ticker(sync_iso, ticker_stop_op_cb);
879 }
880 
/* Blocking disable of one BIG sync instance by pool index: stops any
 * resume ticker and then the main event ticker.
 */
static void disable(uint8_t sync_idx)
{
	struct ll_sync_iso_set *sync_iso = &ll_sync_iso[sync_idx];
	int err;

	/* Stop any active resume ticker; the result is intentionally
	 * ignored as the ticker may not be running.
	 */
	(void)ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			  TICKER_ID_SCAN_SYNC_ISO_RESUME_BASE + sync_idx,
			  NULL, NULL);

	/* Blocking stop of the BIG sync event ticker */
	err = ull_ticker_stop_with_mark(TICKER_ID_SCAN_SYNC_ISO_BASE +
					sync_idx, sync_iso, &sync_iso->lll);
	LL_ASSERT_INFO2(err == 0 || err == -EALREADY, sync_idx, err);
}
897 
init_reset(void)898 static int init_reset(void)
899 {
900 	uint8_t idx;
901 
902 	/* Disable all active BIGs (uses blocking ull_ticker_stop_with_mark) */
903 	for (idx = 0U; idx < CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET; idx++) {
904 		disable(idx);
905 	}
906 
907 	mem_init((void *)stream_pool, sizeof(struct lll_sync_iso_stream),
908 		 CONFIG_BT_CTLR_SYNC_ISO_STREAM_COUNT, &stream_free);
909 
910 	memset(&ll_sync_iso, 0, sizeof(ll_sync_iso));
911 
912 	/* Initialize LLL */
913 	return lll_sync_iso_init();
914 }
915 
/* Find the in-use BIG sync instance matching the given BIG handle.
 *
 * An instance counts as in use only when its sync reference is set.
 * Returns NULL when no match exists.
 */
static struct ll_sync_iso_set *sync_iso_get(uint8_t handle)
{
	struct ll_sync_iso_set *sync_iso;

	for (sync_iso = &ll_sync_iso[0];
	     sync_iso < &ll_sync_iso[CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET];
	     sync_iso++) {
		if (sync_iso->sync && (sync_iso->big_handle == handle)) {
			return sync_iso;
		}
	}

	return NULL;
}
926 
/* Claim a free BIG sync instance and record the BIG handle on it.
 *
 * A slot is free while its sync reference is NULL; the caller makes it
 * in-use by assigning sync. Returns NULL when all slots are taken.
 */
static struct ll_sync_iso_set *sync_iso_alloc(uint8_t handle)
{
	struct ll_sync_iso_set *sync_iso;

	for (sync_iso = &ll_sync_iso[0];
	     sync_iso < &ll_sync_iso[CONFIG_BT_CTLR_SCAN_SYNC_ISO_SET];
	     sync_iso++) {
		if (!sync_iso->sync) {
			sync_iso->big_handle = handle;

			return sync_iso;
		}
	}

	return NULL;
}
938 
/* Return the host-assigned BIG handle stored on the instance */
static uint8_t sync_iso_handle_get(struct ll_sync_iso_set *sync)
{
	return sync->big_handle;
}
943 
/* Map a BIG handle to its instance pool index, used to derive ticker
 * ids.
 *
 * NOTE(review): callers must pass a handle of an in-use instance;
 * sync_iso_get() returns NULL otherwise and ARRAY_INDEX presumably
 * asserts on it — TODO confirm.
 */
static uint8_t sync_iso_handle_to_index(uint8_t handle)
{
	return ARRAY_INDEX(ll_sync_iso, sync_iso_get(handle));
}
948 
/* Take one BIS stream context from the free pool; NULL when exhausted */
static struct stream *sync_iso_stream_acquire(void)
{
	return mem_acquire(&stream_free);
}
953 
/* Return the stream handle, i.e. the stream's index within stream_pool */
static uint16_t sync_iso_stream_handle_get(struct lll_sync_iso_stream *stream)
{
	return mem_index_get(stream, stream_pool, sizeof(*stream));
}
958 
timeout_cleanup(struct ll_sync_iso_set * sync_iso)959 static void timeout_cleanup(struct ll_sync_iso_set *sync_iso)
960 {
961 	struct node_rx_pdu *rx;
962 
963 	/* Populate the Sync Lost which will be enqueued in disabled_cb */
964 	rx = (void *)&sync_iso->node_rx_lost;
965 	rx->hdr.handle = sync_iso_handle_get(sync_iso);
966 	rx->rx_ftr.param = sync_iso;
967 
968 	if (mfy_lll_prepare.fp == lll_sync_iso_prepare) {
969 		rx->hdr.type = NODE_RX_TYPE_SYNC_ISO_LOST;
970 		*((uint8_t *)rx->pdu) = BT_HCI_ERR_CONN_TIMEOUT;
971 	} else {
972 		rx->hdr.type = NODE_RX_TYPE_SYNC_ISO;
973 		*((uint8_t *)rx->pdu) = BT_HCI_ERR_CONN_FAIL_TO_ESTAB;
974 	}
975 
976 	/* Stop Sync ISO Ticker */
977 	stop_ticker(sync_iso, ticker_stop_op_cb);
978 }
979 
/* Ticker expiry callback for the periodic BIG sync event.
 *
 * Fills in the (static, single-instance) LLL prepare parameters and enqueues
 * the prepare mayfly from ULL_HIGH towards the LLL execution context.
 *
 * @param ticks_at_expire Ticker time at which this expiry fired.
 * @param ticks_drift     Unused here.
 * @param remainder       Sub-tick remainder forwarded to LLL.
 * @param lazy            Number of skipped expiries (or MUST_EXPIRE marker).
 * @param force           Forced-expiry flag forwarded to LLL.
 * @param param           The ll_sync_iso_set context registered with the
 *                        ticker.
 */
static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param)
{
	static struct lll_prepare_param p;
	struct ll_sync_iso_set *sync_iso;
	struct lll_sync_iso *lll;
	uint32_t ret;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_O(1);

	/* In non-low-latency builds a MUST_EXPIRE expiry with nothing to do
	 * is skipped without kicking LLL.
	 */
	if (!IS_ENABLED(CONFIG_BT_TICKER_LOW_LAT) &&
	    !IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) &&
	    (lazy == TICKER_LAZY_MUST_EXPIRE)) {
		/* FIXME: generate ISO PDU with status set to invalid */

		DEBUG_RADIO_PREPARE_O(0);
		return;
	}

	sync_iso = param;
	lll = &sync_iso->lll;

	/* Increment prepare reference count */
	ref = ull_ref_inc(&sync_iso->ull);
	LL_ASSERT(ref);

	/* Append timing parameters */
	p.ticks_at_expire = ticks_at_expire;
	p.remainder = remainder;
	p.lazy = lazy;
	p.force = force;
	p.param = lll;
	mfy_lll_prepare.param = &p;

	/* Kick LLL prepare */
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL, 0U,
			     &mfy_lll_prepare);
	LL_ASSERT(!ret);

	DEBUG_RADIO_PREPARE_O(1);
}
1023 
/* Ticker start completion callback: any status other than success is a
 * controller fault.
 */
static void ticker_start_op_cb(uint32_t status, void *param)
{
	ARG_UNUSED(param);

	LL_ASSERT(TICKER_STATUS_SUCCESS == status);
}
1030 
/* Ticker update completion callback.
 *
 * A non-success status is tolerated only when the operation's param matches
 * the context currently marked by ull_disable_mark_get(), i.e. an update
 * racing with a disable in progress; anything else is a fault.
 */
static void ticker_update_op_cb(uint32_t status, void *param)
{
	LL_ASSERT(status == TICKER_STATUS_SUCCESS ||
		  param == ull_disable_mark_get());
}
1036 
/* Ticker stop completion callback: continue teardown in ULL_HIGH context.
 *
 * Enqueues sync_iso_disable via a single static mayfly; param is the
 * ll_sync_iso_set context passed to ticker_stop.
 * NOTE(review): the static mayfly/link pair assumes at most one stop
 * operation is in flight at a time — confirm callers serialize stops.
 */
static void ticker_stop_op_cb(uint32_t status, void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0U, 0U, &link, NULL, sync_iso_disable};
	uint32_t ret;

	LL_ASSERT(status == TICKER_STATUS_SUCCESS);

	/* Check if any pending LLL events that need to be aborted */
	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
			     TICKER_USER_ID_ULL_HIGH, 0U, &mfy);
	LL_ASSERT(!ret);
}
1051 
/* Disable the BIG sync: abort any in-flight LLL event, then run disabled_cb.
 *
 * Executed in ULL_HIGH context (enqueued from ticker_stop_op_cb). If LLL
 * events are still referenced, disabled_cb is deferred until the reference
 * count drops to zero; otherwise it is invoked directly.
 *
 * @param param The ll_sync_iso_set context being torn down.
 */
static void sync_iso_disable(void *param)
{
	struct ll_sync_iso_set *sync_iso;
	struct ull_hdr *hdr;

	/* Check ref count to determine if any pending LLL events in pipeline */
	sync_iso = param;
	hdr = &sync_iso->ull;
	if (ull_ref_get(hdr)) {
		static memq_link_t link;
		static struct mayfly mfy = {0U, 0U, &link, NULL, lll_disable};
		uint32_t ret;

		mfy.param = &sync_iso->lll;

		/* Setup disabled callback to be called when ref count
		 * returns to zero.
		 */
		LL_ASSERT(!hdr->disabled_cb);
		/* Param is stored before the callback is installed, so the
		 * callback never observes an unset param.
		 */
		hdr->disabled_param = mfy.param;
		hdr->disabled_cb = disabled_cb;

		/* Trigger LLL disable */
		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
				     TICKER_USER_ID_LLL, 0U, &mfy);
		LL_ASSERT(!ret);
	} else {
		/* No pending LLL events */
		disabled_cb(&sync_iso->lll);
	}
}
1083 
lll_flush(void * param)1084 static void lll_flush(void *param)
1085 {
1086 	struct ll_sync_iso_set *sync_iso;
1087 	uint8_t handle;
1088 
1089 	/* Get reference to ULL context */
1090 	sync_iso = HDR_LLL2ULL(param);
1091 	handle = sync_iso_handle_get(sync_iso);
1092 
1093 	lll_sync_iso_flush(handle, param);
1094 
1095 	if (sync_iso->flush_sem) {
1096 		k_sem_give(sync_iso->flush_sem);
1097 	}
1098 }
1099 
/* Invoked once the BIG sync's LLL reference count reaches zero.
 *
 * Enqueues the BIG Sync Lost/Failed node_rx (populated earlier in
 * timeout_cleanup) towards the host, then schedules lll_flush in LLL
 * context to release LLL resources.
 *
 * @param param The lll_sync_iso instance (converted via HDR_LLL2ULL).
 */
static void disabled_cb(void *param)
{
	static memq_link_t mfy_link;
	static struct mayfly mfy = {0U, 0U, &mfy_link, NULL, lll_flush};
	struct ll_sync_iso_set *sync_iso;
	struct node_rx_pdu *rx;
	memq_link_t *link;
	uint32_t ret;

	/* Get reference to ULL context */
	sync_iso = HDR_LLL2ULL(param);

	/* Generate BIG sync lost */
	rx = (void *)&sync_iso->node_rx_lost;
	LL_ASSERT(rx->hdr.link);
	/* Detach the link from the node before enqueuing; a NULL link marks
	 * the node as consumed.
	 */
	link = rx->hdr.link;
	rx->hdr.link = NULL;

	/* Enqueue the BIG sync lost towards ULL context */
	ll_rx_put_sched(link, rx);

	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
			     TICKER_USER_ID_LLL, 0U, &mfy);
	LL_ASSERT(!ret);
}
1126 
/* Stop the resume and main tickers of a BIG sync context.
 *
 * The resume ticker stop is best-effort (it may not be running, so its
 * status is ignored). The main ticker stop reports completion through
 * fp_op_func, which receives the sync_iso context; pass NULL for a
 * fire-and-forget stop.
 *
 * @param sync_iso   BIG sync context whose tickers are stopped.
 * @param fp_op_func Ticker stop completion callback, or NULL.
 */
static void stop_ticker(struct ll_sync_iso_set *sync_iso, ticker_op_func fp_op_func)
{
	uint8_t handle_index;
	uint8_t handle;
	uint32_t ret;

	handle = sync_iso_handle_get(sync_iso);
	/* Hoisted: both tickers use the same context index, and the
	 * handle-to-index lookup walks the whole context array.
	 */
	handle_index = sync_iso_handle_to_index(handle);

	(void)ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			  TICKER_ID_SCAN_SYNC_ISO_RESUME_BASE + handle_index,
			  NULL, NULL);

	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			  TICKER_ID_SCAN_SYNC_ISO_BASE + handle_index,
			  fp_op_func, fp_op_func ? (void *)sync_iso : NULL);

	/* BUSY means the stop is pending and will complete via callback */
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}
1147