/*
 * Copyright (c) 2018-2019 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <soc.h>
#include <zephyr/bluetooth/hci_types.h>
#include <zephyr/sys/byteorder.h>

#include "util/util.h"
#include "util/memq.h"
#include "util/mem.h"
#include "util/mayfly.h"
#include "util/dbuf.h"

#include "hal/cpu.h"
#include "hal/ccm.h"
#include "hal/radio.h"
#include "hal/ticker.h"

#include "ticker/ticker.h"

#include "pdu_df.h"
#include "lll/pdu_vendor.h"
#include "pdu.h"

#include "lll.h"
#include "lll_clock.h"
#include "lll/lll_vendor.h"
#include "lll/lll_adv_types.h"
#include "lll_adv.h"
#include "lll/lll_adv_pdu.h"
#include "lll_chan.h"
#include "lll/lll_df_types.h"
#include "lll_conn.h"
#include "lll_peripheral.h"
#include "lll_filter.h"
#include "lll_conn_iso.h"

#include "ll_sw/ull_tx_queue.h"

#include "ull_adv_types.h"
#include "ull_conn_types.h"
#include "ull_filter.h"

#include "ull_internal.h"
#include "ull_adv_internal.h"
#include "ull_conn_internal.h"
#include "ull_peripheral_internal.h"

#include "ll.h"

#include "ll_sw/isoal.h"
#include "ll_sw/ull_iso_types.h"
#include "ll_sw/ull_conn_iso_types.h"

#include "ll_sw/ull_llcp.h"

#include "hal/debug.h"

static void invalid_release(struct ull_hdr *hdr, struct lll_conn *lll,
			    memq_link_t *link, struct node_rx_pdu *rx);
static void ticker_op_stop_adv_cb(uint32_t status, void *param);
static void ticker_op_cb(uint32_t status, void *param);
static void ticker_update_latency_cancel_op_cb(uint32_t ticker_status,
					       void *param);

void ull_periph_setup(struct node_rx_pdu *rx, struct node_rx_ftr *ftr,
		      struct lll_conn *lll)
{
	uint32_t conn_offset_us, conn_interval_us;
	uint8_t ticker_id_adv, ticker_id_conn;
	uint8_t peer_id_addr[BDADDR_SIZE];
	uint8_t peer_addr[BDADDR_SIZE];
	uint32_t ticks_slot_overhead;
	uint32_t ticks_slot_offset;
	uint32_t ready_delay_us;
	struct pdu_adv *pdu_adv;
	struct ll_adv_set *adv;
	uint32_t ticker_status;
	uint8_t peer_addr_type;
	uint32_t ticks_at_stop;
	uint16_t win_delay_us;
	struct node_rx_cc *cc;
	struct ll_conn *conn;
	uint16_t max_tx_time;
	uint16_t max_rx_time;
	uint16_t win_offset;
	memq_link_t *link;
	uint32_t slot_us;
	uint8_t chan_sel;
	void *node;

	adv = ((struct lll_adv *)ftr->param)->hdr.parent;
	conn = lll->hdr.parent;

	/* Populate the peripheral context */
	pdu_adv = (void *)rx->pdu;

	peer_addr_type = pdu_adv->tx_addr;
	memcpy(peer_addr, pdu_adv->connect_ind.init_addr, BDADDR_SIZE);

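	/* With privacy enabled, a peer address found in the resolving list
	 * is replaced by its identity address; otherwise the on-air address
	 * is used as the identity address as-is.
	 */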
#if defined(CONFIG_BT_CTLR_PRIVACY)
	uint8_t rl_idx = ftr->rl_idx;

	if (rl_idx != FILTER_IDX_NONE) {
		/* Get identity address */
		ll_rl_id_addr_get(rl_idx, &peer_addr_type, peer_id_addr);
		/* Mark it as identity address from RPA (0x02, 0x03) */
		MARK_AS_IDENTITY_ADDR(peer_addr_type);
	} else {
#else /* CONFIG_BT_CTLR_PRIVACY */
	if (1) {
#endif /* CONFIG_BT_CTLR_PRIVACY */
		memcpy(peer_id_addr, peer_addr, BDADDR_SIZE);
	}

	/* Use the link stored in the node rx to enqueue connection
	 * complete node rx towards LL context.
	 */
	link = rx->hdr.link;

#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
	const uint8_t peer_id_addr_type = (peer_addr_type & 0x01);
	const uint8_t own_id_addr_type = pdu_adv->rx_addr;
	const uint8_t *own_id_addr = adv->own_id_addr;

	/* Do not connect twice to the same peer */
	if (ull_conn_peer_connected(own_id_addr_type, own_id_addr,
				    peer_id_addr_type, peer_id_addr)) {
		invalid_release(&adv->ull, lll, link, rx);

		return;
	}

	/* Remember peer and own identity address */
	conn->peer_id_addr_type = peer_id_addr_type;
	(void)memcpy(conn->peer_id_addr, peer_id_addr,
		     sizeof(conn->peer_id_addr));
	conn->own_id_addr_type = own_id_addr_type;
	(void)memcpy(conn->own_id_addr, own_id_addr,
		     sizeof(conn->own_id_addr));
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */

#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
	/* Set default PAST parameters */
	conn->past = ull_conn_default_past_param_get();
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */

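	/* Extract the connection parameters from the CONNECT_IND PDU and
	 * validate them: at least the minimum count of used data channels,
	 * a hop increment within the valid range, and a non-zero connection
	 * interval; invalid parameters release the connection context.
	 */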
	memcpy(&lll->crc_init[0], &pdu_adv->connect_ind.crc_init[0], 3);
	memcpy(&lll->access_addr[0], &pdu_adv->connect_ind.access_addr[0], 4);
	memcpy(&lll->data_chan_map[0], &pdu_adv->connect_ind.chan_map[0],
	       sizeof(lll->data_chan_map));
	lll->data_chan_count = util_ones_count_get(&lll->data_chan_map[0],
			       sizeof(lll->data_chan_map));
	lll->data_chan_hop = pdu_adv->connect_ind.hop;
	lll->interval = sys_le16_to_cpu(pdu_adv->connect_ind.interval);
	if ((lll->data_chan_count < CHM_USED_COUNT_MIN) ||
	    (lll->data_chan_hop < CHM_HOP_COUNT_MIN) ||
	    (lll->data_chan_hop > CHM_HOP_COUNT_MAX) ||
	    !lll->interval) {
		invalid_release(&adv->ull, lll, link, rx);

		return;
	}

	((struct lll_adv *)ftr->param)->conn = NULL;

	lll->latency = sys_le16_to_cpu(pdu_adv->connect_ind.latency);

	win_offset = sys_le16_to_cpu(pdu_adv->connect_ind.win_offset);
	conn_interval_us = lll->interval * CONN_INT_UNIT_US;

	/* transmitWindowDelay to default calculated connection offset:
	 * 1.25 ms for a legacy PDU, 2.5 ms for an LE Uncoded PHY and 3.75 ms
	 * for an LE Coded PHY.
	 */
	if (0) {
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	} else if (adv->lll.aux) {
		if (adv->lll.phy_s & PHY_CODED) {
			win_delay_us = WIN_DELAY_CODED;
		} else {
			win_delay_us = WIN_DELAY_UNCODED;
		}
#endif
	} else {
		win_delay_us = WIN_DELAY_LEGACY;
	}

	/* Set LLCP as connection-wise connected */
	ull_cp_state_set(conn, ULL_CP_CONNECTED);

	/* calculate the window widening */
	conn->periph.sca = pdu_adv->connect_ind.sca;
	lll->periph.window_widening_periodic_us =
		DIV_ROUND_UP(((lll_clock_ppm_local_get() +
				   lll_clock_ppm_get(conn->periph.sca)) *
				  conn_interval_us), USEC_PER_SEC);
	lll->periph.window_widening_max_us = (conn_interval_us >> 1) -
					    EVENT_IFS_US;
	lll->periph.window_size_event_us = pdu_adv->connect_ind.win_size *
		CONN_INT_UNIT_US;

	/* procedure timeouts */
	conn->supervision_timeout = sys_le16_to_cpu(pdu_adv->connect_ind.timeout);

	/* Setup the PRT reload */
	ull_cp_prt_reload_set(conn, conn_interval_us);

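	/* Connection accept timeout: with Connected ISO support, use the
	 * Host-configured accept timeout (units of 0.625 ms, hence the
	 * multiplication by 625 us), otherwise the compile-time default.
	 */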
#if defined(CONFIG_BT_CTLR_CONN_ISO)
	uint16_t conn_accept_timeout;

	(void)ll_conn_iso_accept_timeout_get(&conn_accept_timeout);
	conn->connect_accept_to = conn_accept_timeout * 625U;
#else
	conn->connect_accept_to = DEFAULT_CONNECTION_ACCEPT_TIMEOUT_US;
#endif /* CONFIG_BT_CTLR_CONN_ISO */

#if defined(CONFIG_BT_CTLR_LE_PING)
	/* APTO in no. of connection events */
	conn->apto_reload = RADIO_CONN_EVENTS((30 * 1000 * 1000),
					      conn_interval_us);
	/* Dispatch LE Ping PDU 6 connection events (that peer would
	 * listen to) before 30 s timeout
	 * TODO: "peer listens to" is greater than 30 s due to latency
	 */
	conn->appto_reload = (conn->apto_reload > (lll->latency + 6)) ?
			     (conn->apto_reload - (lll->latency + 6)) :
			     conn->apto_reload;
#endif /* CONFIG_BT_CTLR_LE_PING */

#if defined(CONFIG_BT_CTLR_CONN_RANDOM_FORCE)
	memcpy((void *)&conn->periph.force, &lll->access_addr[0],
	       sizeof(conn->periph.force));
#endif /* CONFIG_BT_CTLR_CONN_RANDOM_FORCE */

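	/* Channel selection algorithm indication: a connection established
	 * over extended advertising always uses CSA#2, otherwise honor the
	 * ChSel bit of the received CONNECT_IND PDU header.
	 */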
	if (0) {
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	} else if (adv->lll.aux) {
		chan_sel = 1U;
#endif
	} else {
		chan_sel = pdu_adv->chan_sel;
	}

	/* Check for pdu field being aligned before populating connection
	 * complete event.
	 */
	node = pdu_adv;
	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_cc));

	/* Populate the fields required for connection complete event */
	cc = node;
	cc->status = 0U;
	cc->role = 1U;

#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (ull_filter_lll_lrpa_used(adv->lll.rl_idx)) {
		memcpy(&cc->local_rpa[0], &pdu_adv->connect_ind.adv_addr[0],
		       BDADDR_SIZE);
	} else {
		memset(&cc->local_rpa[0], 0x0, BDADDR_SIZE);
	}

	if (rl_idx != FILTER_IDX_NONE) {
		/* Store peer RPA */
		memcpy(cc->peer_rpa, peer_addr, BDADDR_SIZE);
	} else {
		memset(cc->peer_rpa, 0x0, BDADDR_SIZE);
	}
#endif /* CONFIG_BT_CTLR_PRIVACY */

	cc->peer_addr_type = peer_addr_type;
	memcpy(cc->peer_addr, peer_id_addr, BDADDR_SIZE);

	cc->interval = lll->interval;
	cc->latency = lll->latency;
	cc->timeout = conn->supervision_timeout;
	cc->sca = conn->periph.sca;

	lll->handle = ll_conn_handle_get(conn);
	rx->hdr.handle = lll->handle;

#if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL)
	lll->tx_pwr_lvl = RADIO_TXP_DEFAULT;
#endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL */

	/* Use Channel Selection Algorithm #2 if the peer also supports it */
	if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
		struct node_rx_pdu *rx_csa;
		struct node_rx_cs *cs;

		/* pick the rx node instance stored within the connection
		 * rx node.
		 */
		rx_csa = (void *)ftr->extra;

		/* Enqueue the connection event */
		ll_rx_put(link, rx);

		/* use the rx node for CSA event */
		rx = rx_csa;
		link = rx->hdr.link;

		rx->hdr.handle = lll->handle;
		rx->hdr.type = NODE_RX_TYPE_CHAN_SEL_ALGO;

		cs = (void *)rx_csa->pdu;

		if (chan_sel) {
			lll->data_chan_sel = 1;
			lll->data_chan_id = lll_chan_id(lll->access_addr);

			cs->csa = 0x01;
		} else {
			cs->csa = 0x00;
		}
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	if (ll_adv_cmds_is_ext()) {
		uint8_t handle;

		/* Enqueue connection or CSA event */
		ll_rx_put(link, rx);

		/* use reserved link and node_rx to prepare
		 * advertising terminate event
		 */
		rx = adv->lll.node_rx_adv_term;
		link = rx->hdr.link;

		handle = ull_adv_handle_get(adv);
		LL_ASSERT(handle < BT_CTLR_ADV_SET);

		rx->hdr.type = NODE_RX_TYPE_EXT_ADV_TERMINATE;
		rx->hdr.handle = handle;
		rx->rx_ftr.param_adv_term.status = 0U;
		rx->rx_ftr.param_adv_term.conn_handle = lll->handle;
		rx->rx_ftr.param_adv_term.num_events = 0U;
	}
#endif

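	/* Enqueue the last prepared node (connection complete, CSA#2 or
	 * advertising terminate event) and schedule its processing towards
	 * the Host.
	 */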
	ll_rx_put_sched(link, rx);

#if defined(CONFIG_BT_CTLR_PERIPHERAL_RESERVE_MAX)
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
#if defined(CONFIG_BT_CTLR_PHY)
	max_tx_time = lll->dle.eff.max_tx_time;
	max_rx_time = lll->dle.eff.max_rx_time;

#else /* !CONFIG_BT_CTLR_PHY */
	max_tx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);
	max_rx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);
#endif /* !CONFIG_BT_CTLR_PHY */

#else /* !CONFIG_BT_CTLR_DATA_LENGTH */
	max_tx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);
	max_rx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);

#if defined(CONFIG_BT_CTLR_PHY)
	max_tx_time = MAX(max_tx_time,
			  PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, lll->phy_tx));
	max_rx_time = MAX(max_rx_time,
			  PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, lll->phy_rx));
#endif /* CONFIG_BT_CTLR_PHY */
#endif /* !CONFIG_BT_CTLR_DATA_LENGTH */

#else /* !CONFIG_BT_CTLR_PERIPHERAL_RESERVE_MAX */
#if defined(CONFIG_BT_CTLR_PHY)
	max_tx_time = PDU_MAX_US(0U, 0U, lll->phy_tx);
	max_rx_time = PDU_MAX_US(0U, 0U, lll->phy_rx);

#else /* !CONFIG_BT_CTLR_PHY */
	max_tx_time = PDU_MAX_US(0U, 0U, PHY_1M);
	max_rx_time = PDU_MAX_US(0U, 0U, PHY_1M);
#endif /* !CONFIG_BT_CTLR_PHY */
#endif /* !CONFIG_BT_CTLR_PERIPHERAL_RESERVE_MAX */

#if defined(CONFIG_BT_CTLR_PHY)
	ready_delay_us = lll_radio_rx_ready_delay_get(lll->phy_rx, PHY_FLAGS_S8);
#else /* CONFIG_BT_CTLR_PHY */
	ready_delay_us = lll_radio_rx_ready_delay_get(0U, 0U);
#endif /* CONFIG_BT_CTLR_PHY */

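	/* Default all inter frame space timings (presumably the 150 us
	 * T_IFS)
	 */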
	lll->tifs_tx_us = EVENT_IFS_DEFAULT_US;
	lll->tifs_rx_us = EVENT_IFS_DEFAULT_US;
	lll->tifs_hcto_us = EVENT_IFS_DEFAULT_US;
	lll->tifs_cis_us = EVENT_IFS_DEFAULT_US;

	/* Calculate event time reservation */
	slot_us = max_rx_time + max_tx_time;
	slot_us += lll->tifs_rx_us + (EVENT_CLOCK_JITTER_US << 1);
	slot_us += ready_delay_us;

	if (IS_ENABLED(CONFIG_BT_CTLR_EVENT_OVERHEAD_RESERVE_MAX)) {
		slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
	}

	/* TODO: active_to_start feature port */
	conn->ull.ticks_active_to_start = 0U;
	conn->ull.ticks_prepare_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	conn->ull.ticks_preempt_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
	conn->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);

	ticks_slot_offset = MAX(conn->ull.ticks_active_to_start,
				conn->ull.ticks_prepare_to_start);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}
	ticks_slot_offset += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

	conn_interval_us -= lll->periph.window_widening_periodic_us;

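	/* First connection event anchor point: on-air end of the CONNECT_IND
	 * plus transmit window offset and delay, less the ticker resolution
	 * margin, event jitter and radio ready delay.
	 */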
	conn_offset_us = ftr->radio_end_us;
	conn_offset_us += win_offset * CONN_INT_UNIT_US;
	conn_offset_us += win_delay_us;
	conn_offset_us -= EVENT_TICKER_RES_MARGIN_US;
	conn_offset_us -= EVENT_JITTER_US;
	conn_offset_us -= ready_delay_us;

#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	/* disable ticker job, in order to chain stop and start to avoid RTC
	 * being stopped if no tickers active.
	 */
	mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 0);
#endif

#if defined(CONFIG_BT_CTLR_ADV_EXT) && (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
	struct lll_adv_aux *lll_aux = adv->lll.aux;

	if (lll_aux) {
		struct ll_adv_aux_set *aux;

		aux = HDR_LLL2ULL(lll_aux);

		ticker_id_adv = TICKER_ID_ADV_AUX_BASE +
				ull_adv_aux_handle_get(aux);
		ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR,
					    TICKER_USER_ID_ULL_HIGH,
					    ticker_id_adv,
					    ticker_op_stop_adv_cb, aux);
		ticker_op_stop_adv_cb(ticker_status, aux);

		aux->is_started = 0U;
	}
#endif

	/* Stop Advertiser */
	ticker_id_adv = TICKER_ID_ADV_BASE + ull_adv_handle_get(adv);
	ticks_at_stop = ftr->ticks_anchor +
			HAL_TICKER_US_TO_TICKS(conn_offset_us) -
			ticks_slot_offset;
	ticker_status = ticker_stop_abs(TICKER_INSTANCE_ID_CTLR,
					TICKER_USER_ID_ULL_HIGH,
					ticker_id_adv, ticks_at_stop,
					ticker_op_stop_adv_cb, adv);
	ticker_op_stop_adv_cb(ticker_status, adv);

	/* Stop the directed advertising stop ticker, if used */
	if (adv->lll.is_hdcd) {
		/* Advertiser stop can expire while here in this ISR.
		 * Deferred attempt to stop can fail as it would have
		 * expired, hence ignore failure.
		 */
		(void)ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
				  TICKER_ID_ADV_STOP, NULL, NULL);
	}

	/* Start Peripheral */
	ticker_id_conn = TICKER_ID_CONN_BASE + ll_conn_handle_get(conn);
	ticker_status = ticker_start(TICKER_INSTANCE_ID_CTLR,
				     TICKER_USER_ID_ULL_HIGH,
				     ticker_id_conn,
				     ftr->ticks_anchor - ticks_slot_offset,
				     HAL_TICKER_US_TO_TICKS(conn_offset_us),
				     HAL_TICKER_US_TO_TICKS(conn_interval_us),
				     HAL_TICKER_REMAINDER(conn_interval_us),
				     TICKER_NULL_LAZY,
				     (conn->ull.ticks_slot +
				      ticks_slot_overhead),
				     ull_periph_ticker_cb, conn, ticker_op_cb,
				     (void *)__LINE__);
	LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
		  (ticker_status == TICKER_STATUS_BUSY));

#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	/* enable ticker job, irrespective of disabled in this function so
	 * first connection event can be scheduled as soon as possible.
	 */
	mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1);
#endif
}

void ull_periph_latency_cancel(struct ll_conn *conn, uint16_t handle)
{
	/* break peripheral latency */
	if (conn->lll.latency_event && !conn->periph.latency_cancel) {
		uint32_t ticker_status;

		conn->periph.latency_cancel = 1U;

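		/* Request a ticker update that cancels the remaining
		 * peripheral latency, so that the next connection event is
		 * attended.
		 */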
		ticker_status =
			ticker_update(TICKER_INSTANCE_ID_CTLR,
				      TICKER_USER_ID_THREAD,
				      (TICKER_ID_CONN_BASE + handle),
				      0, 0, 0, 0, 1, 0,
				      ticker_update_latency_cancel_op_cb,
				      (void *)conn);
		LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
			  (ticker_status == TICKER_STATUS_BUSY));
	}
}

void ull_periph_ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
			  uint32_t remainder, uint16_t lazy, uint8_t force,
			  void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_periph_prepare};
	static struct lll_prepare_param p;
	struct ll_conn *conn;
	uint32_t err;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_S(1);

	conn = param;

	/* Check if stopping ticker (on disconnection, race with ticker expiry)
	 */
	if (unlikely(conn->lll.handle == 0xFFFF)) {
		DEBUG_RADIO_CLOSE_S(0);
		return;
	}

#if defined(CONFIG_BT_CTLR_CONN_META)
	conn->common.is_must_expire = (lazy == TICKER_LAZY_MUST_EXPIRE);
#endif
	/* If this is a must-expire callback, LLCP state machine does not need
	 * to know. Will be called with lazy > 0 when scheduled in air.
	 */
	if (!IS_ENABLED(CONFIG_BT_CTLR_CONN_META) ||
	    (lazy != TICKER_LAZY_MUST_EXPIRE)) {
		int ret;

		/* Handle any LL Control Procedures */
		ret = ull_conn_llcp(conn, ticks_at_expire, remainder, lazy);
		if (ret) {
			/* NOTE: Under BT_CTLR_LOW_LAT, ULL_LOW context is
			 *       disabled inside radio events, hence, abort any
			 *       active radio event which will re-enable
			 *       ULL_LOW context that permits ticker job to run.
			 */
			if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) &&
			    (CONFIG_BT_CTLR_LLL_PRIO ==
			     CONFIG_BT_CTLR_ULL_LOW_PRIO)) {
				ll_radio_state_abort();
			}

			DEBUG_RADIO_CLOSE_S(0);
			return;
		}
	}

	/* Increment prepare reference count */
	ref = ull_ref_inc(&conn->ull);
	LL_ASSERT(ref);

	/* Append timing parameters */
	p.ticks_at_expire = ticks_at_expire;
	p.remainder = remainder;
	p.lazy = lazy;
	p.force = force;
	p.param = &conn->lll;
	mfy.param = &p;

	/* Kick LLL prepare */
	err = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
			     0, &mfy);
	LL_ASSERT(!err);

	/* De-mux remaining tx nodes from FIFO */
	ull_conn_tx_demux(UINT8_MAX);

	/* Enqueue towards LLL */
	ull_conn_tx_lll_enqueue(conn, UINT8_MAX);

	DEBUG_RADIO_PREPARE_S(1);
}

#if defined(CONFIG_BT_CTLR_LE_ENC)
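/* Handle the Host's LE Long Term Key Request Reply and Negative Reply: a
 * non-zero error_code maps to the negative reply.
 */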
uint8_t ll_start_enc_req_send(uint16_t handle, uint8_t error_code,
			      uint8_t const *const ltk)
{
	struct ll_conn *conn;

	conn = ll_connected_get(handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	if (error_code) {
		return ull_cp_ltk_req_neg_reply(conn);
	} else {
		return ull_cp_ltk_req_reply(conn, ltk);
	}
}
#endif /* CONFIG_BT_CTLR_LE_ENC */

static void invalid_release(struct ull_hdr *hdr, struct lll_conn *lll,
			    memq_link_t *link, struct node_rx_pdu *rx)
{
	/* Reset the advertising disabled callback */
	hdr->disabled_cb = NULL;

	/* Let the advertiser continue with connectable advertising */
	lll->periph.initiated = 0U;

	/* Mark the buffer for release */
	rx->hdr.type = NODE_RX_TYPE_RELEASE;

	/* Release the CSA#2 related node rx too */
	if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
		struct node_rx_pdu *rx_csa;

		/* pick the rx node instance stored within the
		 * connection rx node.
		 */
		rx_csa = rx->rx_ftr.extra;

		/* Enqueue the connection event to be released */
		ll_rx_put(link, rx);

		/* Use the rx node for CSA event */
		rx = rx_csa;
		link = rx->hdr.link;

		/* Mark the buffer for release */
		rx->hdr.type = NODE_RX_TYPE_RELEASE;
	}

	/* Enqueue the connection or CSA event to be released */
	ll_rx_put_sched(link, rx);
}

static void ticker_op_stop_adv_cb(uint32_t status, void *param)
{
	LL_ASSERT(status != TICKER_STATUS_FAILURE ||
		  param == ull_disable_mark_get());
}

static void ticker_op_cb(uint32_t status, void *param)
{
	ARG_UNUSED(param);

	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
}

static void ticker_update_latency_cancel_op_cb(uint32_t ticker_status,
					       void *param)
{
	struct ll_conn *conn = param;

	LL_ASSERT(ticker_status == TICKER_STATUS_SUCCESS);

	conn->periph.latency_cancel = 0U;
}

#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN)
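/* Minimum Used Channels is a peripheral-initiated procedure; disallow the
 * request on a central role (lll.role == 0) connection.
 */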
uint8_t ll_set_min_used_chans(uint16_t handle, uint8_t const phys,
			      uint8_t const min_used_chans)
{
	struct ll_conn *conn;

	conn = ll_connected_get(handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	if (!conn->lll.role) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	return ull_cp_min_used_chans(conn, phys, min_used_chans);
}
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN */