/*
 * Copyright (c) 2018-2019 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <soc.h>
#include <zephyr/bluetooth/hci_types.h>
#include <zephyr/sys/byteorder.h>

#include "util/util.h"
#include "util/memq.h"
#include "util/mem.h"
#include "util/mayfly.h"
#include "util/dbuf.h"

#include "hal/cpu.h"
#include "hal/ccm.h"
#include "hal/radio.h"
#include "hal/ticker.h"

#include "ticker/ticker.h"

#include "pdu_df.h"
#include "lll/pdu_vendor.h"
#include "pdu.h"

#include "lll.h"
#include "lll_clock.h"
#include "lll/lll_vendor.h"
#include "lll/lll_adv_types.h"
#include "lll_adv.h"
#include "lll/lll_adv_pdu.h"
#include "lll_chan.h"
#include "lll/lll_df_types.h"
#include "lll_conn.h"
#include "lll_peripheral.h"
#include "lll_filter.h"
#include "lll_conn_iso.h"

#include "ll_sw/ull_tx_queue.h"

#include "ull_adv_types.h"
#include "ull_conn_types.h"
#include "ull_filter.h"

#include "ull_internal.h"
#include "ull_adv_internal.h"
#include "ull_conn_internal.h"
#include "ull_peripheral_internal.h"

#include "ll.h"

#include "ll_sw/isoal.h"
#include "ll_sw/ull_iso_types.h"
#include "ll_sw/ull_conn_iso_types.h"

#include "ll_sw/ull_llcp.h"

#include "hal/debug.h"

static void invalid_release(struct ull_hdr *hdr, struct lll_conn *lll,
			    memq_link_t *link, struct node_rx_pdu *rx);
static void ticker_op_stop_adv_cb(uint32_t status, void *param);
static void ticker_op_cb(uint32_t status, void *param);
static void ticker_update_latency_cancel_op_cb(uint32_t ticker_status,
					       void *param);

void ull_periph_setup(struct node_rx_pdu *rx, struct node_rx_ftr *ftr,
		      struct lll_conn *lll)
{
	uint32_t conn_offset_us, conn_interval_us;
	uint8_t ticker_id_adv, ticker_id_conn;
	uint8_t peer_id_addr[BDADDR_SIZE];
	uint8_t peer_addr[BDADDR_SIZE];
	uint32_t ticks_slot_overhead;
	uint32_t ticks_slot_offset;
	uint32_t ready_delay_us;
	struct pdu_adv *pdu_adv;
	struct ll_adv_set *adv;
	uint32_t ticker_status;
	uint8_t peer_addr_type;
	uint32_t ticks_at_stop;
	uint16_t win_delay_us;
	struct node_rx_cc *cc;
	struct ll_conn *conn;
	uint16_t max_tx_time;
	uint16_t max_rx_time;
	uint16_t win_offset;
	memq_link_t *link;
	uint32_t slot_us;
	uint8_t chan_sel;
	void *node;

	adv = ((struct lll_adv *)ftr->param)->hdr.parent;
	conn = lll->hdr.parent;

	/* Populate the peripheral context */
	pdu_adv = (void *)rx->pdu;

	peer_addr_type = pdu_adv->tx_addr;
	memcpy(peer_addr, pdu_adv->connect_ind.init_addr, BDADDR_SIZE);

#if defined(CONFIG_BT_CTLR_PRIVACY)
	uint8_t rl_idx = ftr->rl_idx;

	if (rl_idx != FILTER_IDX_NONE) {
		/* Get identity address */
		ll_rl_id_addr_get(rl_idx, &peer_addr_type, peer_id_addr);
		/* Mark it as identity address from RPA (0x02, 0x03) */
		peer_addr_type += 2;
	} else {
#else /* CONFIG_BT_CTLR_PRIVACY */
	if (1) {
#endif /* CONFIG_BT_CTLR_PRIVACY */
		memcpy(peer_id_addr, peer_addr, BDADDR_SIZE);
	}

	/* Use the link stored in the node rx to enqueue connection
	 * complete node rx towards LL context.
	 */
	link = rx->hdr.link;

#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
	const uint8_t peer_id_addr_type = (peer_addr_type & 0x01);
	const uint8_t own_id_addr_type = pdu_adv->rx_addr;
	const uint8_t *own_id_addr = adv->own_id_addr;

	/* Do not connect twice to the same peer */
	if (ull_conn_peer_connected(own_id_addr_type, own_id_addr,
				    peer_id_addr_type, peer_id_addr)) {
		invalid_release(&adv->ull, lll, link, rx);

		return;
	}

	/* Remember peer and own identity address */
	conn->peer_id_addr_type = peer_id_addr_type;
	(void)memcpy(conn->peer_id_addr, peer_id_addr,
		     sizeof(conn->peer_id_addr));
	conn->own_id_addr_type = own_id_addr_type;
	(void)memcpy(conn->own_id_addr, own_id_addr,
		     sizeof(conn->own_id_addr));
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */

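	/* Copy the LLData fields from the received CONNECT_IND PDU: CRC
	 * initialization value, access address, channel map, hop increment
	 * and connection interval.
	 */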
	memcpy(&lll->crc_init[0], &pdu_adv->connect_ind.crc_init[0], 3);
	memcpy(&lll->access_addr[0], &pdu_adv->connect_ind.access_addr[0], 4);
	memcpy(&lll->data_chan_map[0], &pdu_adv->connect_ind.chan_map[0],
	       sizeof(lll->data_chan_map));
	lll->data_chan_count = util_ones_count_get(&lll->data_chan_map[0],
			       sizeof(lll->data_chan_map));
	lll->data_chan_hop = pdu_adv->connect_ind.hop;
	lll->interval = sys_le16_to_cpu(pdu_adv->connect_ind.interval);
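	/* Reject a CONNECT_IND with invalid parameters: too few used data
	 * channels, hop increment outside the valid range, or a zero
	 * connection interval.
	 */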
	if ((lll->data_chan_count < CHM_USED_COUNT_MIN) ||
	    (lll->data_chan_hop < CHM_HOP_COUNT_MIN) ||
	    (lll->data_chan_hop > CHM_HOP_COUNT_MAX) ||
	    !lll->interval) {
		invalid_release(&adv->ull, lll, link, rx);

		return;
	}

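	/* NOTE: Detach the connection context reserved in the advertising
	 *       LLL context; it is henceforth owned by the peripheral role.
	 */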
	((struct lll_adv *)ftr->param)->conn = NULL;

	lll->latency = sys_le16_to_cpu(pdu_adv->connect_ind.latency);

	win_offset = sys_le16_to_cpu(pdu_adv->connect_ind.win_offset);
	conn_interval_us = lll->interval * CONN_INT_UNIT_US;

	/* Add transmitWindowDelay to the calculated connection offset:
	 * 1.25 ms for a legacy PDU, 2.5 ms for an LE Uncoded PHY and
	 * 3.75 ms for an LE Coded PHY.
	 */
	if (0) {
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	} else if (adv->lll.aux) {
		if (adv->lll.phy_s & PHY_CODED) {
			win_delay_us = WIN_DELAY_CODED;
		} else {
			win_delay_us = WIN_DELAY_UNCODED;
		}
#endif
	} else {
		win_delay_us = WIN_DELAY_LEGACY;
	}

	/* Set LLCP as connection-wise connected */
	ull_cp_state_set(conn, ULL_CP_CONNECTED);

	/* calculate the window widening */
	conn->periph.sca = pdu_adv->connect_ind.sca;
	lll->periph.window_widening_periodic_us =
		DIV_ROUND_UP(((lll_clock_ppm_local_get() +
				   lll_clock_ppm_get(conn->periph.sca)) *
				  conn_interval_us), USEC_PER_SEC);
	lll->periph.window_widening_max_us = (conn_interval_us >> 1) -
					    EVENT_IFS_US;
	lll->periph.window_size_event_us = pdu_adv->connect_ind.win_size *
		CONN_INT_UNIT_US;

	/* procedure timeouts */
	conn->supervision_timeout = sys_le16_to_cpu(pdu_adv->connect_ind.timeout);

	/* Setup the PRT reload */
	ull_cp_prt_reload_set(conn, conn_interval_us);

#if defined(CONFIG_BT_CTLR_CONN_ISO)
	uint16_t conn_accept_timeout;

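	/* Fetch the Connection Accept Timeout (in 0.625 ms units) and
	 * convert it to microseconds.
	 */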
	(void)ll_conn_iso_accept_timeout_get(&conn_accept_timeout);
	conn->connect_accept_to = conn_accept_timeout * 625U;
#else
	conn->connect_accept_to = DEFAULT_CONNECTION_ACCEPT_TIMEOUT_US;
#endif /* CONFIG_BT_CTLR_CONN_ISO */

#if defined(CONFIG_BT_CTLR_LE_PING)
	/* APTO in no. of connection events */
	conn->apto_reload = RADIO_CONN_EVENTS((30 * 1000 * 1000),
					      conn_interval_us);
	/* Dispatch the LE Ping PDU 6 connection events (that the peer would
	 * listen to) before the 30 s timeout.
	 * TODO: the interval the peer actually listens at can exceed 30 s
	 *       due to peripheral latency.
	 */
	conn->appto_reload = (conn->apto_reload > (lll->latency + 6)) ?
			     (conn->apto_reload - (lll->latency + 6)) :
			     conn->apto_reload;
#endif /* CONFIG_BT_CTLR_LE_PING */

#if defined(CONFIG_BT_CTLR_CONN_RANDOM_FORCE)
	memcpy((void *)&conn->periph.force, &lll->access_addr[0],
	       sizeof(conn->periph.force));
#endif /* CONFIG_BT_CTLR_CONN_RANDOM_FORCE */

	if (0) {
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	} else if (adv->lll.aux) {
		chan_sel = 1U;
#endif
	} else {
		chan_sel = pdu_adv->chan_sel;
	}

	/* Check for pdu field being aligned before populating connection
	 * complete event.
	 */
	node = pdu_adv;
	LL_ASSERT(IS_PTR_ALIGNED(node, struct node_rx_cc));

	/* Populate the fields required for connection complete event */
	cc = node;
	cc->status = 0U;
	cc->role = 1U;

#if defined(CONFIG_BT_CTLR_PRIVACY)
	if (ull_filter_lll_lrpa_used(adv->lll.rl_idx)) {
		memcpy(&cc->local_rpa[0], &pdu_adv->connect_ind.adv_addr[0],
		       BDADDR_SIZE);
	} else {
		memset(&cc->local_rpa[0], 0x0, BDADDR_SIZE);
	}

	if (rl_idx != FILTER_IDX_NONE) {
		/* Store peer RPA */
		memcpy(cc->peer_rpa, peer_addr, BDADDR_SIZE);
	} else {
		memset(cc->peer_rpa, 0x0, BDADDR_SIZE);
	}
#endif /* CONFIG_BT_CTLR_PRIVACY */

	cc->peer_addr_type = peer_addr_type;
	memcpy(cc->peer_addr, peer_id_addr, BDADDR_SIZE);

	cc->interval = lll->interval;
	cc->latency = lll->latency;
	cc->timeout = conn->supervision_timeout;
	cc->sca = conn->periph.sca;

	lll->handle = ll_conn_handle_get(conn);
	rx->hdr.handle = lll->handle;

#if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL)
	lll->tx_pwr_lvl = RADIO_TXP_DEFAULT;
#endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL */

	/* Use Channel Selection Algorithm #2 if the peer also supports it */
	if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
		struct node_rx_pdu *rx_csa;
		struct node_rx_cs *cs;

		/* pick the rx node instance stored within the connection
		 * rx node.
		 */
		rx_csa = (void *)ftr->extra;

		/* Enqueue the connection event */
		ll_rx_put(link, rx);

		/* use the rx node for CSA event */
		rx = rx_csa;
		link = rx->hdr.link;

		rx->hdr.handle = lll->handle;
		rx->hdr.type = NODE_RX_TYPE_CHAN_SEL_ALGO;

		cs = (void *)rx_csa->pdu;

		if (chan_sel) {
			lll->data_chan_sel = 1;
			lll->data_chan_id = lll_chan_id(lll->access_addr);

			cs->csa = 0x01;
		} else {
			cs->csa = 0x00;
		}
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	if (ll_adv_cmds_is_ext()) {
		uint8_t handle;

		/* Enqueue connection or CSA event */
		ll_rx_put(link, rx);

		/* use reserved link and node_rx to prepare
		 * advertising terminate event
		 */
		rx = adv->lll.node_rx_adv_term;
		link = rx->hdr.link;

		handle = ull_adv_handle_get(adv);
		LL_ASSERT(handle < BT_CTLR_ADV_SET);

		rx->hdr.type = NODE_RX_TYPE_EXT_ADV_TERMINATE;
		rx->hdr.handle = handle;
		rx->rx_ftr.param_adv_term.status = 0U;
		rx->rx_ftr.param_adv_term.conn_handle = lll->handle;
		rx->rx_ftr.param_adv_term.num_events = 0U;
	}
#endif

	ll_rx_put_sched(link, rx);

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
#if defined(CONFIG_BT_CTLR_PHY)
	max_tx_time = lll->dle.eff.max_tx_time;
	max_rx_time = lll->dle.eff.max_rx_time;
#else /* !CONFIG_BT_CTLR_PHY */
	max_tx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);
	max_rx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);
#endif /* !CONFIG_BT_CTLR_PHY */
#else /* !CONFIG_BT_CTLR_DATA_LENGTH */
	max_tx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);
	max_rx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);
#if defined(CONFIG_BT_CTLR_PHY)
	max_tx_time = MAX(max_tx_time,
			  PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, lll->phy_tx));
	max_rx_time = MAX(max_rx_time,
			  PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, lll->phy_rx));
#endif /* !CONFIG_BT_CTLR_PHY */
#endif /* !CONFIG_BT_CTLR_DATA_LENGTH */

#if defined(CONFIG_BT_CTLR_PHY)
	ready_delay_us = lll_radio_rx_ready_delay_get(lll->phy_rx, PHY_FLAGS_S8);
#else /* CONFIG_BT_CTLR_PHY */
	ready_delay_us = lll_radio_rx_ready_delay_get(0U, 0U);
#endif /* CONFIG_BT_CTLR_PHY */

	/* Calculate event time reservation */
	slot_us = max_rx_time + max_tx_time;
	slot_us += EVENT_IFS_US + (EVENT_CLOCK_JITTER_US << 1);
	slot_us += ready_delay_us;

	if (IS_ENABLED(CONFIG_BT_CTLR_EVENT_OVERHEAD_RESERVE_MAX)) {
		slot_us += EVENT_OVERHEAD_START_US + EVENT_OVERHEAD_END_US;
	}

	/* TODO: active_to_start feature port */
	conn->ull.ticks_active_to_start = 0U;
	conn->ull.ticks_prepare_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	conn->ull.ticks_preempt_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);
	conn->ull.ticks_slot = HAL_TICKER_US_TO_TICKS_CEIL(slot_us);

	ticks_slot_offset = MAX(conn->ull.ticks_active_to_start,
				conn->ull.ticks_prepare_to_start);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}
	ticks_slot_offset += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

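	/* Compensate the ticker interval for the window widening applied in
	 * each connection event.
	 */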
	conn_interval_us -= lll->periph.window_widening_periodic_us;

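	/* Compute the first connection event anchor relative to the end of
	 * the received PDU, adjusted for the transmit window offset and
	 * delay, minus the scheduling margins.
	 */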
	conn_offset_us = ftr->radio_end_us;
	conn_offset_us += win_offset * CONN_INT_UNIT_US;
	conn_offset_us += win_delay_us;
	conn_offset_us -= EVENT_TICKER_RES_MARGIN_US;
	conn_offset_us -= EVENT_JITTER_US;
	conn_offset_us -= ready_delay_us;

#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	/* Disable the ticker job so that the stop and start operations are
	 * chained, avoiding the RTC being stopped when no tickers are active.
	 */
	mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 0);
#endif

#if defined(CONFIG_BT_CTLR_ADV_EXT) && (CONFIG_BT_CTLR_ADV_AUX_SET > 0)
	struct lll_adv_aux *lll_aux = adv->lll.aux;

	if (lll_aux) {
		struct ll_adv_aux_set *aux;

		aux = HDR_LLL2ULL(lll_aux);

		ticker_id_adv = TICKER_ID_ADV_AUX_BASE +
				ull_adv_aux_handle_get(aux);
		ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR,
					    TICKER_USER_ID_ULL_HIGH,
					    ticker_id_adv,
					    ticker_op_stop_adv_cb, aux);
		ticker_op_stop_adv_cb(ticker_status, aux);

		aux->is_started = 0U;
	}
#endif

	/* Stop Advertiser */
	ticker_id_adv = TICKER_ID_ADV_BASE + ull_adv_handle_get(adv);
	ticks_at_stop = ftr->ticks_anchor +
			HAL_TICKER_US_TO_TICKS(conn_offset_us) -
			ticks_slot_offset;
	ticker_status = ticker_stop_abs(TICKER_INSTANCE_ID_CTLR,
					TICKER_USER_ID_ULL_HIGH,
					ticker_id_adv, ticks_at_stop,
					ticker_op_stop_adv_cb, adv);
	ticker_op_stop_adv_cb(ticker_status, adv);

	/* Stop the ticker that would stop high duty cycle directed advertising */
	if (adv->lll.is_hdcd) {
		/* The advertiser stop ticker can expire while we are here in
		 * this ISR. A deferred attempt to stop it can then fail as it
		 * has already expired, hence ignore the failure.
		 */
		(void)ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
				  TICKER_ID_ADV_STOP, NULL, NULL);
	}

	/* Start Peripheral */
	ticker_id_conn = TICKER_ID_CONN_BASE + ll_conn_handle_get(conn);
	ticker_status = ticker_start(TICKER_INSTANCE_ID_CTLR,
				     TICKER_USER_ID_ULL_HIGH,
				     ticker_id_conn,
				     ftr->ticks_anchor - ticks_slot_offset,
				     HAL_TICKER_US_TO_TICKS(conn_offset_us),
				     HAL_TICKER_US_TO_TICKS(conn_interval_us),
				     HAL_TICKER_REMAINDER(conn_interval_us),
				     TICKER_NULL_LAZY,
				     (conn->ull.ticks_slot +
				      ticks_slot_overhead),
				     ull_periph_ticker_cb, conn, ticker_op_cb,
				     (void *)__LINE__);
	LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
		  (ticker_status == TICKER_STATUS_BUSY));

#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	/* Enable the ticker job, irrespective of it having been disabled in
	 * this function, so that the first connection event can be scheduled
	 * as soon as possible.
	 */
	mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1);
#endif
}

void ull_periph_latency_cancel(struct ll_conn *conn, uint16_t handle)
{
	/* break peripheral latency */
	if (conn->lll.latency_event && !conn->periph.latency_cancel) {
		uint32_t ticker_status;

		conn->periph.latency_cancel = 1U;

		ticker_status =
			ticker_update(TICKER_INSTANCE_ID_CTLR,
				      TICKER_USER_ID_THREAD,
				      (TICKER_ID_CONN_BASE + handle),
				      0, 0, 0, 0, 1, 0,
				      ticker_update_latency_cancel_op_cb,
				      (void *)conn);
		LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
			  (ticker_status == TICKER_STATUS_BUSY));
	}
}

void ull_periph_ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
			 uint32_t remainder, uint16_t lazy, uint8_t force,
			 void *param)
{
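	/* Single static mayfly and prepare parameters, reused for each
	 * invocation of this ticker callback.
	 */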
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_periph_prepare};
	static struct lll_prepare_param p;
	struct ll_conn *conn;
	uint32_t err;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_S(1);

	conn = param;

	/* Check if stopping ticker (on disconnection, race with ticker expiry)
	 */
	if (unlikely(conn->lll.handle == 0xFFFF)) {
		DEBUG_RADIO_CLOSE_S(0);
		return;
	}

#if defined(CONFIG_BT_CTLR_CONN_META)
	conn->common.is_must_expire = (lazy == TICKER_LAZY_MUST_EXPIRE);
#endif
	/* If this is a must-expire callback, the LLCP state machine does not
	 * need to know; it will be called with lazy > 0 when the event is
	 * scheduled on air.
	 */
	if (!IS_ENABLED(CONFIG_BT_CTLR_CONN_META) ||
	    (lazy != TICKER_LAZY_MUST_EXPIRE)) {
		int ret;

		/* Handle any LL Control Procedures */
		ret = ull_conn_llcp(conn, ticks_at_expire, remainder, lazy);
		if (ret) {
			/* NOTE: Under BT_CTLR_LOW_LAT, ULL_LOW context is
			 *       disabled inside radio events, hence, abort any
			 *       active radio event which will re-enable
			 *       ULL_LOW context that permits ticker job to run.
			 */
			if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) &&
			    (CONFIG_BT_CTLR_LLL_PRIO ==
			     CONFIG_BT_CTLR_ULL_LOW_PRIO)) {
				ll_radio_state_abort();
			}

			DEBUG_RADIO_CLOSE_S(0);
			return;
		}
	}

	/* Increment prepare reference count */
	ref = ull_ref_inc(&conn->ull);
	LL_ASSERT(ref);

	/* Append timing parameters */
	p.ticks_at_expire = ticks_at_expire;
	p.remainder = remainder;
	p.lazy = lazy;
	p.force = force;
	p.param = &conn->lll;
	mfy.param = &p;

	/* Kick LLL prepare */
	err = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
			     0, &mfy);
	LL_ASSERT(!err);

	/* De-mux remaining tx nodes from FIFO */
	ull_conn_tx_demux(UINT8_MAX);

	/* Enqueue towards LLL */
	ull_conn_tx_lll_enqueue(conn, UINT8_MAX);

	DEBUG_RADIO_PREPARE_S(1);
}

#if defined(CONFIG_BT_CTLR_LE_ENC)
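/* Handle the host's LE Long Term Key Request reply for the given connection
 * handle: a zero error_code selects the LTK reply, a non-zero error_code the
 * negative reply.
 */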
uint8_t ll_start_enc_req_send(uint16_t handle, uint8_t error_code,
			    uint8_t const *const ltk)
{
	struct ll_conn *conn;

	conn = ll_connected_get(handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	if (error_code) {
		return ull_cp_ltk_req_neq_reply(conn);
	} else {
		return ull_cp_ltk_req_reply(conn, ltk);
	}
}
#endif /* CONFIG_BT_CTLR_LE_ENC */

static void invalid_release(struct ull_hdr *hdr, struct lll_conn *lll,
			    memq_link_t *link, struct node_rx_pdu *rx)
{
	/* Reset the advertising disabled callback */
	hdr->disabled_cb = NULL;

	/* Let the advertiser continue with connectable advertising */
	lll->periph.initiated = 0U;

	/* Mark the buffer for release */
	rx->hdr.type = NODE_RX_TYPE_RELEASE;

	/* Release CSA#2 related node rx too */
	if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
		struct node_rx_pdu *rx_csa;

		/* pick the rx node instance stored within the
		 * connection rx node.
		 */
		rx_csa = rx->rx_ftr.extra;

		/* Enqueue the connection event to be released */
		ll_rx_put(link, rx);

		/* Use the rx node for CSA event */
		rx = rx_csa;
		link = rx->hdr.link;

		/* Mark the buffer for release */
		rx->hdr.type = NODE_RX_TYPE_RELEASE;
	}

	/* Enqueue the connection or CSA event to be released */
	ll_rx_put_sched(link, rx);
}

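/* NOTE: A failed ticker stop is tolerated only while the advertising set is
 *       concurrently being disabled, i.e. when param is the disable mark.
 */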
static void ticker_op_stop_adv_cb(uint32_t status, void *param)
{
	LL_ASSERT(status != TICKER_STATUS_FAILURE ||
		  param == ull_disable_mark_get());
}

static void ticker_op_cb(uint32_t status, void *param)
{
	ARG_UNUSED(param);

	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
}

static void ticker_update_latency_cancel_op_cb(uint32_t ticker_status,
					       void *param)
{
	struct ll_conn *conn = param;

	LL_ASSERT(ticker_status == TICKER_STATUS_SUCCESS);

	conn->periph.latency_cancel = 0U;
}

#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN)
uint8_t ll_set_min_used_chans(uint16_t handle, uint8_t const phys,
			      uint8_t const min_used_chans)
{
	struct ll_conn *conn;

	conn = ll_connected_get(handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	if (!conn->lll.role) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	return ull_cp_min_used_chans(conn, phys, min_used_chans);
}
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN */