1 /*
2  * Copyright (c) 2018-2021 Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr.h>
8 #include <soc.h>
9 #include <bluetooth/hci.h>
10 #include <sys/byteorder.h>
11 
12 #include "util/util.h"
13 #include "util/memq.h"
14 #include "util/mem.h"
15 #include "util/mayfly.h"
16 
17 #include "hal/cpu.h"
18 #include "hal/ccm.h"
19 #include "hal/radio.h"
20 #include "hal/ticker.h"
21 
22 #include "ticker/ticker.h"
23 
24 #include "pdu.h"
25 
26 #include "lll.h"
27 #include "lll_clock.h"
28 #include "lll/lll_vendor.h"
29 #include "lll/lll_adv_types.h"
30 #include "lll_adv.h"
31 #include "lll/lll_adv_pdu.h"
32 #include "lll_chan.h"
33 #include "lll_scan.h"
34 #include "lll/lll_df_types.h"
35 #include "lll_conn.h"
36 #include "lll_central.h"
37 #include "lll_filter.h"
38 
39 #include "ull_adv_types.h"
40 #include "ull_scan_types.h"
41 #include "ull_conn_types.h"
42 #include "ull_filter.h"
43 
44 #include "ull_internal.h"
45 #include "ull_chan_internal.h"
46 #include "ull_scan_internal.h"
47 #include "ull_conn_internal.h"
48 #include "ull_central_internal.h"
49 
50 #include "ll.h"
51 #include "ll_feat.h"
52 #include "ll_settings.h"
53 
54 #define BT_DBG_ENABLED IS_ENABLED(CONFIG_BT_DEBUG_HCI_DRIVER)
55 #define LOG_MODULE_NAME bt_ctlr_ull_central
56 #include "common/log.h"
57 #include "hal/debug.h"
58 
59 static void ticker_op_stop_scan_cb(uint32_t status, void *param);
60 #if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_CTLR_PHY_CODED)
61 static void ticker_op_stop_scan_other_cb(uint32_t status, void *param);
62 #endif /* CONFIG_BT_CTLR_ADV_EXT && CONFIG_BT_CTLR_PHY_CODED */
63 static void ticker_op_cb(uint32_t status, void *param);
64 static inline void conn_release(struct ll_scan_set *scan);
65 
/**
 * @brief HCI LE Create Connection: allocate and initialize a connection
 *        context and configure the scanner for connection initiation.
 *
 * Reuses an already assigned connection context if one exists, otherwise
 * allocates one and initializes its LLL and ULL state. The scanning context
 * is then programmed with the peer address and connection timing parameters.
 *
 * Without CONFIG_BT_CTLR_ADV_EXT the scanner is started here; with it, the
 * scanner(s) are started later by ll_connect_enable().
 *
 * @return 0 on success, otherwise an HCI error code.
 */
#if defined(CONFIG_BT_CTLR_ADV_EXT)
uint8_t ll_create_connection(uint16_t scan_interval, uint16_t scan_window,
			  uint8_t filter_policy, uint8_t peer_addr_type,
			  uint8_t const *const peer_addr, uint8_t own_addr_type,
			  uint16_t interval, uint16_t latency, uint16_t timeout,
			  uint8_t phy)
#else /* !CONFIG_BT_CTLR_ADV_EXT */
uint8_t ll_create_connection(uint16_t scan_interval, uint16_t scan_window,
			  uint8_t filter_policy, uint8_t peer_addr_type,
			  uint8_t const *const peer_addr, uint8_t own_addr_type,
			  uint16_t interval, uint16_t latency, uint16_t timeout)
#endif /* !CONFIG_BT_CTLR_ADV_EXT */
{
	struct lll_conn *conn_lll;
	uint32_t conn_interval_us;
	uint8_t own_id_addr_type;
	struct ll_scan_set *scan;
	uint32_t ready_delay_us;
	uint8_t *own_id_addr;
	struct lll_scan *lll;
	struct ll_conn *conn;
	uint16_t max_tx_time;
	uint16_t max_rx_time;
	memq_link_t *link;
	uint8_t hop;
	int err;

	/* Initiation is only allowed while the 1M PHY scanner is disabled */
	scan = ull_scan_is_disabled_get(SCAN_HANDLE_1M);
	if (!scan) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Check if random address has been set */
	own_id_addr_type = (own_addr_type & 0x01);
	own_id_addr = ll_addr_get(own_id_addr_type);
	if (own_id_addr_type && !mem_nz((void *)own_id_addr, BDADDR_SIZE)) {
		return BT_HCI_ERR_INVALID_PARAM;
	}

#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
	/* Do not connect twice to the same peer */
	if (ull_conn_peer_connected(own_id_addr_type, own_id_addr,
				    peer_addr_type, peer_addr)) {
		return BT_HCI_ERR_CONN_ALREADY_EXISTS;
	}
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */

#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if defined(CONFIG_BT_CTLR_PHY_CODED)
	struct ll_scan_set *scan_coded;
	struct lll_scan *lll_coded;

	/* Coded PHY scanner must also be disabled */
	scan_coded = ull_scan_is_disabled_get(SCAN_HANDLE_PHY_CODED);
	if (!scan_coded) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	lll = &scan->lll;
	lll_coded = &scan_coded->lll;

	/* NOTE: When coded PHY is supported, and connection establishment
	 *       over coded PHY is selected by application then look for
	 *       a connection context already assigned to 1M PHY scanning
	 *       context. Use the same connection context in the coded PHY
	 *       scanning context.
	 */
	if (phy & BT_HCI_LE_EXT_SCAN_PHY_CODED) {
		if (!lll_coded->conn) {
			lll_coded->conn = lll->conn;
		}
		scan = scan_coded;
		lll = lll_coded;
	} else {
		if (!lll->conn) {
			lll->conn = lll_coded->conn;
		}
	}

#else /* !CONFIG_BT_CTLR_PHY_CODED */
	/* Coded PHY unsupported: reject any PHY other than 1M */
	if (phy & ~BT_HCI_LE_EXT_SCAN_PHY_1M) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	lll = &scan->lll;

#endif /* !CONFIG_BT_CTLR_PHY_CODED */

	/* NOTE: non-zero PHY value enables initiating connection on that PHY */
	lll->phy = phy;

#else /* !CONFIG_BT_CTLR_ADV_EXT */
	lll = &scan->lll;
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

	/* Reuse a connection context already assigned to this scanning
	 * context (e.g. allocated for the other PHY's scanning context).
	 */
	if (lll->conn) {
		conn_lll = lll->conn;
		conn = HDR_LLL2ULL(conn_lll);

		goto conn_is_valid;
	}

	/* Reserve the rx link used later for the dedicated terminate
	 * indication rx node.
	 */
	link = ll_rx_link_alloc();
	if (!link) {
		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	conn = ll_conn_acquire();
	if (!conn) {
		ll_rx_link_release(link);

		return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
	}

	conn_lll = &conn->lll;

	/* Generate a spec-compliant random access address */
	err = util_aa_le32(conn_lll->access_addr);
	LL_ASSERT(!err);

	lll_csrand_get(conn_lll->crc_init, sizeof(conn_lll->crc_init));

	/* 0xFFFF marks the connection as not yet established */
	conn_lll->handle = 0xFFFF;
	conn_lll->interval = interval;
	conn_lll->latency = latency;

	if (!conn_lll->link_tx_free) {
		conn_lll->link_tx_free = &conn_lll->link_tx;
	}

	memq_init(conn_lll->link_tx_free, &conn_lll->memq_tx.head,
		  &conn_lll->memq_tx.tail);
	conn_lll->link_tx_free = NULL;

	conn_lll->packet_tx_head_len = 0;
	conn_lll->packet_tx_head_offset = 0;

	conn_lll->sn = 0;
	conn_lll->nesn = 0;
	conn_lll->empty = 0;

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	conn_lll->max_tx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
	conn_lll->max_rx_octets = PDU_DC_PAYLOAD_SIZE_MIN;

#if defined(CONFIG_BT_CTLR_PHY)
	/* Use the default 1M packet Tx time, extended connection initiation
	 * in LLL will update this with the correct PHY.
	 */
	conn_lll->max_tx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);
	conn_lll->max_rx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);
#endif /* CONFIG_BT_CTLR_PHY */
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

#if defined(CONFIG_BT_CTLR_PHY)
	/* Use the default 1M PHY, extended connection initiation in LLL will
	 * update this with the correct PHY.
	 */
	conn_lll->phy_tx = PHY_1M;
	conn_lll->phy_flags = 0;
	conn_lll->phy_tx_time = PHY_1M;
	conn_lll->phy_rx = PHY_1M;
#endif /* CONFIG_BT_CTLR_PHY */

#if defined(CONFIG_BT_CTLR_CONN_RSSI)
	conn_lll->rssi_latest = BT_HCI_LE_RSSI_NOT_AVAILABLE;
#if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
	conn_lll->rssi_reported = BT_HCI_LE_RSSI_NOT_AVAILABLE;
	conn_lll->rssi_sample_count = 0;
#endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */
#endif /* CONFIG_BT_CTLR_CONN_RSSI */

#if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL)
	conn_lll->tx_pwr_lvl = RADIO_TXP_DEFAULT;
#endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL */

	/* FIXME: BEGIN: Move to ULL? */
	conn_lll->latency_prepare = 0;
	conn_lll->latency_event = 0;
	conn_lll->event_counter = 0;

	conn_lll->data_chan_count = ull_chan_map_get(conn_lll->data_chan_map);
	/* Random hop increment in the range 5..16 */
	lll_csrand_get(&hop, sizeof(uint8_t));
	conn_lll->data_chan_hop = 5 + (hop % 12);
	conn_lll->data_chan_sel = 0;
	conn_lll->data_chan_use = 0;
	conn_lll->role = 0;
	conn_lll->central.initiated = 0;
	conn_lll->central.cancelled = 0;
	/* FIXME: END: Move to ULL? */
#if defined(CONFIG_BT_CTLR_CONN_META)
	memset(&conn_lll->conn_meta, 0, sizeof(conn_lll->conn_meta));
#endif /* CONFIG_BT_CTLR_CONN_META */

	/* Six connection events to receive first CONNECT_IND response */
	conn->connect_expire = 6U;
	conn->supervision_expire = 0U;
	conn_interval_us = (uint32_t)interval * CONN_INT_UNIT_US;
	/* timeout is in 10 ms units; convert to connection events */
	conn->supervision_reload = RADIO_CONN_EVENTS(timeout * 10000U,
							 conn_interval_us);

	/* 40 s (40000000 us) LLCP procedure response timeout, in connection
	 * events.
	 */
	conn->procedure_expire = 0U;
	conn->procedure_reload = RADIO_CONN_EVENTS(40000000,
						       conn_interval_us);

#if defined(CONFIG_BT_CTLR_LE_PING)
	conn->apto_expire = 0U;
	/* APTO in no. of connection events */
	conn->apto_reload = RADIO_CONN_EVENTS((30000000), conn_interval_us);
	conn->appto_expire = 0U;
	/* Dispatch LE Ping PDU 6 connection events (that peer would listen to)
	 * before 30s timeout
	 * TODO: "peer listens to" is greater than 30s due to latency
	 */
	conn->appto_reload = (conn->apto_reload > (conn_lll->latency + 6)) ?
			     (conn->apto_reload - (conn_lll->latency + 6)) :
			     conn->apto_reload;
#endif /* CONFIG_BT_CTLR_LE_PING */

	conn->common.fex_valid = 0U;
	conn->common.txn_lock = 0U;
	conn->central.terminate_ack = 0U;

	/* Reset all LL control procedure request/acknowledge state */
	conn->llcp_req = conn->llcp_ack = conn->llcp_type = 0U;
	conn->llcp_rx = NULL;
	conn->llcp_cu.req = conn->llcp_cu.ack = 0;
	conn->llcp_feature.req = conn->llcp_feature.ack = 0;
	conn->llcp_feature.features_conn = ll_feat_get();
	conn->llcp_feature.features_peer = 0;
	conn->llcp_version.req = conn->llcp_version.ack = 0;
	conn->llcp_version.tx = conn->llcp_version.rx = 0U;
	conn->llcp_terminate.req = conn->llcp_terminate.ack = 0U;
	conn->llcp_terminate.reason_final = 0U;
	/* NOTE: use allocated link for generating dedicated
	 * terminate ind rx node
	 */
	conn->llcp_terminate.node_rx.hdr.link = link;

#if defined(CONFIG_BT_CTLR_LE_ENC)
	conn_lll->enc_rx = conn_lll->enc_tx = 0U;
	conn->llcp_enc.req = conn->llcp_enc.ack = 0U;
	conn->llcp_enc.pause_tx = conn->llcp_enc.pause_rx = 0U;
	conn->llcp_enc.refresh = 0U;
#endif /* CONFIG_BT_CTLR_LE_ENC */

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
	conn->llcp_conn_param.req = 0U;
	conn->llcp_conn_param.ack = 0U;
	conn->llcp_conn_param.disabled = 0U;
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	conn->llcp_length.req = conn->llcp_length.ack = 0U;
	conn->llcp_length.disabled = 0U;
	conn->llcp_length.cache.tx_octets = 0U;
	conn->default_tx_octets = ull_conn_default_tx_octets_get();

#if defined(CONFIG_BT_CTLR_PHY)
	conn->default_tx_time = ull_conn_default_tx_time_get();
#endif /* CONFIG_BT_CTLR_PHY */
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

#if defined(CONFIG_BT_CTLR_PHY)
	conn->llcp_phy.req = conn->llcp_phy.ack = 0U;
	conn->llcp_phy.disabled = 0U;
	conn->llcp_phy.pause_tx = 0U;
	conn->phy_pref_tx = ull_conn_default_phy_tx_get();
	conn->phy_pref_rx = ull_conn_default_phy_rx_get();
#endif /* CONFIG_BT_CTLR_PHY */

	/* Empty the Tx queue bookkeeping */
	conn->tx_head = conn->tx_ctrl = conn->tx_ctrl_last =
	conn->tx_data = conn->tx_data_last = 0;

	/* TODO: active_to_start feature port */
	conn->ull.ticks_active_to_start = 0U;
	conn->ull.ticks_prepare_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	conn->ull.ticks_preempt_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);

#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
	/* Remember peer and own identity address */
	conn->peer_id_addr_type = peer_addr_type;
	(void)memcpy(conn->peer_id_addr, peer_addr, sizeof(conn->peer_id_addr));
	conn->own_id_addr_type = own_id_addr_type;
	(void)memcpy(conn->own_id_addr, own_id_addr, sizeof(conn->own_id_addr));
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */

	/* Attach the connection context to the scanning context */
	lll->conn = conn_lll;

	ull_hdr_init(&conn->ull);
	lll_hdr_init(&conn->lll, conn);

conn_is_valid:
	/* Compute the radio event time reservation for the connection */
#if defined(CONFIG_BT_CTLR_PHY)
	ready_delay_us = lll_radio_tx_ready_delay_get(conn_lll->phy_tx,
						      conn_lll->phy_flags);
#else
	ready_delay_us = lll_radio_tx_ready_delay_get(0, 0);
#endif

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
#if defined(CONFIG_BT_CTLR_PHY)
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	conn_lll->max_tx_time = MAX(conn_lll->max_tx_time,
				    PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN,
						  lll->phy));
	conn_lll->max_rx_time = MAX(conn_lll->max_rx_time,
				    PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN,
						  lll->phy));
#endif /* CONFIG_BT_CTLR_ADV_EXT */
	max_tx_time = conn_lll->max_tx_time;
	max_rx_time = conn_lll->max_rx_time;
#else /* !CONFIG_BT_CTLR_PHY */
	max_tx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);
	max_rx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);
#endif /* !CONFIG_BT_CTLR_PHY */
#else /* !CONFIG_BT_CTLR_DATA_LENGTH */
	max_tx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);
	max_rx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);
#if defined(CONFIG_BT_CTLR_ADV_EXT)
	max_tx_time = MAX(max_tx_time,
			  PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, lll->phy));
	max_rx_time = MAX(max_rx_time,
			  PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, lll->phy));
#endif /* CONFIG_BT_CTLR_ADV_EXT */
#endif /* !CONFIG_BT_CTLR_DATA_LENGTH */

	/* Time slot: one Tx-Rx exchange plus start overhead */
	conn->ull.ticks_slot =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US +
				       ready_delay_us +
				       max_tx_time +
				       EVENT_IFS_US +
				       max_rx_time);

#if defined(CONFIG_BT_CTLR_PRIVACY)
	ull_filter_scan_update(filter_policy);

	lll->rl_idx = FILTER_IDX_NONE;
	lll->rpa_gen = 0;
	if (!filter_policy && ull_filter_lll_rl_enabled()) {
		/* Look up the resolving list */
		lll->rl_idx = ull_filter_rl_find(peer_addr_type, peer_addr,
						 NULL);
	}

	if (own_addr_type == BT_ADDR_LE_PUBLIC_ID ||
	    own_addr_type == BT_ADDR_LE_RANDOM_ID) {

		/* Generate RPAs if required */
		ull_filter_rpa_update(false);
		own_addr_type &= 0x1;
		lll->rpa_gen = 1;
	}
#endif

	/* Program scanning context with peer address and timing */
	scan->own_addr_type = own_addr_type;
	lll->adv_addr_type = peer_addr_type;
	memcpy(lll->adv_addr, peer_addr, BDADDR_SIZE);
	lll->conn_timeout = timeout;

	ull_scan_params_set(lll, 0, scan_interval, scan_window, filter_policy);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	/* Scanner is started later via ll_connect_enable() */
	return 0;
#else /* !CONFIG_BT_CTLR_ADV_EXT */
	/* wait for stable clocks */
	err = lll_clock_wait();
	if (err) {
		conn_release(scan);

		return BT_HCI_ERR_HW_FAILURE;
	}

	return ull_scan_enable(scan);
#endif /* !CONFIG_BT_CTLR_ADV_EXT */
}
440 
441 #if defined(CONFIG_BT_CTLR_ADV_EXT)
/**
 * @brief Start the scanner(s) configured by ll_create_connection() to
 *        initiate the connection (extended advertising builds only).
 *
 * @param is_coded_included Non-zero when the coded PHY scanner shall also
 *                          be started.
 * @return 0 on success, otherwise an HCI error code.
 */
uint8_t ll_connect_enable(uint8_t is_coded_included)
{
	struct ll_scan_set *scan;
	uint8_t ret;

	scan = ull_scan_set_get(SCAN_HANDLE_1M);

	/* Initiation needs stable clocks; release the pre-allocated
	 * connection context on failure.
	 */
	ret = lll_clock_wait();
	if (ret) {
		conn_release(scan);

		return BT_HCI_ERR_HW_FAILURE;
	}

	/* Start the 1M PHY scanner unless initiation is coded-PHY-only */
	if (!is_coded_included || (scan->lll.phy & PHY_1M)) {
		ret = ull_scan_enable(scan);
		if (ret) {
			return ret;
		}
	}

	/* Start the coded PHY scanner when requested and supported */
	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED) && is_coded_included) {
		ret = ull_scan_enable(ull_scan_set_get(SCAN_HANDLE_PHY_CODED));
		if (ret) {
			return ret;
		}
	}

	return ret;
}
475 #endif /* CONFIG_BT_CTLR_ADV_EXT */
476 
/**
 * @brief HCI LE Create Connection Cancel.
 *
 * Requests cancellation of an ongoing connection initiation. On success,
 * disables the active scanning context(s) and synthesizes a connection
 * complete rx node with status BT_HCI_ERR_UNKNOWN_CONN_ID for the host.
 *
 * @param rx Out parameter; on success receives the synthesized
 *           NODE_RX_TYPE_CONNECTION node (struct node_rx_pdu).
 * @return 0 on success, BT_HCI_ERR_CMD_DISALLOWED when no initiation is
 *         in progress (or it completed under race), otherwise the scan
 *         disable error.
 */
uint8_t ll_connect_disable(void **rx)
{
	struct ll_scan_set *scan_coded;
	struct lll_scan *scan_lll;
	struct lll_conn *conn_lll;
	struct ll_scan_set *scan;
	uint8_t err;

	scan = ull_scan_is_enabled_get(SCAN_HANDLE_1M);

	if (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) &&
	    IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
		scan_coded = ull_scan_is_enabled_get(SCAN_HANDLE_PHY_CODED);
	} else {
		scan_coded = NULL;
	}

	/* Pick whichever scanning context is enabled (1M preferred) */
	if (!scan) {
		if (!scan_coded) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}

		scan_lll = &scan_coded->lll;
	} else {
		scan_lll = &scan->lll;
	}

	/* Check if initiator active */
	conn_lll = scan_lll->conn;
	if (!conn_lll) {
		/* Scanning not associated with initiation of a connection or
		 * connection setup already complete (was set to NULL in
		 * ull_central_setup), but HCI event not processed by host.
		 */
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Indicate to LLL that a cancellation is requested */
	conn_lll->central.cancelled = 1U;
	/* Barrier: make the cancelled flag visible before re-reading conn */
	cpu_dmb();

	/* Check if connection was established under race condition, i.e.
	 * before the cancelled flag was set.
	 */
	conn_lll = scan_lll->conn;
	if (!conn_lll) {
		/* Connection setup completed on race condition with cancelled
		 * flag, before it was set.
		 */
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	if (scan) {
		err = ull_scan_disable(SCAN_HANDLE_1M, scan);
	} else {
		err = 0U;
	}

	if (!err && scan_coded) {
		err = ull_scan_disable(SCAN_HANDLE_PHY_CODED, scan_coded);
	}

	if (!err) {
		struct node_rx_pdu *node_rx;
		struct node_rx_cc *cc;
		struct ll_conn *conn;
		memq_link_t *link;

		/* Reuse the pre-allocated terminate-ind node to report the
		 * cancellation to the host.
		 */
		conn = HDR_LLL2ULL(conn_lll);
		node_rx = (void *)&conn->llcp_terminate.node_rx;
		link = node_rx->hdr.link;
		LL_ASSERT(link);

		/* free the memq link early, as caller could overwrite it */
		ll_rx_link_release(link);

		node_rx->hdr.type = NODE_RX_TYPE_CONNECTION;
		node_rx->hdr.handle = 0xffff;

		/* NOTE: struct llcp_terminate.node_rx has uint8_t member
		 *       following the struct node_rx_hdr to store the reason.
		 */
		cc = (void *)node_rx->pdu;
		cc->status = BT_HCI_ERR_UNKNOWN_CONN_ID;

		/* NOTE: Since NODE_RX_TYPE_CONNECTION is also generated from
		 *       LLL context for other cases, pass LLL context as
		 *       parameter.
		 */
		node_rx->hdr.rx_ftr.param = scan_lll;

		*rx = node_rx;
	}

	return err;
}
573 
574 #if defined(CONFIG_BT_CTLR_LE_ENC)
ll_enc_req_send(uint16_t handle,uint8_t const * const rand,uint8_t const * const ediv,uint8_t const * const ltk)575 uint8_t ll_enc_req_send(uint16_t handle, uint8_t const *const rand,
576 		     uint8_t const *const ediv, uint8_t const *const ltk)
577 {
578 	struct ll_conn *conn;
579 	struct node_tx *tx;
580 
581 	conn = ll_connected_get(handle);
582 	if (!conn) {
583 		return BT_HCI_ERR_UNKNOWN_CONN_ID;
584 	}
585 
586 	if ((conn->llcp_enc.req != conn->llcp_enc.ack) ||
587 	    ((conn->llcp_req != conn->llcp_ack) &&
588 	     (conn->llcp_type == LLCP_ENCRYPTION))) {
589 		return BT_HCI_ERR_CMD_DISALLOWED;
590 	}
591 
592 	tx = ll_tx_mem_acquire();
593 	if (tx) {
594 		struct pdu_data *pdu_data_tx;
595 
596 		pdu_data_tx = (void *)tx->pdu;
597 
598 		memcpy(&conn->llcp_enc.ltk[0], ltk, sizeof(conn->llcp_enc.ltk));
599 
600 		if (!conn->lll.enc_rx && !conn->lll.enc_tx) {
601 			struct pdu_data_llctrl_enc_req *enc_req;
602 
603 			pdu_data_tx->ll_id = PDU_DATA_LLID_CTRL;
604 			pdu_data_tx->len =
605 				offsetof(struct pdu_data_llctrl, enc_rsp) +
606 				sizeof(struct pdu_data_llctrl_enc_req);
607 			pdu_data_tx->llctrl.opcode =
608 				PDU_DATA_LLCTRL_TYPE_ENC_REQ;
609 			enc_req = (void *)
610 				&pdu_data_tx->llctrl.enc_req;
611 			memcpy(enc_req->rand, rand, sizeof(enc_req->rand));
612 			enc_req->ediv[0] = ediv[0];
613 			enc_req->ediv[1] = ediv[1];
614 			lll_csrand_get(enc_req->skdm, sizeof(enc_req->skdm));
615 			lll_csrand_get(enc_req->ivm, sizeof(enc_req->ivm));
616 		} else if (conn->lll.enc_rx && conn->lll.enc_tx) {
617 			memcpy(&conn->llcp_enc.rand[0], rand,
618 			       sizeof(conn->llcp_enc.rand));
619 
620 			conn->llcp_enc.ediv[0] = ediv[0];
621 			conn->llcp_enc.ediv[1] = ediv[1];
622 
623 			pdu_data_tx->ll_id = PDU_DATA_LLID_CTRL;
624 			pdu_data_tx->len = offsetof(struct pdu_data_llctrl,
625 						    enc_req);
626 			pdu_data_tx->llctrl.opcode =
627 				PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_REQ;
628 		} else {
629 			ll_tx_mem_release(tx);
630 
631 			return BT_HCI_ERR_CMD_DISALLOWED;
632 		}
633 
634 		if (ll_tx_mem_enqueue(handle, tx)) {
635 			ll_tx_mem_release(tx);
636 
637 			return BT_HCI_ERR_CMD_DISALLOWED;
638 		}
639 
640 		conn->llcp_enc.req++;
641 
642 		return 0;
643 	}
644 
645 	return BT_HCI_ERR_CMD_DISALLOWED;
646 }
647 #endif /* CONFIG_BT_CTLR_LE_ENC */
648 
ull_central_reset(void)649 int ull_central_reset(void)
650 {
651 	int err;
652 	void *rx;
653 
654 	err = ll_connect_disable(&rx);
655 	if (!err) {
656 		struct ll_scan_set *scan;
657 
658 		scan = ull_scan_is_enabled_get(SCAN_HANDLE_1M);
659 
660 		if (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) &&
661 		    IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
662 			struct ll_scan_set *scan_other;
663 
664 			scan_other =
665 				ull_scan_is_enabled_get(SCAN_HANDLE_PHY_CODED);
666 			if (scan_other) {
667 				if (scan) {
668 					scan->is_enabled = 0U;
669 					scan->lll.conn = NULL;
670 				}
671 
672 				scan = scan_other;
673 			}
674 		}
675 
676 		LL_ASSERT(scan);
677 
678 		scan->is_enabled = 0U;
679 		scan->lll.conn = NULL;
680 	}
681 
682 	ARG_UNUSED(rx);
683 
684 	return err;
685 }
686 
ull_central_cleanup(struct node_rx_hdr * rx_free)687 void ull_central_cleanup(struct node_rx_hdr *rx_free)
688 {
689 	struct lll_conn *conn_lll;
690 	struct ll_scan_set *scan;
691 	struct ll_conn *conn;
692 	memq_link_t *link;
693 
694 	/* NOTE: `scan` variable can be 1M PHY or coded PHY scanning context.
695 	 *       Single connection context is allocated in both the 1M PHY and
696 	 *       coded PHY scanning context, hence releasing only this one
697 	 *       connection context.
698 	 */
699 	scan = HDR_LLL2ULL(rx_free->rx_ftr.param);
700 	conn_lll = scan->lll.conn;
701 	LL_ASSERT(conn_lll);
702 	scan->lll.conn = NULL;
703 
704 	LL_ASSERT(!conn_lll->link_tx_free);
705 	link = memq_deinit(&conn_lll->memq_tx.head,
706 			   &conn_lll->memq_tx.tail);
707 	LL_ASSERT(link);
708 	conn_lll->link_tx_free = link;
709 
710 	conn = HDR_LLL2ULL(conn_lll);
711 	ll_conn_release(conn);
712 
713 	/* 1M PHY is disabled here if both 1M and coded PHY was enabled for
714 	 * connection establishment.
715 	 */
716 	scan->is_enabled = 0U;
717 
718 #if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_CTLR_PHY_CODED)
719 	scan->lll.phy = 0U;
720 
721 	/* Determine if coded PHY was also enabled, if so, reset the assigned
722 	 * connection context, enabled flag and phy value.
723 	 */
724 	struct ll_scan_set *scan_coded =
725 				ull_scan_is_enabled_get(SCAN_HANDLE_PHY_CODED);
726 	if (scan_coded && scan_coded != scan) {
727 		conn_lll = scan_coded->lll.conn;
728 		LL_ASSERT(conn_lll);
729 		scan_coded->lll.conn = NULL;
730 
731 		scan_coded->is_enabled = 0U;
732 		scan_coded->lll.phy = 0U;
733 	}
734 #endif /* CONFIG_BT_CTLR_ADV_EXT && CONFIG_BT_CTLR_PHY_CODED */
735 }
736 
/**
 * @brief Complete connection setup in the central role after CONNECT_IND
 *        transmission: generate the connection complete (and optionally the
 *        channel selection algorithm) event, stop the scanner ticker(s) and
 *        start the connection event ticker.
 *
 * @param rx  Rx node that carried the Tx-ed CONNECT_IND PDU; reused in-place
 *            to build the connection complete event.
 * @param ftr Rx footer with radio timing, privacy info and the LLL scanning
 *            context in ftr->param.
 * @param lll Connection LLL context associated with the scanner.
 */
void ull_central_setup(struct node_rx_hdr *rx, struct node_rx_ftr *ftr,
		      struct lll_conn *lll)
{
	uint32_t conn_offset_us, conn_interval_us;
	uint8_t ticker_id_scan, ticker_id_conn;
	uint8_t peer_addr[BDADDR_SIZE];
	uint32_t ticks_slot_overhead;
	uint32_t ticks_slot_offset;
	struct ll_scan_set *scan;
	struct pdu_adv *pdu_tx;
	uint8_t peer_addr_type;
	uint32_t ticker_status;
	struct node_rx_cc *cc;
	struct ll_conn *conn;
	memq_link_t *link;
	uint8_t chan_sel;

	/* Get reference to Tx-ed CONNECT_IND PDU */
	pdu_tx = (void *)((struct node_rx_pdu *)rx)->pdu;

	/* Backup peer addr and type, as we reuse the Tx-ed PDU to generate
	 * event towards LL
	 */
	peer_addr_type = pdu_tx->rx_addr;
	memcpy(peer_addr, &pdu_tx->connect_ind.adv_addr[0], BDADDR_SIZE);

	/* This is the chan sel bit from the received adv pdu */
	chan_sel = pdu_tx->chan_sel;

	/* Populate the fields required for connection complete event */
	cc = (void *)pdu_tx;
	cc->status = 0U;
	cc->role = 0U;

#if defined(CONFIG_BT_CTLR_PRIVACY)
	uint8_t rl_idx = ftr->rl_idx;

	if (ftr->lrpa_used) {
		memcpy(&cc->local_rpa[0], &pdu_tx->connect_ind.init_addr[0],
		       BDADDR_SIZE);
	} else {
		memset(&cc->local_rpa[0], 0x0, BDADDR_SIZE);
	}

	if (rl_idx != FILTER_IDX_NONE) {
		/* Store identity address */
		ll_rl_id_addr_get(rl_idx, &cc->peer_addr_type,
				  &cc->peer_addr[0]);
		/* Mark it as identity address from RPA (0x02, 0x03) */
		cc->peer_addr_type += 2;

		/* Store peer RPA */
		memcpy(&cc->peer_rpa[0], &peer_addr[0], BDADDR_SIZE);
	} else {
		memset(&cc->peer_rpa[0], 0x0, BDADDR_SIZE);
	/* NOTE: the open brace of this else (or of `if (1)` without privacy)
	 *       pairs with the closing brace after the common assignments
	 *       below, across the #if/#else boundary.
	 */
#else
	if (1) {
#endif /* CONFIG_BT_CTLR_PRIVACY */
		cc->peer_addr_type = peer_addr_type;
		memcpy(cc->peer_addr, &peer_addr[0], BDADDR_SIZE);
	}

	scan = HDR_LLL2ULL(ftr->param);

	cc->interval = lll->interval;
	cc->latency = lll->latency;
	cc->timeout = scan->lll.conn_timeout;
	cc->sca = lll_clock_sca_local_get();

	conn = lll->hdr.parent;
	lll->handle = ll_conn_handle_get(conn);
	rx->handle = lll->handle;

#if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL)
	lll->tx_pwr_lvl = RADIO_TXP_DEFAULT;
#endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL */

	/* Use the link stored in the node rx to enqueue connection
	 * complete node rx towards LL context.
	 */
	link = rx->link;

	/* Use Channel Selection Algorithm #2 if peer too supports it */
	if (IS_ENABLED(CONFIG_BT_CTLR_CHAN_SEL_2)) {
		struct node_rx_pdu *rx_csa;
		struct node_rx_cs *cs;

		/* pick the rx node instance stored within the connection
		 * rx node.
		 */
		rx_csa = (void *)ftr->extra;

		/* Enqueue the connection event */
		ll_rx_put(link, rx);

		/* use the rx node for CSA event */
		rx = (void *)rx_csa;
		link = rx->link;

		rx->handle = lll->handle;
		rx->type = NODE_RX_TYPE_CHAN_SEL_ALGO;

		cs = (void *)rx_csa->pdu;

		if (chan_sel) {
			lll->data_chan_sel = 1;
			lll->data_chan_id = lll_chan_id(lll->access_addr);

			cs->csa = 0x01;
		} else {
			cs->csa = 0x00;
		}
	}

	/* Enqueue the (remaining) event and notify the host thread */
	ll_rx_put(link, rx);
	ll_rx_sched();

	ticks_slot_offset = MAX(conn->ull.ticks_active_to_start,
				conn->ull.ticks_prepare_to_start);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = ticks_slot_offset;
	} else {
		ticks_slot_overhead = 0U;
	}
	ticks_slot_offset += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

	/* First connection event is anchored relative to the CONNECT_IND
	 * radio end time.
	 */
	conn_interval_us = lll->interval * CONN_INT_UNIT_US;
	conn_offset_us = ftr->radio_end_us;
	conn_offset_us += EVENT_TICKER_RES_MARGIN_US;

#if defined(CONFIG_BT_CTLR_PHY)
	conn_offset_us -= lll_radio_tx_ready_delay_get(lll->phy_tx,
						      lll->phy_flags);
#else
	conn_offset_us -= lll_radio_tx_ready_delay_get(0, 0);
#endif


#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	/* disable ticker job, in order to chain stop and start to avoid RTC
	 * being stopped if no tickers active.
	 */
	mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 0);
#endif

	/* Stop Scanner */
	ticker_id_scan = TICKER_ID_SCAN_BASE + ull_scan_handle_get(scan);
	ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR,
				    TICKER_USER_ID_ULL_HIGH,
				    ticker_id_scan, ticker_op_stop_scan_cb,
				    scan);
	LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
		  (ticker_status == TICKER_STATUS_BUSY));

#if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_CTLR_PHY_CODED)
	/* Determine if coded PHY was also enabled, if so, reset the assigned
	 * connection context.
	 */
	struct ll_scan_set *scan_other =
				ull_scan_is_enabled_get(SCAN_HANDLE_PHY_CODED);
	if (scan_other) {
		if (scan_other == scan) {
			scan_other = ull_scan_is_enabled_get(SCAN_HANDLE_1M);
		}

		if (scan_other) {
			ticker_id_scan = TICKER_ID_SCAN_BASE +
					 ull_scan_handle_get(scan_other);
			ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR,
						    TICKER_USER_ID_ULL_HIGH,
						    ticker_id_scan,
						    ticker_op_stop_scan_other_cb,
						    scan_other);
			LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
				  (ticker_status == TICKER_STATUS_BUSY));
		}
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT && CONFIG_BT_CTLR_PHY_CODED */

	/* Scanner stop can expire while here in this ISR.
	 * Deferred attempt to stop can fail as it would have
	 * expired, hence ignore failure.
	 */
	ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
		    TICKER_ID_SCAN_STOP, NULL, NULL);

	/* Start central */
	ticker_id_conn = TICKER_ID_CONN_BASE + ll_conn_handle_get(conn);
	ticker_status = ticker_start(TICKER_INSTANCE_ID_CTLR,
				     TICKER_USER_ID_ULL_HIGH,
				     ticker_id_conn,
				     ftr->ticks_anchor - ticks_slot_offset,
				     HAL_TICKER_US_TO_TICKS(conn_offset_us),
				     HAL_TICKER_US_TO_TICKS(conn_interval_us),
				     HAL_TICKER_REMAINDER(conn_interval_us),
				     TICKER_NULL_LAZY,
				     (conn->ull.ticks_slot +
				      ticks_slot_overhead),
				     ull_central_ticker_cb, conn, ticker_op_cb,
				     (void *)__LINE__);
	LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
		  (ticker_status == TICKER_STATUS_BUSY));

#if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	/* enable ticker job, irrespective of disabled in this function so
	 * first connection event can be scheduled as soon as possible.
	 */
	mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW, 1);
#endif
}
947 
/**
 * @brief Per-connection-event ticker expiry callback for the central role:
 *        run pending LL control procedures, de-mux Tx nodes and kick the
 *        LLL prepare via mayfly.
 *
 * @param ticks_at_expire Tick count at ticker expiry.
 * @param ticks_drift     Unused here.
 * @param remainder       Sub-tick remainder forwarded to LLL prepare.
 * @param lazy            Number of skipped intervals (or must-expire marker).
 * @param force           Forwarded to LLL prepare.
 * @param param           The ll_conn connection context.
 */
void ull_central_ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
			  uint32_t remainder, uint16_t lazy, uint8_t force,
			  void *param)
{
	/* NOTE(review): statics shared across invocations; presumably safe
	 * because this callback runs serialized in ULL_HIGH — confirm.
	 */
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_central_prepare};
	static struct lll_prepare_param p;
	struct ll_conn *conn;
	uint32_t err;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_M(1);

	conn = param;

	/* Check if stopping ticker (on disconnection, race with ticker expiry)
	 */
	if (unlikely(conn->lll.handle == 0xFFFF)) {
		DEBUG_RADIO_CLOSE_M(0);
		return;
	}

#if defined(CONFIG_BT_CTLR_CONN_META)
	conn->common.is_must_expire = (lazy == TICKER_LAZY_MUST_EXPIRE);
#endif
	/* If this is a must-expire callback, LLCP state machine does not need
	 * to know. Will be called with lazy > 0 when scheduled in air.
	 */
	if (!IS_ENABLED(CONFIG_BT_CTLR_CONN_META) ||
	    (lazy != TICKER_LAZY_MUST_EXPIRE)) {
		int ret;

		/* Handle any LL Control Procedures */
		ret = ull_conn_llcp(conn, ticks_at_expire, lazy);
		if (ret) {
			/* NOTE: Under BT_CTLR_LOW_LAT, ULL_LOW context is
			 *       disabled inside radio events, hence, abort any
			 *       active radio event which will re-enable
			 *       ULL_LOW context that permits ticker job to run.
			 */
			if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) &&
			    (CONFIG_BT_CTLR_LLL_PRIO ==
			     CONFIG_BT_CTLR_ULL_LOW_PRIO)) {
				ll_radio_state_abort();
			}

			DEBUG_RADIO_CLOSE_M(0);
			return;
		}
	}

	/* Increment prepare reference count */
	ref = ull_ref_inc(&conn->ull);
	LL_ASSERT(ref);

	/* De-mux 2 tx node from FIFO, sufficient to be able to set MD bit */
	ull_conn_tx_demux(2);

	/* Enqueue towards LLL */
	ull_conn_tx_lll_enqueue(conn, 2);

	/* Append timing parameters */
	p.ticks_at_expire = ticks_at_expire;
	p.remainder = remainder;
	p.lazy = lazy;
	p.force = force;
	p.param = &conn->lll;
	mfy.param = &p;

	/* Kick LLL prepare */
	err = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
			     0, &mfy);
	LL_ASSERT(!err);

	/* De-mux remaining tx nodes from FIFO */
	ull_conn_tx_demux(UINT8_MAX);

	/* Enqueue towards LLL */
	ull_conn_tx_lll_enqueue(conn, UINT8_MAX);

	DEBUG_RADIO_PREPARE_M(1);
}
1030 
1031 uint8_t ull_central_chm_update(void)
1032 {
1033 	uint16_t handle;
1034 
1035 	handle = CONFIG_BT_MAX_CONN;
1036 	while (handle--) {
1037 		struct ll_conn *conn;
1038 		uint8_t ret;
1039 
1040 		conn = ll_connected_get(handle);
1041 		if (!conn || conn->lll.role) {
1042 			continue;
1043 		}
1044 
1045 		ret = ull_conn_llcp_req(conn);
1046 		if (ret) {
1047 			return ret;
1048 		}
1049 
1050 		/* Fill Channel Map here, fill instant when enqueued to LLL */
1051 		ull_chan_map_get(conn->llcp.chan_map.chm);
1052 		conn->llcp.chan_map.initiate = 1U;
1053 
1054 		conn->llcp_type = LLCP_CHAN_MAP;
1055 		conn->llcp_req++;
1056 	}
1057 
1058 	return 0;
1059 }
1060 
1061 static void ticker_op_stop_scan_cb(uint32_t status, void *param)
1062 {
1063 	/* NOTE: Nothing to do here, present here to add debug code if required
1064 	 */
1065 }
1066 
1067 #if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_CTLR_PHY_CODED)
/* Stop-completion callback for the other-PHY scanner's ticker: if that
 * scanner still has an active radio event (non-zero ULL reference count),
 * enqueue lll_disable towards LLL to abort it.
 */
static void ticker_op_stop_scan_other_cb(uint32_t status, void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, NULL};
	struct ll_scan_set *scan;
	struct ull_hdr *hdr;

	/* Ignore if race between thread and ULL */
	if (status != TICKER_STATUS_SUCCESS) {
		/* TODO: detect race */

		return;
	}

	/* NOTE: We are in ULL_LOW which can be pre-empted by ULL_HIGH.
	 *       As we are in the callback after successful stop of the
	 *       ticker, the ULL reference count will not be modified
	 *       further hence it is safe to check and act on either the need
	 *       to call lll_disable or not.
	 */
	scan = param;
	hdr = &scan->ull;
	mfy.param = &scan->lll;
	if (ull_ref_get(hdr)) {
		uint32_t ret;

		mfy.fp = lll_disable;
		ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
				     TICKER_USER_ID_LLL, 0, &mfy);
		LL_ASSERT(!ret);
	}
}
1100 #endif /* CONFIG_BT_CTLR_ADV_EXT && CONFIG_BT_CTLR_PHY_CODED */
1101 
1102 static void ticker_op_cb(uint32_t status, void *param)
1103 {
1104 	ARG_UNUSED(param);
1105 
1106 	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
1107 }
1108 
1109 static inline void conn_release(struct ll_scan_set *scan)
1110 {
1111 	struct node_rx_pdu *cc;
1112 	struct lll_conn *lll;
1113 	struct ll_conn *conn;
1114 	memq_link_t *link;
1115 
1116 	lll = scan->lll.conn;
1117 	LL_ASSERT(!lll->link_tx_free);
1118 	link = memq_deinit(&lll->memq_tx.head, &lll->memq_tx.tail);
1119 	LL_ASSERT(link);
1120 	lll->link_tx_free = link;
1121 
1122 	conn = HDR_LLL2ULL(lll);
1123 
1124 	cc = (void *)&conn->llcp_terminate.node_rx;
1125 	link = cc->hdr.link;
1126 	LL_ASSERT(link);
1127 
1128 	ll_rx_link_release(link);
1129 
1130 	ll_conn_release(conn);
1131 	scan->lll.conn = NULL;
1132 }
1133