/*
 * Copyright (c) 2018-2021 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stddef.h>
#include <zephyr.h>
#include <soc.h>
#include <device.h>
#include <bluetooth/bluetooth.h>
#include <sys/byteorder.h>

#include "hal/cpu.h"
#include "hal/ecb.h"
#include "hal/ccm.h"
#include "hal/ticker.h"

#include "util/util.h"
#include "util/mem.h"
#include "util/memq.h"
#include "util/mfifo.h"
#include "util/mayfly.h"

#include "ticker/ticker.h"

#include "pdu.h"

#include "lll.h"
#include "lll_clock.h"
#include "lll/lll_df_types.h"
#include "lll_conn.h"
#include "lll_conn_iso.h"

#include "ull_conn_types.h"
#include "ull_conn_iso_types.h"
#include "ull_internal.h"
#include "ull_sched_internal.h"
#include "ull_chan_internal.h"
#include "ull_conn_internal.h"
#include "ull_periph_internal.h"
#include "ull_central_internal.h"

#include "ull_iso_internal.h"
#include "ull_conn_iso_internal.h"
#include "ull_peripheral_iso_internal.h"

#if defined(CONFIG_BT_CTLR_USER_EXT)
#include "ull_vendor.h"
#endif /* CONFIG_BT_CTLR_USER_EXT */

#include "ll.h"
#include "ll_feat.h"
#include "ll_settings.h"

#define BT_DBG_ENABLED IS_ENABLED(CONFIG_BT_DEBUG_HCI_DRIVER)
#define LOG_MODULE_NAME bt_ctlr_ull_conn
#include "common/log.h"
#include "hal/debug.h"

/**
 *  User CPR Interval
 */
#if !defined(CONFIG_BT_CTLR_USER_CPR_INTERVAL_MIN)
/* Bluetooth defined CPR Interval Minimum (7.5ms) */
#define CONN_INTERVAL_MIN(x) (6)
#else /* CONFIG_BT_CTLR_USER_CPR_INTERVAL_MIN */
/* Proprietary user defined CPR Interval Minimum */
extern uint16_t ull_conn_interval_min_get(struct ll_conn *conn);
#define CONN_INTERVAL_MIN(x) (MAX(ull_conn_interval_min_get(x), 1))
#endif /* CONFIG_BT_CTLR_USER_CPR_INTERVAL_MIN */

static int init_reset(void);
static void tx_demux(void *param);
static struct node_tx *tx_ull_dequeue(struct ll_conn *conn, struct node_tx *tx);

static void ticker_update_conn_op_cb(uint32_t status, void *param);
static void ticker_stop_conn_op_cb(uint32_t status, void *param);
static void ticker_start_conn_op_cb(uint32_t status, void *param);

static void conn_setup_adv_scan_disabled_cb(void *param);
static inline void disable(uint16_t handle);
static void conn_cleanup(struct ll_conn *conn, uint8_t reason);
static void conn_cleanup_finalize(struct ll_conn *conn);
static void tx_ull_flush(struct ll_conn *conn);
static void ticker_stop_op_cb(uint32_t status, void *param);
static void conn_disable(void *param);
static void disabled_cb(void *param);
static void tx_lll_flush(void *param);

#if defined(CONFIG_BT_CTLR_LLID_DATA_START_EMPTY)
static int empty_data_start_release(struct ll_conn *conn, struct node_tx *tx);
#endif /* CONFIG_BT_CTLR_LLID_DATA_START_EMPTY */

static inline void ctrl_tx_enqueue(struct ll_conn *conn, struct node_tx *tx);
static inline void event_fex_prep(struct ll_conn *conn);
static inline void event_vex_prep(struct ll_conn *conn);
static inline int event_conn_upd_prep(struct ll_conn *conn, uint16_t lazy,
				      uint32_t ticks_at_expire);
static inline void event_ch_map_prep(struct ll_conn *conn,
				     uint16_t event_counter);

#if defined(CONFIG_BT_CTLR_LE_ENC)
static inline void ctrl_tx_check_and_resume(struct ll_conn *conn);
static bool is_enc_req_pause_tx(struct ll_conn *conn);
static inline void event_enc_prep(struct ll_conn *conn);
#if defined(CONFIG_BT_PERIPHERAL)
static int enc_rsp_send(struct ll_conn *conn);
#endif /* CONFIG_BT_PERIPHERAL */
static int start_enc_rsp_send(struct ll_conn *conn,
			      struct pdu_data *pdu_ctrl_tx);
static inline bool ctrl_is_unexpected(struct ll_conn *conn, uint8_t opcode);
#endif /* CONFIG_BT_CTLR_LE_ENC */

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
/* NOTE: cpr_active_* functions are inline as they are simple assignments to
 *       a global variable; if in the future they get called from more than
 *       two callers, we don't want the callers branching into these, which
 *       can add to CPU use inside the ULL ISR.
 */
static inline void cpr_active_check_and_reset(struct ll_conn *conn);
static inline void cpr_active_reset(void);

static inline void event_conn_param_prep(struct ll_conn *conn,
					 uint16_t event_counter,
					 uint32_t ticks_at_expire);
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */

#if defined(CONFIG_BT_CTLR_LE_PING)
static inline void event_ping_prep(struct ll_conn *conn);
#endif /* CONFIG_BT_CTLR_LE_PING */

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
static inline void event_len_prep(struct ll_conn *conn);
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

#if defined(CONFIG_BT_CTLR_PHY)
static inline void event_phy_req_prep(struct ll_conn *conn);
static inline void event_phy_upd_ind_prep(struct ll_conn *conn,
					  uint16_t event_counter);
#endif /* CONFIG_BT_CTLR_PHY */

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
static inline void event_send_cis_rsp(struct ll_conn *conn);
static inline void event_peripheral_iso_prep(struct ll_conn *conn,
					     uint16_t event_counter,
					     uint32_t ticks_at_expire);
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */

static inline void ctrl_tx_pre_ack(struct ll_conn *conn,
				   struct pdu_data *pdu_tx);
static inline void ctrl_tx_ack(struct ll_conn *conn, struct node_tx **tx,
			       struct pdu_data *pdu_tx);
static inline int ctrl_rx(memq_link_t *link, struct node_rx_pdu **rx,
			  struct pdu_data *pdu_rx, struct ll_conn *conn);

#if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
static uint8_t force_md_cnt_calc(struct lll_conn *lll_conn, uint32_t tx_rate);
#endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */

#if !defined(BT_CTLR_USER_TX_BUFFER_OVERHEAD)
#define BT_CTLR_USER_TX_BUFFER_OVERHEAD 0
#endif /* BT_CTLR_USER_TX_BUFFER_OVERHEAD */

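/* Size of a data Tx buffer: the node_tx metadata and the Data PDU header,
 * followed by the ACL Tx payload, rounded up by MROUND for alignment.
 */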
#define CONN_TX_BUF_SIZE MROUND(offsetof(struct node_tx, pdu) + \
				offsetof(struct pdu_data, lldata) + \
				(CONFIG_BT_BUF_ACL_TX_SIZE + \
				BT_CTLR_USER_TX_BUFFER_OVERHEAD))

/**
 * One connection may take up to 4 TX buffers for procedures
 * simultaneously, for example 2 for encryption, 1 for termination,
 * and one that is in flight and has not been returned to the pool
 */
#define CONN_TX_CTRL_BUFFERS (4 * CONFIG_BT_CTLR_LLCP_CONN)
#define CONN_TX_CTRL_BUF_SIZE MROUND(offsetof(struct node_tx, pdu) + \
				     offsetof(struct pdu_data, llctrl) + \
				     sizeof(struct pdu_data_llctrl))

/* Terminate procedure state values */
#define TERM_REQ   1
#define TERM_ACKED 3
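/* NOTE: The values above are (req - ack) deltas of the free running
 *       llcp_terminate.req and llcp_terminate.ack counters, as used below: a
 *       delta of 1 means the procedure has been requested, and a delta of 3
 *       means the transmitted TERMINATE_IND has been acknowledged.
 */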

static MFIFO_DEFINE(conn_tx, sizeof(struct lll_tx), CONFIG_BT_BUF_ACL_TX_COUNT);
static MFIFO_DEFINE(conn_ack, sizeof(struct lll_tx),
		    (CONFIG_BT_BUF_ACL_TX_COUNT + CONN_TX_CTRL_BUFFERS));

static struct {
	void *free;
	uint8_t pool[CONN_TX_BUF_SIZE * CONFIG_BT_BUF_ACL_TX_COUNT];
} mem_conn_tx;

static struct {
	void *free;
	uint8_t pool[CONN_TX_CTRL_BUF_SIZE * CONN_TX_CTRL_BUFFERS];
} mem_conn_tx_ctrl;

static struct {
	void *free;
	uint8_t pool[sizeof(memq_link_t) *
		  (CONFIG_BT_BUF_ACL_TX_COUNT + CONN_TX_CTRL_BUFFERS)];
} mem_link_tx;

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
static uint16_t default_tx_octets;
static uint16_t default_tx_time;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

#if defined(CONFIG_BT_CTLR_PHY)
static uint8_t default_phy_tx;
static uint8_t default_phy_rx;
#endif /* CONFIG_BT_CTLR_PHY */

static struct ll_conn conn_pool[CONFIG_BT_MAX_CONN];
static void *conn_free;

struct ll_conn *ll_conn_acquire(void)
{
	return mem_acquire(&conn_free);
}

void ll_conn_release(struct ll_conn *conn)
{
	mem_release(conn, &conn_free);
}

uint16_t ll_conn_handle_get(struct ll_conn *conn)
{
	return mem_index_get(conn, conn_pool, sizeof(struct ll_conn));
}

struct ll_conn *ll_conn_get(uint16_t handle)
{
	return mem_get(conn_pool, sizeof(struct ll_conn), handle);
}

struct ll_conn *ll_connected_get(uint16_t handle)
{
	struct ll_conn *conn;

	if (handle >= CONFIG_BT_MAX_CONN) {
		return NULL;
	}

	conn = ll_conn_get(handle);
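	/* The LLL context holds the handle only while the connection is
	 * established; a mismatch means the context is free or being torn
	 * down.
	 */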
	if (conn->lll.handle != handle) {
		return NULL;
	}

	return conn;
}

uint16_t ll_conn_free_count_get(void)
{
	return mem_free_count_get(conn_free);
}

void *ll_tx_mem_acquire(void)
{
	return mem_acquire(&mem_conn_tx.free);
}

void ll_tx_mem_release(void *tx)
{
	mem_release(tx, &mem_conn_tx.free);
}

int ll_tx_mem_enqueue(uint16_t handle, void *tx)
{
#if defined(CONFIG_BT_CTLR_THROUGHPUT)
#define BT_CTLR_THROUGHPUT_PERIOD 1000000000UL
	static uint32_t tx_rate;
	static uint32_t tx_cnt;
#endif /* CONFIG_BT_CTLR_THROUGHPUT */
	struct lll_tx *lll_tx;
	struct ll_conn *conn;
	uint8_t idx;

	conn = ll_connected_get(handle);
	if (!conn) {
		return -EINVAL;
	}

	idx = MFIFO_ENQUEUE_GET(conn_tx, (void **) &lll_tx);
	if (!lll_tx) {
		return -ENOBUFS;
	}

	lll_tx->handle = handle;
	lll_tx->node = tx;

	MFIFO_ENQUEUE(conn_tx, idx);

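	/* If the connection event is currently active (non-zero ULL
	 * reference count), kick tx_demux in the ULL_HIGH context via a
	 * mayfly so that the enqueued PDU can be picked up within this event.
	 */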
	if (ull_ref_get(&conn->ull)) {
		static memq_link_t link;
		static struct mayfly mfy = {0, 0, &link, NULL, tx_demux};

#if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
		if (tx_cnt >= CONFIG_BT_BUF_ACL_TX_COUNT) {
			uint8_t previous, force_md_cnt;

			force_md_cnt = force_md_cnt_calc(&conn->lll, tx_rate);
			previous = lll_conn_force_md_cnt_set(force_md_cnt);
			if (previous != force_md_cnt) {
				BT_INFO("force_md_cnt: old= %u, new= %u.",
					previous, force_md_cnt);
			}
		}
#endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */

		mfy.param = conn;

		mayfly_enqueue(TICKER_USER_ID_THREAD, TICKER_USER_ID_ULL_HIGH,
			       0, &mfy);

#if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
	} else {
		lll_conn_force_md_cnt_set(0U);
#endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */
	}

	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
		ull_periph_latency_cancel(conn, handle);
	}

#if defined(CONFIG_BT_CTLR_THROUGHPUT)
	static uint32_t last_cycle_stamp;
	static uint32_t tx_len;
	struct pdu_data *pdu;
	uint32_t cycle_stamp;
	uint64_t delta;

	cycle_stamp = k_cycle_get_32();
	delta = k_cyc_to_ns_floor64(cycle_stamp - last_cycle_stamp);
	if (delta > BT_CTLR_THROUGHPUT_PERIOD) {
		BT_INFO("incoming Tx: count= %u, len= %u, rate= %u bps.",
			tx_cnt, tx_len, tx_rate);

		last_cycle_stamp = cycle_stamp;
		tx_cnt = 0U;
		tx_len = 0U;
	}

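	/* Accumulate the payload length and compute the incoming Tx rate in
	 * bits per second (tx_len << 3 converts bytes to bits).
	 */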
	pdu = (void *)((struct node_tx *)tx)->pdu;
	tx_len += pdu->len;
	tx_rate = ((uint64_t)tx_len << 3) * BT_CTLR_THROUGHPUT_PERIOD / delta;
	tx_cnt++;
#endif /* CONFIG_BT_CTLR_THROUGHPUT */

	return 0;
}

uint8_t ll_conn_update(uint16_t handle, uint8_t cmd, uint8_t status, uint16_t interval_min,
		    uint16_t interval_max, uint16_t latency, uint16_t timeout)
{
	struct ll_conn *conn;

	conn = ll_connected_get(handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	if (!cmd) {
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
		if (!conn->llcp_conn_param.disabled &&
		    (!conn->common.fex_valid ||
		     (conn->llcp_feature.features_conn &
		      BIT64(BT_LE_FEAT_BIT_CONN_PARAM_REQ)))) {
			cmd++;
		} else if (conn->lll.role) {
			return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
		}
#else /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
		if (conn->lll.role) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
#endif /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
	}

	if (!cmd) {
		if (conn->llcp_cu.req != conn->llcp_cu.ack) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}

		conn->llcp_cu.win_size = 1U;
		conn->llcp_cu.win_offset_us = 0U;
		conn->llcp_cu.interval = interval_max;
		conn->llcp_cu.latency = latency;
		conn->llcp_cu.timeout = timeout;
		conn->llcp_cu.state = LLCP_CUI_STATE_USE;
		conn->llcp_cu.cmd = 1U;

		conn->llcp_cu.req++;
	} else {
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
		cmd--;

		if (cmd) {
			if ((conn->llcp_conn_param.req ==
			     conn->llcp_conn_param.ack) ||
			    (conn->llcp_conn_param.state !=
			     LLCP_CPR_STATE_APP_WAIT)) {
				return BT_HCI_ERR_CMD_DISALLOWED;
			}

			conn->llcp_conn_param.status = status;
			conn->llcp_conn_param.state = cmd;
			conn->llcp_conn_param.cmd = 1U;
		} else {
			if (conn->llcp_conn_param.req !=
			    conn->llcp_conn_param.ack) {
				return BT_HCI_ERR_CMD_DISALLOWED;
			}

			conn->llcp_conn_param.status = 0U;
			conn->llcp_conn_param.interval_min = interval_min;
			conn->llcp_conn_param.interval_max = interval_max;
			conn->llcp_conn_param.latency = latency;
			conn->llcp_conn_param.timeout = timeout;
			conn->llcp_conn_param.state = cmd;
			conn->llcp_conn_param.cmd = 1U;
			conn->llcp_conn_param.req++;

			if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
			    conn->lll.role) {
				ull_periph_latency_cancel(conn, handle);
			}
		}

#else /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
		/* CPR feature not supported */
		return BT_HCI_ERR_CMD_DISALLOWED;
#endif /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
	}

	return 0;
}

uint8_t ll_chm_get(uint16_t handle, uint8_t *chm)
{
	struct ll_conn *conn;

	conn = ll_connected_get(handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	/* Iterate until we are sure the ISR did not modify the value while
	 * we were reading it from memory.
	 */
	do {
		conn->chm_updated = 0U;
		memcpy(chm, conn->lll.data_chan_map,
		       sizeof(conn->lll.data_chan_map));
	} while (conn->chm_updated);

	return 0;
}

static bool is_valid_disconnect_reason(uint8_t reason)
{
	switch (reason) {
	case BT_HCI_ERR_AUTH_FAIL:
	case BT_HCI_ERR_REMOTE_USER_TERM_CONN:
	case BT_HCI_ERR_REMOTE_LOW_RESOURCES:
	case BT_HCI_ERR_REMOTE_POWER_OFF:
	case BT_HCI_ERR_UNSUPP_REMOTE_FEATURE:
	case BT_HCI_ERR_PAIRING_NOT_SUPPORTED:
	case BT_HCI_ERR_UNACCEPT_CONN_PARAM:
		return true;
	default:
		return false;
	}
}

uint8_t ll_terminate_ind_send(uint16_t handle, uint8_t reason)
{
	struct ll_conn *conn;

	conn = ll_connected_get(handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	if (conn->llcp_terminate.req != conn->llcp_terminate.ack) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	if (!is_valid_disconnect_reason(reason)) {
		return BT_HCI_ERR_INVALID_PARAM;
	}

	conn->llcp_terminate.reason_own = reason;
	conn->llcp_terminate.req++; /* (req - ack) == 1, TERM_REQ */

	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
		ull_periph_latency_cancel(conn, handle);
	}

	return 0;
}

uint8_t ll_feature_req_send(uint16_t handle)
{
	struct ll_conn *conn;

	conn = ll_connected_get(handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	if (conn->llcp_feature.req != conn->llcp_feature.ack) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	conn->llcp_feature.req++;

	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
	    IS_ENABLED(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG) &&
	    conn->lll.role) {
		ull_periph_latency_cancel(conn, handle);
	}

	return 0;
}

uint8_t ll_version_ind_send(uint16_t handle)
{
	struct ll_conn *conn;

	conn = ll_connected_get(handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	if (conn->llcp_version.req != conn->llcp_version.ack) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	conn->llcp_version.req++;

	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
		ull_periph_latency_cancel(conn, handle);
	}

	return 0;
}

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
uint32_t ll_length_req_send(uint16_t handle, uint16_t tx_octets,
			    uint16_t tx_time)
{
	struct ll_conn *conn;

#if defined(CONFIG_BT_CTLR_PARAM_CHECK)
#if defined(CONFIG_BT_CTLR_PHY_CODED)
	uint16_t tx_time_max =
			PDU_DC_MAX_US(CONFIG_BT_BUF_ACL_TX_SIZE, PHY_CODED);
#else /* !CONFIG_BT_CTLR_PHY_CODED */
	uint16_t tx_time_max =
			PDU_DC_MAX_US(CONFIG_BT_BUF_ACL_TX_SIZE, PHY_1M);
#endif /* !CONFIG_BT_CTLR_PHY_CODED */

	if ((tx_octets > CONFIG_BT_BUF_ACL_TX_SIZE) ||
	    (tx_time > tx_time_max)) {
		return BT_HCI_ERR_INVALID_PARAM;
	}
#endif /* CONFIG_BT_CTLR_PARAM_CHECK */

	conn = ll_connected_get(handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	if (conn->llcp_length.disabled ||
	    (conn->common.fex_valid &&
	     !(conn->llcp_feature.features_conn & BIT64(BT_LE_FEAT_BIT_DLE)))) {
		return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
	}

	if (conn->llcp_length.req != conn->llcp_length.ack) {
		switch (conn->llcp_length.state) {
		case LLCP_LENGTH_STATE_RSP_ACK_WAIT:
		case LLCP_LENGTH_STATE_RESIZE_RSP:
		case LLCP_LENGTH_STATE_RESIZE_RSP_ACK_WAIT:
			/* cached until peer procedure completes */
			if (!conn->llcp_length.cache.tx_octets) {
				conn->llcp_length.cache.tx_octets = tx_octets;
#if defined(CONFIG_BT_CTLR_PHY)
				conn->llcp_length.cache.tx_time = tx_time;
#endif /* CONFIG_BT_CTLR_PHY */
				return 0;
			}
			__fallthrough;
		default:
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}

	/* TODO: parameter check tx_octets and tx_time */

	conn->llcp_length.state = LLCP_LENGTH_STATE_REQ;
	conn->llcp_length.tx_octets = tx_octets;

#if defined(CONFIG_BT_CTLR_PHY)
	conn->llcp_length.tx_time = tx_time;
#endif /* CONFIG_BT_CTLR_PHY */

	conn->llcp_length.req++;

	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
		ull_periph_latency_cancel(conn, handle);
	}

	return 0;
}

void ll_length_default_get(uint16_t *max_tx_octets, uint16_t *max_tx_time)
{
	*max_tx_octets = default_tx_octets;
	*max_tx_time = default_tx_time;
}

uint32_t ll_length_default_set(uint16_t max_tx_octets, uint16_t max_tx_time)
{
	/* TODO: parameter check (for BT 5.0 compliance) */

	default_tx_octets = max_tx_octets;
	default_tx_time = max_tx_time;

	return 0;
}

void ll_length_max_get(uint16_t *max_tx_octets, uint16_t *max_tx_time,
		       uint16_t *max_rx_octets, uint16_t *max_rx_time)
{
	*max_tx_octets = LL_LENGTH_OCTETS_RX_MAX;
	*max_rx_octets = LL_LENGTH_OCTETS_RX_MAX;
#if defined(CONFIG_BT_CTLR_PHY)
	*max_tx_time = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, PHY_CODED);
	*max_rx_time = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, PHY_CODED);
#else /* !CONFIG_BT_CTLR_PHY */
	/* Default is 1M packet timing */
	*max_tx_time = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, PHY_1M);
	*max_rx_time = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, PHY_1M);
#endif /* !CONFIG_BT_CTLR_PHY */
}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

#if defined(CONFIG_BT_CTLR_PHY)
uint8_t ll_phy_get(uint16_t handle, uint8_t *tx, uint8_t *rx)
{
	struct ll_conn *conn;

	conn = ll_connected_get(handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	/* TODO: context safe read */
	*tx = conn->lll.phy_tx;
	*rx = conn->lll.phy_rx;

	return 0;
}

uint8_t ll_phy_default_set(uint8_t tx, uint8_t rx)
{
	/* TODO: validate against supported phy */

	default_phy_tx = tx;
	default_phy_rx = rx;

	return 0;
}

uint8_t ll_phy_req_send(uint16_t handle, uint8_t tx, uint8_t flags, uint8_t rx)
{
	struct ll_conn *conn;

	conn = ll_connected_get(handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	if (conn->llcp_phy.disabled ||
	    (conn->common.fex_valid &&
	     !(conn->llcp_feature.features_conn & BIT64(BT_LE_FEAT_BIT_PHY_2M)) &&
	     !(conn->llcp_feature.features_conn &
	       BIT64(BT_LE_FEAT_BIT_PHY_CODED)))) {
		return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
	}

	if (conn->llcp_phy.req != conn->llcp_phy.ack) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	conn->llcp_phy.state = LLCP_PHY_STATE_REQ;
	conn->llcp_phy.cmd = 1U;
	conn->llcp_phy.tx = tx;
	conn->llcp_phy.flags = flags;
	conn->llcp_phy.rx = rx;
	conn->llcp_phy.req++;

	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn->lll.role) {
		ull_periph_latency_cancel(conn, handle);
	}

	return 0;
}
#endif /* CONFIG_BT_CTLR_PHY */

#if defined(CONFIG_BT_CTLR_CONN_RSSI)
uint8_t ll_rssi_get(uint16_t handle, uint8_t *rssi)
{
	struct ll_conn *conn;

	conn = ll_connected_get(handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

	*rssi = conn->lll.rssi_latest;

	return 0;
}
#endif /* CONFIG_BT_CTLR_CONN_RSSI */

#if defined(CONFIG_BT_CTLR_LE_PING)
uint8_t ll_apto_get(uint16_t handle, uint16_t *apto)
{
	struct ll_conn *conn;

	conn = ll_connected_get(handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

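	/* apto_reload is a count of connection events, the interval is in
	 * 1.25 ms units and the returned timeout is in 10 ms units, hence
	 * the scaling by 125 / 1000.
	 */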
	*apto = conn->apto_reload * conn->lll.interval * 125U / 1000;

	return 0;
}

uint8_t ll_apto_set(uint16_t handle, uint16_t apto)
{
	struct ll_conn *conn;

	conn = ll_connected_get(handle);
	if (!conn) {
		return BT_HCI_ERR_UNKNOWN_CONN_ID;
	}

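	/* Convert the requested timeout (10 ms units) into a count of
	 * connection events, given the connection interval in microseconds.
	 */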
	conn->apto_reload = RADIO_CONN_EVENTS(apto * 10U * 1000U,
					      conn->lll.interval *
					      CONN_INT_UNIT_US);

	return 0;
}
#endif /* CONFIG_BT_CTLR_LE_PING */

int ull_conn_init(void)
{
	int err;

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

int ull_conn_reset(void)
{
	uint16_t handle;
	int err;

#if defined(CONFIG_BT_CENTRAL)
	/* Reset initiator */
	(void)ull_central_reset();
#endif /* CONFIG_BT_CENTRAL */

	for (handle = 0U; handle < CONFIG_BT_MAX_CONN; handle++) {
		disable(handle);
	}

	/* Re-initialize the Tx mfifo */
	MFIFO_INIT(conn_tx);

	/* Re-initialize the Tx Ack mfifo */
	MFIFO_INIT(conn_ack);

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
	/* Reset CPR mutex */
	cpr_active_reset();
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
uint16_t ull_conn_default_tx_octets_get(void)
{
	return default_tx_octets;
}

#if defined(CONFIG_BT_CTLR_PHY)
uint16_t ull_conn_default_tx_time_get(void)
{
	return default_tx_time;
}
#endif /* CONFIG_BT_CTLR_PHY */
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

#if defined(CONFIG_BT_CTLR_PHY)
uint8_t ull_conn_default_phy_tx_get(void)
{
	return default_phy_tx;
}

uint8_t ull_conn_default_phy_rx_get(void)
{
	return default_phy_rx;
}
#endif /* CONFIG_BT_CTLR_PHY */

#if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN)
bool ull_conn_peer_connected(uint8_t const own_id_addr_type,
			     uint8_t const *const own_id_addr,
			     uint8_t const peer_id_addr_type,
			     uint8_t const *const peer_id_addr)
{
	uint16_t handle;

	for (handle = 0U; handle < CONFIG_BT_MAX_CONN; handle++) {
		struct ll_conn *conn = ll_connected_get(handle);

		if (conn &&
		    conn->peer_id_addr_type == peer_id_addr_type &&
		    !memcmp(conn->peer_id_addr, peer_id_addr, BDADDR_SIZE) &&
		    conn->own_id_addr_type == own_id_addr_type &&
		    !memcmp(conn->own_id_addr, own_id_addr, BDADDR_SIZE)) {
			return true;
		}
	}

	return false;
}
#endif /* CONFIG_BT_CTLR_CHECK_SAME_PEER_CONN */

void ull_conn_setup(memq_link_t *rx_link, struct node_rx_hdr *rx)
{
	struct node_rx_ftr *ftr;
	struct lll_conn *lll;
	struct ull_hdr *hdr;

	/* Store the link in the node rx so that when the done event is
	 * processed it can be used to enqueue the node rx towards the LL
	 * context.
	 */
	rx->link = rx_link;

	/* NOTE: LLL conn context SHALL be after lll_hdr in
	 *       struct lll_adv and struct lll_scan.
	 */
	ftr = &(rx->rx_ftr);
	lll = *((struct lll_conn **)((uint8_t *)ftr->param +
				     sizeof(struct lll_hdr)));

	/* Check the reference count and decide whether to set up the
	 * connection here or when the done event arrives.
	 */
	hdr = HDR_LLL2ULL(ftr->param);
	if (ull_ref_get(hdr)) {
		/* Setup connection in ULL disabled callback,
		 * pass the node rx as disabled callback parameter.
		 */
		LL_ASSERT(!hdr->disabled_cb);
		hdr->disabled_param = rx;
		hdr->disabled_cb = conn_setup_adv_scan_disabled_cb;
	} else {
		conn_setup_adv_scan_disabled_cb(rx);
	}
}

int ull_conn_rx(memq_link_t *link, struct node_rx_pdu **rx)
{
	struct pdu_data *pdu_rx;
	struct ll_conn *conn;

	conn = ll_connected_get((*rx)->hdr.handle);
	if (!conn) {
		/* Mark buffer for release */
		(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;

		return 0;
	}

	pdu_rx = (void *)(*rx)->pdu;

	switch (pdu_rx->ll_id) {
	case PDU_DATA_LLID_CTRL:
	{
		int nack;

		nack = ctrl_rx(link, rx, pdu_rx, conn);
		return nack;
	}

	case PDU_DATA_LLID_DATA_CONTINUE:
	case PDU_DATA_LLID_DATA_START:
#if defined(CONFIG_BT_CTLR_LE_ENC)
		if (conn->llcp_enc.pause_rx) {
			conn->llcp_terminate.reason_final =
				BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL;

			/* Mark buffer for release */
			(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
		}
#endif /* CONFIG_BT_CTLR_LE_ENC */
		break;

	case PDU_DATA_LLID_RESV:
	default:
#if defined(CONFIG_BT_CTLR_LE_ENC)
		if (conn->llcp_enc.pause_rx) {
			conn->llcp_terminate.reason_final =
				BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL;
		}
#endif /* CONFIG_BT_CTLR_LE_ENC */

		/* Invalid LL id, drop it. */

		/* Mark buffer for release */
		(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;

		break;
	}

	return 0;
}

int ull_conn_llcp(struct ll_conn *conn, uint32_t ticks_at_expire, uint16_t lazy)
{
	/* Check if no other procedure with instant is requested and not in
	 * Encryption setup.
	 */
	if ((conn->llcp_ack == conn->llcp_req) &&
#if defined(CONFIG_BT_CTLR_LE_ENC)
#if defined(CONFIG_BT_PERIPHERAL)
	    (!conn->lll.role || (conn->periph.llcp_type == LLCP_NONE)) &&
#endif /* CONFIG_BT_PERIPHERAL */
	    !conn->llcp_enc.pause_rx) {
#else /* !CONFIG_BT_CTLR_LE_ENC */
	    1) {
#endif /* !CONFIG_BT_CTLR_LE_ENC */

		/* TODO: Optimize the checks below, maybe have common flag */

		/* check if connection update procedure is requested */
		if (conn->llcp_cu.ack != conn->llcp_cu.req) {
			/* switch to LLCP_CONN_UPD state machine */
			conn->llcp_type = LLCP_CONN_UPD;
			conn->llcp_ack -= 2U;

		/* check if feature exchange procedure is requested */
		} else if (conn->llcp_feature.ack != conn->llcp_feature.req) {
			/* handle feature exchange state machine */
			event_fex_prep(conn);

		/* check if version info procedure is requested */
		} else if (conn->llcp_version.ack != conn->llcp_version.req) {
			/* handle version info state machine */
			event_vex_prep(conn);

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
		/* check if CPR procedure is requested */
		} else if (conn->llcp_conn_param.ack !=
			   conn->llcp_conn_param.req) {
			struct lll_conn *lll = &conn->lll;
			uint16_t event_counter;

			/* Calculate current event counter */
			event_counter = lll->event_counter +
					lll->latency_prepare + lazy;

			/* handle CPR state machine */
			event_conn_param_prep(conn, event_counter,
					      ticks_at_expire);
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
		/* check if DLE procedure is requested */
		} else if (conn->llcp_length.ack != conn->llcp_length.req) {
			/* handle DLU state machine */
			event_len_prep(conn);
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

#if defined(CONFIG_BT_CTLR_PHY)
		/* check if PHY Req procedure is requested */
		} else if (conn->llcp_phy.ack != conn->llcp_phy.req) {
			/* handle PHY Upd state machine */
			event_phy_req_prep(conn);
#endif /* CONFIG_BT_CTLR_PHY */

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
		} else if (conn->llcp_cis.req != conn->llcp_cis.ack) {
			if (conn->llcp_cis.state == LLCP_CIS_STATE_RSP_WAIT) {
				/* Handle CIS response */
				event_send_cis_rsp(conn);
			} else if (conn->llcp_cis.state ==
						LLCP_CIS_STATE_INST_WAIT) {
				struct lll_conn *lll = &conn->lll;
				uint16_t event_counter;

				/* Calculate current event counter */
				event_counter = lll->event_counter +
						lll->latency_prepare + lazy;

				/* Start CIS peripheral */
				event_peripheral_iso_prep(conn,
							  event_counter,
							  ticks_at_expire);
			}
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
		}
	}

	/* Check if procedures with instant or encryption setup are requested
	 * or active.
	 */
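	/* NOTE: (llcp_req - llcp_ack) is tracked modulo 4; a delta of 1 is a
	 *       host requested procedure and a delta of 2 (set through the
	 *       llcp_ack -= 2 above) marks the procedure as taken over by the
	 *       per-event state machines below.
	 */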
	if (((conn->llcp_req - conn->llcp_ack) & 0x03) == 0x02) {
		/* Process parallel procedures that are active */
		if (0) {
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
		/* Check if DLE in progress */
		} else if (conn->llcp_length.ack != conn->llcp_length.req) {
			if ((conn->llcp_length.state ==
			     LLCP_LENGTH_STATE_RESIZE) ||
			    (conn->llcp_length.state ==
			     LLCP_LENGTH_STATE_RESIZE_RSP)) {
				/* handle DLU state machine */
				event_len_prep(conn);
			}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
		}

		/* Process procedures with instants or encryption setup */
		/* FIXME: Make LE Ping cacheable */
		switch (conn->llcp_type) {
		case LLCP_CONN_UPD:
		{
			if (event_conn_upd_prep(conn, lazy,
						ticks_at_expire) == 0) {
				return -ECANCELED;
			}
		}
		break;

		case LLCP_CHAN_MAP:
		{
			struct lll_conn *lll = &conn->lll;
			uint16_t event_counter;

			/* Calculate current event counter */
			event_counter = lll->event_counter +
					lll->latency_prepare + lazy;

			event_ch_map_prep(conn, event_counter);
		}
		break;

#if defined(CONFIG_BT_CTLR_LE_ENC)
		case LLCP_ENCRYPTION:
			event_enc_prep(conn);
			break;
#endif /* CONFIG_BT_CTLR_LE_ENC */

#if defined(CONFIG_BT_CTLR_LE_PING)
		case LLCP_PING:
			event_ping_prep(conn);
			break;
#endif /* CONFIG_BT_CTLR_LE_PING */

#if defined(CONFIG_BT_CTLR_PHY)
		case LLCP_PHY_UPD:
		{
			struct lll_conn *lll = &conn->lll;
			uint16_t event_counter;

			/* Calculate current event counter */
			event_counter = lll->event_counter +
					lll->latency_prepare + lazy;

			event_phy_upd_ind_prep(conn, event_counter);
		}
		break;
#endif /* CONFIG_BT_CTLR_PHY */

		default:
			LL_ASSERT(0);
			break;
		}
	}

#if defined(CONFIG_BT_PERIPHERAL) && defined(CONFIG_BT_CTLR_LE_ENC)
	/* Run any pending local peripheral role initiated procedure stored
	 * when the peer central initiated an encryption procedure.
	 */
	if (conn->lll.role && (conn->periph.llcp_type != LLCP_NONE)) {
		switch (conn->periph.llcp_type) {
		case LLCP_CONN_UPD:
		{
			if (event_conn_upd_prep(conn, lazy,
						ticks_at_expire) == 0) {
				return -ECANCELED;
			}
		}
		break;

		case LLCP_CHAN_MAP:
		{
			struct lll_conn *lll = &conn->lll;
			uint16_t event_counter;

			/* Calculate current event counter */
			event_counter = lll->event_counter +
					lll->latency_prepare + lazy;

			event_ch_map_prep(conn, event_counter);
		}
		break;

#if defined(CONFIG_BT_CTLR_PHY)
		case LLCP_PHY_UPD:
		{
			struct lll_conn *lll = &conn->lll;
			uint16_t event_counter;

			/* Calculate current event counter */
			event_counter = lll->event_counter +
					lll->latency_prepare + lazy;

			event_phy_upd_ind_prep(conn, event_counter);
		}
		break;
#endif /* CONFIG_BT_CTLR_PHY */

		default:
			LL_ASSERT(0);
			break;
		}
	}
#endif /* CONFIG_BT_PERIPHERAL && CONFIG_BT_CTLR_LE_ENC */

	/* Terminate Procedure Request */
	if (((conn->llcp_terminate.req - conn->llcp_terminate.ack) & 0xFF) ==
	    TERM_REQ) {
		struct node_tx *tx;

		tx = mem_acquire(&mem_conn_tx_ctrl.free);
		if (tx) {
			struct pdu_data *pdu_tx = (void *)tx->pdu;

			/* Terminate Procedure initiated,
			 * make (req - ack) == 2
			 */
			conn->llcp_terminate.ack--;

			/* place the terminate ind packet in tx queue */
			pdu_tx->ll_id = PDU_DATA_LLID_CTRL;
			pdu_tx->len = offsetof(struct pdu_data_llctrl,
						    terminate_ind) +
				sizeof(struct pdu_data_llctrl_terminate_ind);
			pdu_tx->llctrl.opcode =
				PDU_DATA_LLCTRL_TYPE_TERMINATE_IND;
			pdu_tx->llctrl.terminate_ind.error_code =
				conn->llcp_terminate.reason_own;

			ctrl_tx_enqueue(conn, tx);
		}

		if (!conn->procedure_expire) {
			/* Terminate Procedure timeout is started, will
			 * replace any other timeout running
			 */
			conn->procedure_expire = conn->supervision_reload;

			/* NOTE: if supervision timeout equals connection
			 * interval, don't time out in the current event.
			 */
			if (conn->procedure_expire <= 1U) {
				conn->procedure_expire++;
			}
		}
	}

	return 0;
}

void ull_conn_done(struct node_rx_event_done *done)
{
	uint32_t ticks_drift_minus;
	uint32_t ticks_drift_plus;
	uint16_t latency_event;
	uint16_t elapsed_event;
	struct lll_conn *lll;
	struct ll_conn *conn;
	uint8_t reason_final;
	uint16_t lazy;
	uint8_t force;

	/* Get reference to ULL context */
	conn = CONTAINER_OF(done->param, struct ll_conn, ull);
	lll = &conn->lll;

	/* Skip if connection terminated by local host */
	if (unlikely(lll->handle == LLL_HANDLE_INVALID)) {
		return;
	}

#if defined(CONFIG_BT_CTLR_LE_ENC)
	/* Check authenticated payload expiry or MIC failure */
	switch (done->extra.mic_state) {
	case LLL_CONN_MIC_NONE:
#if defined(CONFIG_BT_CTLR_LE_PING)
		if (lll->enc_rx || conn->llcp_enc.pause_rx) {
			uint16_t appto_reload_new;

			/* check for change in apto */
			appto_reload_new = (conn->apto_reload >
					    (lll->latency + 6)) ?
					   (conn->apto_reload -
					    (lll->latency + 6)) :
					   conn->apto_reload;
			if (conn->appto_reload != appto_reload_new) {
				conn->appto_reload = appto_reload_new;
				conn->apto_expire = 0U;
			}

			/* start authenticated payload (pre) timeout */
			if (conn->apto_expire == 0U) {
				conn->appto_expire = conn->appto_reload;
				conn->apto_expire = conn->apto_reload;
			}
		}
#endif /* CONFIG_BT_CTLR_LE_PING */
		break;

	case LLL_CONN_MIC_PASS:
#if defined(CONFIG_BT_CTLR_LE_PING)
		conn->appto_expire = conn->apto_expire = 0U;
#endif /* CONFIG_BT_CTLR_LE_PING */
		break;

	case LLL_CONN_MIC_FAIL:
		conn->llcp_terminate.reason_final =
			BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL;
		break;
	}
#endif /* CONFIG_BT_CTLR_LE_ENC */

	/* Peripheral received terminate ind or
	 * Central received ack for the transmitted terminate ind or
	 * Central transmitted ack for the received terminate ind or
	 * there has been MIC failure
	 */
	reason_final = conn->llcp_terminate.reason_final;
	if (reason_final && (
#if defined(CONFIG_BT_PERIPHERAL)
			    lll->role ||
#else /* CONFIG_BT_PERIPHERAL */
			    0 ||
#endif /* CONFIG_BT_PERIPHERAL */
#if defined(CONFIG_BT_CENTRAL)
			    (((conn->llcp_terminate.req -
			       conn->llcp_terminate.ack) & 0xFF) ==
			     TERM_ACKED) ||
			    conn->central.terminate_ack ||
			    (reason_final == BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL)
#else /* CONFIG_BT_CENTRAL */
			    1
#endif /* CONFIG_BT_CENTRAL */
			    )) {
		conn_cleanup(conn, reason_final);

		return;
	}

	/* Events elapsed used in timeout checks below */
#if defined(CONFIG_BT_CTLR_CONN_META)
	/* If the event has shallow expiry, do not add the latency but rely on
	 * the accumulated lazy count.
	 */
	latency_event = conn->common.is_must_expire ? 0 : lll->latency_event;
#else
	latency_event = lll->latency_event;
#endif
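	/* Account for the current event itself in the elapsed count */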
	elapsed_event = latency_event + 1;

	/* Peripheral drift compensation calc and new latency or
	 * central terminate acked
	 */
	ticks_drift_plus = 0U;
	ticks_drift_minus = 0U;
	if (done->extra.trx_cnt) {
		if (0) {
#if defined(CONFIG_BT_PERIPHERAL)
		} else if (lll->role) {
			ull_drift_ticks_get(done, &ticks_drift_plus,
					    &ticks_drift_minus);

			if (!conn->tx_head) {
				ull_conn_tx_demux(UINT8_MAX);
			}

			if (conn->tx_head || memq_peek(lll->memq_tx.head,
						       lll->memq_tx.tail,
						       NULL)) {
				lll->latency_event = 0;
			} else if (lll->periph.latency_enabled) {
				lll->latency_event = lll->latency;
			}
#endif /* CONFIG_BT_PERIPHERAL */

#if defined(CONFIG_BT_CENTRAL)
		} else if (reason_final) {
			conn->central.terminate_ack = 1;
#endif /* CONFIG_BT_CENTRAL */

		}

		/* Reset connection failed to establish countdown */
		conn->connect_expire = 0U;
	}

	/* Reset supervision countdown */
	if (done->extra.crc_valid) {
		conn->supervision_expire = 0U;
	}

	/* check connection failed to establish */
	else if (conn->connect_expire) {
		if (conn->connect_expire > elapsed_event) {
			conn->connect_expire -= elapsed_event;
		} else {
			conn_cleanup(conn, BT_HCI_ERR_CONN_FAIL_TO_ESTAB);

			return;
		}
	}

	/* If the anchor point is not synchronized, start the supervision
	 * timeout and break latency, if any.
	 */
	else {
		/* Start supervision timeout, if not started already */
		if (!conn->supervision_expire) {
			conn->supervision_expire = conn->supervision_reload;
		}
	}

	/* check supervision timeout */
	force = 0U;
	if (conn->supervision_expire) {
		if (conn->supervision_expire > elapsed_event) {
			conn->supervision_expire -= elapsed_event;

			/* break latency */
			lll->latency_event = 0U;

			/* Force both central and peripheral when close to
			 * supervision timeout.
			 */
			if (conn->supervision_expire <= 6U) {
				force = 1U;
			}
#if defined(CONFIG_BT_CTLR_CONN_RANDOM_FORCE)
			/* use randomness to force peripheral role when anchor
			 * points are being missed.
			 */
			else if (lll->role) {
				if (latency_event) {
					force = 1U;
				} else {
					force = conn->periph.force & 0x01;

					/* rotate force bits */
					conn->periph.force >>= 1U;
					if (force) {
						conn->periph.force |= BIT(31);
					}
				}
			}
#endif /* CONFIG_BT_CTLR_CONN_RANDOM_FORCE */
		} else {
			conn_cleanup(conn, BT_HCI_ERR_CONN_TIMEOUT);

			return;
		}
	}

	/* check procedure timeout */
	if (conn->procedure_expire != 0U) {
		if (conn->procedure_expire > elapsed_event) {
			conn->procedure_expire -= elapsed_event;
		} else {
			conn_cleanup(conn, BT_HCI_ERR_LL_RESP_TIMEOUT);

			return;
		}
	}

#if defined(CONFIG_BT_CTLR_LE_PING)
	/* check apto */
	if (conn->apto_expire != 0U) {
		if (conn->apto_expire > elapsed_event) {
			conn->apto_expire -= elapsed_event;
		} else {
			struct node_rx_hdr *rx;

			rx = ll_pdu_rx_alloc();
			if (rx) {
				conn->apto_expire = 0U;

				rx->handle = lll->handle;
				rx->type = NODE_RX_TYPE_APTO;

				/* enqueue apto event into rx queue */
				ll_rx_put(rx->link, rx);
				ll_rx_sched();
			} else {
				conn->apto_expire = 1U;
			}
		}
	}

	/* check appto */
	if (conn->appto_expire != 0U) {
		if (conn->appto_expire > elapsed_event) {
			conn->appto_expire -= elapsed_event;
		} else {
			conn->appto_expire = 0U;

			if ((conn->procedure_expire == 0U) &&
			    (conn->llcp_req == conn->llcp_ack)) {
				conn->llcp_type = LLCP_PING;
				conn->llcp_ack -= 2U;
			}
		}
	}
#endif /* CONFIG_BT_CTLR_LE_PING */

#if defined(CONFIG_BT_CTLR_CONN_RSSI_EVENT)
	/* generate RSSI event */
	if (lll->rssi_sample_count == 0U) {
		struct node_rx_pdu *rx;
		struct pdu_data *pdu_data_rx;

		rx = ll_pdu_rx_alloc();
		if (rx) {
			lll->rssi_reported = lll->rssi_latest;
			lll->rssi_sample_count = LLL_CONN_RSSI_SAMPLE_COUNT;

			/* Prepare the rx packet structure */
			rx->hdr.handle = lll->handle;
			rx->hdr.type = NODE_RX_TYPE_RSSI;

			/* prepare connection RSSI structure */
			pdu_data_rx = (void *)rx->pdu;
			pdu_data_rx->rssi = lll->rssi_reported;

			/* enqueue connection RSSI structure into queue */
			ll_rx_put(rx->hdr.link, rx);
			ll_rx_sched();
		}
	}
#endif /* CONFIG_BT_CTLR_CONN_RSSI_EVENT */

	/* break latency based on ctrl procedure pending */
	if (((((conn->llcp_req - conn->llcp_ack) & 0x03) == 0x02) &&
	     ((conn->llcp_type == LLCP_CONN_UPD) ||
	      (conn->llcp_type == LLCP_CHAN_MAP))) ||
	    (conn->llcp_cu.req != conn->llcp_cu.ack)) {
		lll->latency_event = 0U;
	}

	/* check if latency needs update */
	lazy = 0U;
	if ((force) || (latency_event != lll->latency_event)) {
		lazy = lll->latency_event + 1U;
	}

	/* update conn ticker */
	if (ticks_drift_plus || ticks_drift_minus || lazy || force) {
		uint8_t ticker_id = TICKER_ID_CONN_BASE + lll->handle;
		struct ll_conn *conn = lll->hdr.parent;
		uint32_t ticker_status;

		/* A call to ticker_update can fail under a race condition
		 * wherein the peripheral role is being stopped but at the
		 * same time is preempted by a peripheral event that gets into
		 * the close state. Accept failure when the peripheral role is
		 * being stopped.
		 */
		ticker_status = ticker_update(TICKER_INSTANCE_ID_CTLR,
					      TICKER_USER_ID_ULL_HIGH,
					      ticker_id,
					      ticks_drift_plus,
					      ticks_drift_minus, 0, 0,
					      lazy, force,
					      ticker_update_conn_op_cb,
					      conn);
		LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
			  (ticker_status == TICKER_STATUS_BUSY) ||
			  ((void *)conn == ull_disable_mark_get()));
	}
}

void ull_conn_tx_demux(uint8_t count)
{
	do {
		struct lll_tx *lll_tx;
		struct ll_conn *conn;

		lll_tx = MFIFO_DEQUEUE_GET(conn_tx);
		if (!lll_tx) {
			break;
		}

		conn = ll_connected_get(lll_tx->handle);
		if (conn) {
			struct node_tx *tx = lll_tx->node;

#if defined(CONFIG_BT_CTLR_LLID_DATA_START_EMPTY)
			if (empty_data_start_release(conn, tx)) {
				goto ull_conn_tx_demux_release;
			}
#endif /* CONFIG_BT_CTLR_LLID_DATA_START_EMPTY */

			tx->next = NULL;
			if (!conn->tx_data) {
				conn->tx_data = tx;
				if (!conn->tx_head) {
					conn->tx_head = tx;
					conn->tx_data_last = NULL;
				}
			}

			if (conn->tx_data_last) {
				conn->tx_data_last->next = tx;
			}

			conn->tx_data_last = tx;
		} else {
			struct node_tx *tx = lll_tx->node;
			struct pdu_data *p = (void *)tx->pdu;

			p->ll_id = PDU_DATA_LLID_RESV;
			ll_tx_ack_put(LLL_HANDLE_INVALID, tx);
		}

#if defined(CONFIG_BT_CTLR_LLID_DATA_START_EMPTY)
ull_conn_tx_demux_release:
#endif /* CONFIG_BT_CTLR_LLID_DATA_START_EMPTY */

		MFIFO_DEQUEUE(conn_tx);
	} while (--count);
}

void ull_conn_tx_lll_enqueue(struct ll_conn *conn, uint8_t count)
{
	bool pause_tx = false;

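	/* Dequeue from the ULL Tx list into the LLL memq unless data Tx is
	 * paused by an encryption or PHY update procedure; a control PDU at
	 * the head is still let through.
	 */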
	while (conn->tx_head &&
	       ((
#if defined(CONFIG_BT_CTLR_PHY)
		 !conn->llcp_phy.pause_tx &&
#endif /* CONFIG_BT_CTLR_PHY */
#if defined(CONFIG_BT_CTLR_LE_ENC)
		 !conn->llcp_enc.pause_tx &&
		 !(pause_tx = is_enc_req_pause_tx(conn)) &&
#endif /* CONFIG_BT_CTLR_LE_ENC */
		 1) ||
		(!pause_tx && (conn->tx_head == conn->tx_ctrl))) && count--) {
		struct pdu_data *pdu_tx;
		struct node_tx *tx;
		memq_link_t *link;

		tx = tx_ull_dequeue(conn, conn->tx_head);

		pdu_tx = (void *)tx->pdu;
		if (pdu_tx->ll_id == PDU_DATA_LLID_CTRL) {
			ctrl_tx_pre_ack(conn, pdu_tx);
		}

		link = mem_acquire(&mem_link_tx.free);
		LL_ASSERT(link);

		memq_enqueue(link, tx, &conn->lll.memq_tx.tail);
	}
}

void ull_conn_link_tx_release(void *link)
{
	mem_release(link, &mem_link_tx.free);
}

uint8_t ull_conn_ack_last_idx_get(void)
{
	return mfifo_conn_ack.l;
}

memq_link_t *ull_conn_ack_peek(uint8_t *ack_last, uint16_t *handle,
			       struct node_tx **tx)
{
	struct lll_tx *lll_tx;

	lll_tx = MFIFO_DEQUEUE_GET(conn_ack);
	if (!lll_tx) {
		return NULL;
	}

	*ack_last = mfifo_conn_ack.l;

	*handle = lll_tx->handle;
	*tx = lll_tx->node;

	return (*tx)->link;
}

memq_link_t *ull_conn_ack_by_last_peek(uint8_t last, uint16_t *handle,
				       struct node_tx **tx)
{
	struct lll_tx *lll_tx;

	lll_tx = mfifo_dequeue_get(mfifo_conn_ack.m, mfifo_conn_ack.s,
				   mfifo_conn_ack.f, last);
	if (!lll_tx) {
		return NULL;
	}

	*handle = lll_tx->handle;
	*tx = lll_tx->node;

	return (*tx)->link;
}

void *ull_conn_ack_dequeue(void)
{
	return MFIFO_DEQUEUE(conn_ack);
}

void ull_conn_lll_ack_enqueue(uint16_t handle, struct node_tx *tx)
{
	struct lll_tx *lll_tx;
	uint8_t idx;

	idx = MFIFO_ENQUEUE_GET(conn_ack, (void **)&lll_tx);
	LL_ASSERT(lll_tx);

	lll_tx->handle = handle;
	lll_tx->node = tx;

	MFIFO_ENQUEUE(conn_ack, idx);
}

void ull_conn_tx_ack(uint16_t handle, memq_link_t *link, struct node_tx *tx)
{
	struct pdu_data *pdu_tx;

	pdu_tx = (void *)tx->pdu;
	LL_ASSERT(pdu_tx->len);

	if (pdu_tx->ll_id == PDU_DATA_LLID_CTRL) {
		if (handle != LLL_HANDLE_INVALID) {
			struct ll_conn *conn = ll_conn_get(handle);

			ctrl_tx_ack(conn, &tx, pdu_tx);
		}

		/* release ctrl mem if points to itself */
		if (link->next == (void *)tx) {
			LL_ASSERT(link->next);

			mem_release(tx, &mem_conn_tx_ctrl.free);
			return;
		} else if (!tx) {
			/* Tx Node re-used to enqueue new ctrl PDU */
			return;
		} else {
			LL_ASSERT(!link->next);
		}
	} else if (handle == LLL_HANDLE_INVALID) {
		pdu_tx->ll_id = PDU_DATA_LLID_RESV;
	} else {
		LL_ASSERT(handle != LLL_HANDLE_INVALID);
	}

	ll_tx_ack_put(handle, tx);

	return;
}

uint8_t ull_conn_llcp_req(void *conn)
{
	struct ll_conn * const conn_hdr = conn;

	if (conn_hdr->llcp_req != conn_hdr->llcp_ack) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

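	/* Take the request; a modulo 4 delta other than 1 means a concurrent
	 * requester raced us, hence roll back and disallow.
	 */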
1717 	conn_hdr->llcp_req++;
1718 	if (((conn_hdr->llcp_req - conn_hdr->llcp_ack) & 0x03) != 1) {
1719 		conn_hdr->llcp_req--;
1720 		return BT_HCI_ERR_CMD_DISALLOWED;
1721 	}
1722 
1723 	return 0;
1724 }
1725 
1726 uint16_t ull_conn_lll_max_tx_octets_get(struct lll_conn *lll)
1727 {
1728 	uint16_t max_tx_octets;
1729 
1730 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
1731 #if defined(CONFIG_BT_CTLR_PHY)
1732 	switch (lll->phy_tx_time) {
1733 	default:
1734 	case PHY_1M:
1735 		/* 1M PHY, 1us = 1 bit, hence divide by 8.
1736 		 * Deduct 10 bytes for preamble (1), access address (4),
1737 		 * header (2), and CRC (3).
1738 		 */
1739 		max_tx_octets = (lll->max_tx_time >> 3) - 10;
1740 		break;
1741 
1742 	case PHY_2M:
1743 		/* 2M PHY, 1us = 2 bits, hence divide by 4.
1744 		 * Deduct 11 bytes for preamble (2), access address (4),
1745 		 * header (2), and CRC (3).
1746 		 */
1747 		max_tx_octets = (lll->max_tx_time >> 2) - 11;
1748 		break;
1749 
1750 #if defined(CONFIG_BT_CTLR_PHY_CODED)
1751 	case PHY_CODED:
1752 		if (lll->phy_flags & 0x01) {
1753 			/* S8 Coded PHY, 8us = 1 bit, hence divide by
1754 			 * 64.
1755 			 * Subtract time for preamble (80), AA (256),
1756 			 * CI (16), TERM1 (24), CRC (192) and
1757 			 * TERM2 (24), total 592 us.
1758 			 * Subtract 2 bytes for header.
1759 			 */
1760 			max_tx_octets = ((lll->max_tx_time - 592) >>
1761 					  6) - 2;
1762 		} else {
1763 			/* S2 Coded PHY, 2us = 1 bit, hence divide by
1764 			 * 16.
1765 			 * Subtract time for preamble (80), AA (256),
1766 			 * CI (16), TERM1 (24), CRC (48) and
1767 			 * TERM2 (6), total 430 us.
1768 			 * Subtract 2 bytes for header.
1769 			 */
1770 			max_tx_octets = ((lll->max_tx_time - 430) >>
1771 					  4) - 2;
1772 		}
1773 		break;
1774 #endif /* CONFIG_BT_CTLR_PHY_CODED */
1775 	}
1776 
1777 #if defined(CONFIG_BT_CTLR_LE_ENC)
1778 	if (lll->enc_tx) {
1779 		/* deduct the MIC */
1780 		max_tx_octets -= 4U;
1781 	}
1782 #endif /* CONFIG_BT_CTLR_LE_ENC */
1783 
1784 	if (max_tx_octets > lll->max_tx_octets) {
1785 		max_tx_octets = lll->max_tx_octets;
1786 	}
1787 #else /* !CONFIG_BT_CTLR_PHY */
1788 	max_tx_octets = lll->max_tx_octets;
1789 #endif /* !CONFIG_BT_CTLR_PHY */
1790 #else /* !CONFIG_BT_CTLR_DATA_LENGTH */
1791 	max_tx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
1792 #endif /* !CONFIG_BT_CTLR_DATA_LENGTH */
1793 	return max_tx_octets;
1794 }
1795 
1796 static int init_reset(void)
1797 {
1798 	/* Initialize conn pool. */
1799 	mem_init(conn_pool, sizeof(struct ll_conn),
1800 		 sizeof(conn_pool) / sizeof(struct ll_conn), &conn_free);
1801 
1802 	/* Initialize tx pool. */
1803 	mem_init(mem_conn_tx.pool, CONN_TX_BUF_SIZE, CONFIG_BT_BUF_ACL_TX_COUNT,
1804 		 &mem_conn_tx.free);
1805 
1806 	/* Initialize tx ctrl pool. */
1807 	mem_init(mem_conn_tx_ctrl.pool, CONN_TX_CTRL_BUF_SIZE,
1808 		 CONN_TX_CTRL_BUFFERS, &mem_conn_tx_ctrl.free);
1809 
1810 	/* Initialize tx link pool. */
1811 	mem_init(mem_link_tx.pool, sizeof(memq_link_t),
1812 		 CONFIG_BT_BUF_ACL_TX_COUNT + CONN_TX_CTRL_BUFFERS,
1813 		 &mem_link_tx.free);
1814 
1815 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
1816 	/* Initialize the DLE defaults */
1817 	default_tx_octets = PDU_DC_PAYLOAD_SIZE_MIN;
1818 	default_tx_time = PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M);
1819 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1820 
1821 #if defined(CONFIG_BT_CTLR_PHY)
1822 	/* Initialize the PHY defaults */
1823 	default_phy_tx = PHY_1M;
1824 	default_phy_rx = PHY_1M;
1825 
1826 #if defined(CONFIG_BT_CTLR_PHY_2M)
1827 	default_phy_tx |= PHY_2M;
1828 	default_phy_rx |= PHY_2M;
1829 #endif /* CONFIG_BT_CTLR_PHY_2M */
1830 
1831 #if defined(CONFIG_BT_CTLR_PHY_CODED)
1832 	default_phy_tx |= PHY_CODED;
1833 	default_phy_rx |= PHY_CODED;
1834 #endif /* CONFIG_BT_CTLR_PHY_CODED */
1835 #endif /* CONFIG_BT_CTLR_PHY */
1836 
1837 	return 0;
1838 }
1839 
1840 static void tx_demux(void *param)
1841 {
1842 	ull_conn_tx_demux(1);
1843 
1844 	ull_conn_tx_lll_enqueue(param, 1);
1845 }
1846 
1847 static struct node_tx *tx_ull_dequeue(struct ll_conn *conn, struct node_tx *tx)
1848 {
1849 #if defined(CONFIG_BT_CTLR_LE_ENC)
1850 	if (!conn->tx_ctrl && (conn->tx_head != conn->tx_data)) {
1851 		ctrl_tx_check_and_resume(conn);
1852 	}
1853 #endif /* CONFIG_BT_CTLR_LE_ENC */
1854 
1855 	if (conn->tx_head == conn->tx_ctrl) {
1856 		conn->tx_head = conn->tx_head->next;
1857 		if (conn->tx_ctrl == conn->tx_ctrl_last) {
1858 			conn->tx_ctrl = NULL;
1859 			conn->tx_ctrl_last = NULL;
1860 		} else {
1861 			conn->tx_ctrl = conn->tx_head;
1862 		}
1863 
1864 		/* point to self to indicate a control PDU mem alloc */
1865 		tx->next = tx;
1866 	} else {
1867 		if (conn->tx_head == conn->tx_data) {
1868 			conn->tx_data = conn->tx_data->next;
1869 		}
1870 		conn->tx_head = conn->tx_head->next;
1871 
1872 		/* point to NULL to indicate a Data PDU mem alloc */
1873 		tx->next = NULL;
1874 	}
1875 
1876 	return tx;
1877 }
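
/* Note: the tx->next tagging set up in tx_ull_dequeue() (self for a ctrl
 * pool allocation, NULL for a data pool allocation) is consumed on the ack
 * path in tx_lll_flush() below, where link->next = tx->next records which
 * pool the node is to be released back to.
 */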
1878 
1879 static void ticker_update_conn_op_cb(uint32_t status, void *param)
1880 {
	/* Peripheral drift compensation succeeds, or it fails in a race
	 * condition with disconnect or connection update (a race between
	 * the ticker_update and ticker_stop calls).
	 */
1885 	LL_ASSERT(status == TICKER_STATUS_SUCCESS ||
1886 		  param == ull_update_mark_get() ||
1887 		  param == ull_disable_mark_get());
1888 }
1889 
1890 static void ticker_stop_conn_op_cb(uint32_t status, void *param)
1891 {
1892 	void *p;
1893 
1894 	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
1895 
1896 	p = ull_update_mark(param);
1897 	LL_ASSERT(p == param);
1898 }
1899 
1900 static void ticker_start_conn_op_cb(uint32_t status, void *param)
1901 {
1902 	void *p;
1903 
1904 	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
1905 
1906 	p = ull_update_unmark(param);
1907 	LL_ASSERT(p == param);
1908 }
1909 
1910 static void conn_setup_adv_scan_disabled_cb(void *param)
1911 {
1912 	struct node_rx_ftr *ftr;
1913 	struct node_rx_hdr *rx;
1914 	struct lll_conn *lll;
1915 
1916 	/* NOTE: LLL conn context SHALL be after lll_hdr in
1917 	 *       struct lll_adv and struct lll_scan.
1918 	 */
1919 	rx = param;
1920 	ftr = &(rx->rx_ftr);
1921 	lll = *((struct lll_conn **)((uint8_t *)ftr->param +
1922 				     sizeof(struct lll_hdr)));
1923 	switch (lll->role) {
1924 #if defined(CONFIG_BT_CENTRAL)
1925 	case 0:
1926 		ull_central_setup(rx, ftr, lll);
1927 		break;
1928 #endif /* CONFIG_BT_CENTRAL */
1929 
1930 #if defined(CONFIG_BT_PERIPHERAL)
1931 	case 1:
1932 		ull_periph_setup(rx, ftr, lll);
1933 		break;
1934 #endif /* CONFIG_BT_PERIPHERAL */
1935 
1936 	default:
1937 		LL_ASSERT(0);
1938 		break;
1939 	}
1940 }
1941 
1942 static inline void disable(uint16_t handle)
1943 {
1944 	struct ll_conn *conn;
1945 	int err;
1946 
1947 	conn = ll_conn_get(handle);
1948 
1949 	err = ull_ticker_stop_with_mark(TICKER_ID_CONN_BASE + handle,
1950 					conn, &conn->lll);
1951 	LL_ASSERT(err == 0 || err == -EALREADY);
1952 
1953 	conn->lll.handle = LLL_HANDLE_INVALID;
1954 	conn->lll.link_tx_free = NULL;
1955 }
1956 
1957 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
1958 static void conn_cleanup_iso_cis_released_cb(struct ll_conn *conn)
1959 {
1960 	struct ll_conn_iso_stream *cis;
1961 
1962 	cis = ll_conn_iso_stream_get_by_acl(conn, NULL);
1963 	if (cis) {
1964 		/* More associated CISes - stop next */
1965 		ull_conn_iso_cis_stop(cis, conn_cleanup_iso_cis_released_cb);
1966 	} else {
1967 		/* No more CISes associated with conn - finalize */
1968 		conn_cleanup_finalize(conn);
1969 	}
1970 }
1971 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO || CONFIG_BT_CTLR_CENTRAL_ISO */
1972 
1973 static void conn_cleanup_finalize(struct ll_conn *conn)
1974 {
1975 	struct lll_conn *lll = &conn->lll;
1976 	struct node_rx_pdu *rx;
1977 	uint32_t ticker_status;
1978 
1979 	/* release any llcp reserved rx node */
1980 	rx = conn->llcp_rx;
1981 	while (rx) {
1982 		struct node_rx_hdr *hdr;
1983 
1984 		/* traverse to next rx node */
1985 		hdr = &rx->hdr;
1986 		rx = hdr->link->mem;
1987 
		/* Mark buffer for release */
1989 		hdr->type = NODE_RX_TYPE_RELEASE;
1990 
1991 		/* enqueue rx node towards Thread */
1992 		ll_rx_put(hdr->link, hdr);
1993 	}
1994 
1995 	/* flush demux-ed Tx buffer still in ULL context */
1996 	tx_ull_flush(conn);
1997 
1998 	/* Stop Central or Peripheral role ticker */
1999 	ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR,
2000 				    TICKER_USER_ID_ULL_HIGH,
2001 				    TICKER_ID_CONN_BASE + lll->handle,
2002 				    ticker_stop_op_cb, conn);
2003 	LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
2004 		  (ticker_status == TICKER_STATUS_BUSY));
2005 
2006 	/* Invalidate the connection context */
2007 	lll->handle = LLL_HANDLE_INVALID;
2008 
2009 	/* Demux and flush Tx PDUs that remain enqueued in thread context */
2010 	ull_conn_tx_demux(UINT8_MAX);
2011 }
2012 
2013 static void conn_cleanup(struct ll_conn *conn, uint8_t reason)
2014 {
2015 	struct node_rx_pdu *rx;
2016 
2017 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
2018 	struct ll_conn_iso_stream *cis;
2019 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO || CONFIG_BT_CTLR_CENTRAL_ISO */
2020 
2021 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
2022 	/* Reset CPR mutex */
2023 	cpr_active_check_and_reset(conn);
2024 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
2025 
	/* Only the termination structure is populated here, in the ULL
	 * context, but the actual enqueue happens in the LLL context, in
	 * tx_lll_flush. This avoids passing the reason value and handle
	 * through the mayfly scheduling of tx_lll_flush.
	 */
2032 	rx = (void *)&conn->llcp_terminate.node_rx;
2033 	rx->hdr.handle = conn->lll.handle;
2034 	rx->hdr.type = NODE_RX_TYPE_TERMINATE;
2035 	*((uint8_t *)rx->pdu) = reason;
2036 
2037 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
2038 	cis = ll_conn_iso_stream_get_by_acl(conn, NULL);
2039 	if (cis) {
2040 		/* Stop CIS and defer cleanup to after teardown. */
2041 		ull_conn_iso_cis_stop(cis, conn_cleanup_iso_cis_released_cb);
2042 		return;
2043 	}
2044 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO || CONFIG_BT_CTLR_CENTRAL_ISO */
2045 
2046 	conn_cleanup_finalize(conn);
2047 }
2048 
2049 static void tx_ull_flush(struct ll_conn *conn)
2050 {
2051 	while (conn->tx_head) {
2052 		struct node_tx *tx;
2053 		memq_link_t *link;
2054 
2055 		tx = tx_ull_dequeue(conn, conn->tx_head);
2056 
2057 		link = mem_acquire(&mem_link_tx.free);
2058 		LL_ASSERT(link);
2059 
2060 		memq_enqueue(link, tx, &conn->lll.memq_tx.tail);
2061 	}
2062 }
2063 
2064 static void ticker_stop_op_cb(uint32_t status, void *param)
2065 {
2066 	static memq_link_t link;
2067 	static struct mayfly mfy = {0, 0, &link, NULL, conn_disable};
2068 	uint32_t ret;
2069 
2070 	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
2071 
	/* Check for any pending LLL events that need to be aborted */
2073 	mfy.param = param;
2074 	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
2075 			     TICKER_USER_ID_ULL_HIGH, 0, &mfy);
2076 	LL_ASSERT(!ret);
2077 }
2078 
2079 static void conn_disable(void *param)
2080 {
2081 	struct ll_conn *conn;
2082 	struct ull_hdr *hdr;
2083 
	/* Check ref count to determine if any LLL events are pending in pipeline */
2085 	conn = param;
2086 	hdr = &conn->ull;
2087 	if (ull_ref_get(hdr)) {
2088 		static memq_link_t link;
2089 		static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
2090 		uint32_t ret;
2091 
2092 		mfy.param = &conn->lll;
2093 
2094 		/* Setup disabled callback to be called when ref count
2095 		 * returns to zero.
2096 		 */
2097 		LL_ASSERT(!hdr->disabled_cb);
2098 		hdr->disabled_param = mfy.param;
2099 		hdr->disabled_cb = disabled_cb;
2100 
2101 		/* Trigger LLL disable */
2102 		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
2103 				     TICKER_USER_ID_LLL, 0, &mfy);
2104 		LL_ASSERT(!ret);
2105 	} else {
2106 		/* No pending LLL events */
2107 		disabled_cb(&conn->lll);
2108 	}
2109 }
2110 
2111 static void disabled_cb(void *param)
2112 {
2113 	static memq_link_t link;
2114 	static struct mayfly mfy = {0, 0, &link, NULL, tx_lll_flush};
2115 	uint32_t ret;
2116 
2117 	mfy.param = param;
2118 	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
2119 			     TICKER_USER_ID_LLL, 0, &mfy);
2120 	LL_ASSERT(!ret);
2121 }
2122 
2123 static void tx_lll_flush(void *param)
2124 {
2125 	struct node_rx_pdu *rx;
2126 	struct lll_conn *lll;
2127 	struct ll_conn *conn;
2128 	struct node_tx *tx;
2129 	memq_link_t *link;
2130 	uint16_t handle;
2131 
2132 	/* Get reference to ULL context */
2133 	lll = param;
2134 	conn = HDR_LLL2ULL(lll);
2135 	handle = ll_conn_handle_get(conn);
2136 
2137 	lll_conn_flush(handle, lll);
2138 
2139 	link = memq_dequeue(lll->memq_tx.tail, &lll->memq_tx.head,
2140 			    (void **)&tx);
2141 	while (link) {
2142 		struct lll_tx *lll_tx;
2143 		uint8_t idx;
2144 
2145 		idx = MFIFO_ENQUEUE_GET(conn_ack, (void **)&lll_tx);
2146 		LL_ASSERT(lll_tx);
2147 
2148 		lll_tx->handle = LLL_HANDLE_INVALID;
2149 		lll_tx->node = tx;
2150 
2151 		/* TX node UPSTREAM, i.e. Tx node ack path */
2152 		link->next = tx->next; /* Indicates ctrl pool or data pool */
2153 		tx->next = link;
2154 
2155 		MFIFO_ENQUEUE(conn_ack, idx);
2156 
2157 		link = memq_dequeue(lll->memq_tx.tail, &lll->memq_tx.head,
2158 				    (void **)&tx);
2159 	}
2160 
	/* Get the terminate structure reserved in the connection context.
	 * The terminate reason and connection handle should already have
	 * been populated before this mayfly function was scheduled.
	 */
2165 	rx = (void *)&conn->llcp_terminate.node_rx;
2166 	LL_ASSERT(rx->hdr.link);
2167 	link = rx->hdr.link;
2168 	rx->hdr.link = NULL;
2169 
2170 	/* Enqueue the terminate towards ULL context */
2171 	ull_rx_put(link, rx);
2172 	ull_rx_sched();
2173 }
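
/* Teardown sequencing, as implemented by the callbacks above:
 * ticker_stop_op_cb() runs when the role ticker stop completes and schedules
 * conn_disable() in the ULL high priority context; conn_disable() either
 * hooks disabled_cb() to run when the LLL event pipeline ref count drops to
 * zero, or calls it directly when no events are pending; disabled_cb() then
 * schedules tx_lll_flush() in the LLL context to flush pending Tx nodes and
 * enqueue the terminate node towards the ULL context.
 */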
2174 
2175 #if defined(CONFIG_BT_CTLR_LLID_DATA_START_EMPTY)
2176 static int empty_data_start_release(struct ll_conn *conn, struct node_tx *tx)
2177 {
2178 	struct pdu_data *p = (void *)tx->pdu;
2179 
2180 	if ((p->ll_id == PDU_DATA_LLID_DATA_START) && !p->len) {
2181 		conn->start_empty = 1U;
2182 
2183 		ll_tx_ack_put(conn->lll.handle, tx);
2184 
2185 		return -EINVAL;
2186 	} else if (p->len && conn->start_empty) {
2187 		conn->start_empty = 0U;
2188 
2189 		if (p->ll_id == PDU_DATA_LLID_DATA_CONTINUE) {
2190 			p->ll_id = PDU_DATA_LLID_DATA_START;
2191 		}
2192 	}
2193 
2194 	return 0;
2195 }
2196 #endif /* CONFIG_BT_CTLR_LLID_DATA_START_EMPTY */
2197 
/* Check for transaction violation and get a free ctrl tx PDU */
2199 static struct node_tx *ctrl_tx_rsp_mem_acquire(struct ll_conn *conn,
2200 					       struct node_rx_pdu *rx,
2201 					       int *err)
2202 {
2203 	struct node_tx *tx;
2204 
	/* Ignore duplicate requests until the previous response is acknowledged. */
2206 	if (conn->common.txn_lock) {
		/* Mark buffer for release */
2208 		rx->hdr.type = NODE_RX_TYPE_RELEASE;
2209 
2210 		/* Drop request */
2211 		*err = 0U;
2212 
2213 		return NULL;
2214 	}
2215 
2216 	/* Acquire ctrl tx mem */
2217 	tx = mem_acquire(&mem_conn_tx_ctrl.free);
2218 	if (!tx) {
2219 		*err = -ENOBUFS;
2220 
2221 		return NULL;
2222 	}
2223 
2224 	/* Lock further responses to duplicate requests before previous
2225 	 * response is acknowledged.
2226 	 */
2227 	conn->common.txn_lock = 1U;
2228 
2229 	/* NOTE: err value not required when returning valid ctrl tx PDU */
2230 
2231 	return tx;
2232 }
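
/* Illustrative caller pattern (not part of the build) for the helper above:
 *
 *	int err;
 *
 *	tx = ctrl_tx_rsp_mem_acquire(conn, rx, &err);
 *	if (!tx) {
 *		return err;	// 0: duplicate dropped, -ENOBUFS: no memory
 *	}
 *	// ... fill tx->pdu with the response, then ctrl_tx_enqueue()
 */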
2233 
2234 #if defined(CONFIG_BT_CTLR_LE_ENC)
static inline void ctrl_tx_check_and_resume(struct ll_conn *conn)
2236 {
2237 	struct pdu_data *pdu_data_tx;
2238 
2239 	pdu_data_tx = (void *)conn->tx_head->pdu;
2240 	if ((pdu_data_tx->ll_id != PDU_DATA_LLID_CTRL) ||
2241 	    ((pdu_data_tx->llctrl.opcode !=
2242 	      PDU_DATA_LLCTRL_TYPE_ENC_REQ) &&
2243 	     (pdu_data_tx->llctrl.opcode !=
2244 	      PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_REQ))) {
2245 		conn->tx_ctrl = conn->tx_ctrl_last = conn->tx_head;
2246 	}
2247 }
2248 #endif /* CONFIG_BT_CTLR_LE_ENC */
2249 
2250 static inline void ctrl_tx_last_enqueue(struct ll_conn *conn,
2251 					struct node_tx *tx)
2252 {
2253 	tx->next = conn->tx_ctrl_last->next;
2254 	conn->tx_ctrl_last->next = tx;
2255 	conn->tx_ctrl_last = tx;
2256 }
2257 
2258 static inline void ctrl_tx_pause_enqueue(struct ll_conn *conn,
2259 					 struct node_tx *tx, bool pause)
2260 {
2261 	/* check if a packet was tx-ed and not acked by peer */
2262 	if (
2263 	    /* data/ctrl packet is in the head */
2264 	    conn->tx_head &&
2265 #if defined(CONFIG_BT_CTLR_LE_ENC)
2266 	    !conn->llcp_enc.pause_tx &&
2267 #endif /* CONFIG_BT_CTLR_LE_ENC */
2268 #if defined(CONFIG_BT_CTLR_PHY)
2269 	    !conn->llcp_phy.pause_tx &&
2270 #endif /* CONFIG_BT_CTLR_PHY */
2271 	    1) {
		/* Data or ctrl may have been transmitted once, but not acked
		 * by the peer; hence place this new ctrl after the head.
		 */
2275 
		/* If data was transmitted once, keep it at the head of the tx
		 * list; a ctrl will be inserted after it, hence advance the
		 * data pointer.
		 */
2280 		if (conn->tx_head == conn->tx_data) {
2281 			conn->tx_data = conn->tx_data->next;
2282 #if defined(CONFIG_BT_CTLR_LE_ENC)
2283 		} else if (!conn->tx_ctrl) {
2284 			ctrl_tx_check_and_resume(conn);
2285 #endif /* CONFIG_BT_CTLR_LE_ENC */
2286 		}
2287 
		/* If no ctrl packet is already queued, the newly added ctrl
		 * becomes the ctrl pointer and is inserted after the head.
		 */
2291 		if (!conn->tx_ctrl) {
2292 			tx->next = conn->tx_head->next;
2293 			conn->tx_head->next = tx;
2294 
			/* While in the Encryption Procedure, other control
			 * PDUs, Feature Rsp and Version Ind, are placed after
			 * the control last marker and before the data marker.
			 * Hence, if there is no control marker, i.e. this is
			 * the first control PDU and it is to be paused, do
			 * not set the control marker. A valid control PDU in
			 * the Encryption Procedure that is not implicitly
			 * paused will set both the control and control last
			 * markers.
			 */
2304 			if (!pause) {
2305 				conn->tx_ctrl = tx;
2306 				conn->tx_ctrl_last = tx;
2307 			}
2308 		} else {
			/* The ENC_REQ PDU is always allocated from the data
			 * pool, hence the head cannot have the control marker
			 * while pause is true.
			 */
2313 			LL_ASSERT(!pause);
2314 
2315 			ctrl_tx_last_enqueue(conn, tx);
2316 		}
2317 	} else {
2318 		/* No packet needing ACK. */
2319 
		/* If this is the first ctrl packet, add it at the head; else
		 * add it to the tail of the ctrl packets.
		 */
2323 		if (!conn->tx_ctrl) {
2324 			tx->next = conn->tx_head;
2325 			conn->tx_head = tx;
2326 			if (!pause) {
2327 				conn->tx_ctrl = tx;
2328 				conn->tx_ctrl_last = tx;
2329 			}
2330 		} else {
2331 			LL_ASSERT(!pause);
2332 
2333 			ctrl_tx_last_enqueue(conn, tx);
2334 		}
2335 	}
2336 
2337 	/* Update last pointer if ctrl added at end of tx list */
2338 	if (!tx->next) {
2339 		conn->tx_data_last = tx;
2340 	}
2341 }
2342 
2343 static inline void ctrl_tx_enqueue(struct ll_conn *conn, struct node_tx *tx)
2344 {
2345 	ctrl_tx_pause_enqueue(conn, tx, false);
2346 }
2347 
2348 static void ctrl_tx_sec_enqueue(struct ll_conn *conn, struct node_tx *tx)
2349 {
2350 	bool pause = false;
2351 
2352 #if defined(CONFIG_BT_CTLR_LE_ENC)
2353 	if (conn->llcp_enc.pause_tx) {
2354 		if (!conn->tx_ctrl) {
			/* As data PDU tx is paused and no control PDU is in
			 * the queue, it is safe to add the new control PDU at
			 * the head. Note, here the PDUs are stacked, not
			 * queued: Last In First Out.
			 */
2360 			tx->next = conn->tx_head;
2361 			conn->tx_head = tx;
2362 		} else {
			/* As data PDU tx is paused and there are control PDUs
			 * in the queue, add it after the control PDUs' last
			 * marker and before the data start marker.
			 * Note, here the PDUs are stacked, not queued: Last
			 * In First Out.
			 */
2369 			tx->next = conn->tx_ctrl_last->next;
2370 			conn->tx_ctrl_last->next = tx;
2371 		}
2372 
2373 		/* Update last pointer if ctrl added at end of tx list */
2374 		if (!tx->next) {
2375 			conn->tx_data_last = tx;
2376 		}
2377 	} else {
		/* Check if an Encryption Request is at the head; if so,
		 * enqueue this control PDU after the control last marker and
		 * before the data marker, so that it stays paused until
		 * Encryption Setup completes.
		 */
2382 		if (conn->tx_head) {
2383 			struct pdu_data *pdu_data_tx;
2384 
2385 			pdu_data_tx = (void *)conn->tx_head->pdu;
2386 			if ((conn->llcp_req != conn->llcp_ack) &&
2387 			    (conn->llcp_type == LLCP_ENCRYPTION) &&
2388 			    (pdu_data_tx->ll_id == PDU_DATA_LLID_CTRL) &&
2389 			    ((pdu_data_tx->llctrl.opcode ==
2390 			      PDU_DATA_LLCTRL_TYPE_ENC_REQ) ||
2391 			     (pdu_data_tx->llctrl.opcode ==
2392 			      PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_REQ))) {
2393 				pause = true;
2394 			}
2395 		}
2396 
2397 #else /* !CONFIG_BT_CTLR_LE_ENC */
2398 	{
2399 #endif /* !CONFIG_BT_CTLR_LE_ENC */
2400 
2401 		ctrl_tx_pause_enqueue(conn, tx, pause);
2402 	}
2403 }
2404 
2405 #if defined(CONFIG_BT_CTLR_LE_ENC)
2406 static bool is_enc_req_pause_tx(struct ll_conn *conn)
2407 {
2408 	struct pdu_data *pdu_data_tx;
2409 
2410 	pdu_data_tx = (void *)conn->tx_head->pdu;
2411 	if ((pdu_data_tx->ll_id == PDU_DATA_LLID_CTRL) &&
2412 	    ((pdu_data_tx->llctrl.opcode ==
2413 	      PDU_DATA_LLCTRL_TYPE_ENC_REQ) ||
2414 	     (pdu_data_tx->llctrl.opcode ==
2415 	      PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_REQ))) {
2416 		if (((conn->llcp_req != conn->llcp_ack) &&
2417 		     (conn->llcp_type != LLCP_ENCRYPTION)) ||
2418 		    ((conn->llcp_req == conn->llcp_ack) &&
2419 		     ((conn->llcp_feature.ack != conn->llcp_feature.req) ||
2420 		      (conn->llcp_version.ack != conn->llcp_version.req) ||
2421 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
2422 		      (conn->llcp_conn_param.ack !=
2423 		       conn->llcp_conn_param.req) ||
2424 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
2425 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
2426 		      (conn->llcp_length.ack != conn->llcp_length.req) ||
2427 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
2428 #if defined(CONFIG_BT_CTLR_PHY)
2429 		      (conn->llcp_phy.ack != conn->llcp_phy.req) ||
2430 #endif /* CONFIG_BT_CTLR_PHY */
2431 		      0))) {
2432 			struct node_tx *tx;
2433 
			/* If control packets are enqueued after this PDU,
			 * bring them ahead of it, and move the enc_req to the
			 * end of the ctrl queue.
			 */
2438 			tx = conn->tx_head;
2439 			if ((tx->next != NULL) &&
2440 			    (tx->next == conn->tx_ctrl)) {
2441 				conn->tx_head = tx->next;
2442 				tx->next = conn->tx_ctrl_last->next;
2443 				conn->tx_ctrl_last->next = tx;
2444 				conn->tx_data = tx;
2445 				if (!conn->tx_data_last) {
2446 					conn->tx_data_last = tx;
2447 				}
2448 
2449 				/* Head now contains a control packet permitted
2450 				 * to be transmitted to peer.
2451 				 */
2452 				return false;
2453 			}
2454 
2455 			/* Head contains ENC_REQ packet deferred due to another
2456 			 * control procedure in progress.
2457 			 */
2458 			return true;
2459 		}
2460 
2461 		if (conn->llcp_req == conn->llcp_ack) {
2462 			conn->llcp.encryption.state = LLCP_ENC_STATE_INIT;
2463 
2464 			conn->llcp_type = LLCP_ENCRYPTION;
2465 			conn->llcp_ack -= 2U;
2466 		} else {
2467 			LL_ASSERT(conn->llcp_type == LLCP_ENCRYPTION);
2468 		}
2469 	}
2470 
2471 	/* Head contains a permitted data or control packet. */
2472 	return false;
2473 }
2474 #endif /* CONFIG_BT_CTLR_LE_ENC */
2475 
2476 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
/* Connection context pointer used as CPR mutex to serialize connection
 * parameter request procedures across simultaneous connections so that
 * offsets exchanged with the peer do not get changed.
 */
2481 static struct ll_conn *conn_upd_curr;
2482 
2483 static inline void cpr_active_check_and_set(struct ll_conn *conn)
2484 {
2485 	if (!conn_upd_curr) {
2486 		conn_upd_curr = conn;
2487 	}
2488 }
2489 
2490 static inline void cpr_active_set(struct ll_conn *conn)
2491 {
2492 	conn_upd_curr = conn;
2493 }
2494 
2495 static inline bool cpr_active_is_set(struct ll_conn *conn)
2496 {
2497 	return conn_upd_curr && (conn_upd_curr != conn);
2498 }
2499 
2500 static inline void cpr_active_check_and_reset(struct ll_conn *conn)
2501 {
2502 	if (conn == conn_upd_curr) {
2503 		conn_upd_curr = NULL;
2504 	}
2505 }
2506 
2507 static inline void cpr_active_reset(void)
2508 {
2509 	conn_upd_curr = NULL;
2510 }
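
/* Illustrative CPR mutex protocol (summary of the helpers above): an
 * initiator takes the mutex with cpr_active_set() or
 * cpr_active_check_and_set() before offsets are exchanged, other connections
 * defer their own CPR while cpr_active_is_set() returns true, and the owner
 * releases the mutex via cpr_active_check_and_reset() or cpr_active_reset()
 * on procedure completion, rejection or disconnection.
 */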
2511 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
2512 
2513 static inline void event_conn_upd_init(struct ll_conn *conn,
2514 				       uint16_t event_counter,
2515 				       uint32_t ticks_at_expire,
2516 				       struct pdu_data *pdu_ctrl_tx,
2517 				       struct mayfly *mfy_sched_offset,
2518 				       void (*fp_mfy_select_or_use)(void *))
2519 {
2520 	/* place the conn update req packet as next in tx queue */
2521 	pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL;
2522 	pdu_ctrl_tx->len = offsetof(struct pdu_data_llctrl, conn_update_ind) +
2523 			   sizeof(struct pdu_data_llctrl_conn_update_ind);
2524 	pdu_ctrl_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_CONN_UPDATE_IND;
2525 	pdu_ctrl_tx->llctrl.conn_update_ind.win_size = conn->llcp_cu.win_size;
2526 	pdu_ctrl_tx->llctrl.conn_update_ind.win_offset =
2527 		sys_cpu_to_le16(conn->llcp_cu.win_offset_us /
2528 			CONN_INT_UNIT_US);
2529 	pdu_ctrl_tx->llctrl.conn_update_ind.interval =
2530 		sys_cpu_to_le16(conn->llcp_cu.interval);
2531 	pdu_ctrl_tx->llctrl.conn_update_ind.latency =
2532 		sys_cpu_to_le16(conn->llcp_cu.latency);
2533 	pdu_ctrl_tx->llctrl.conn_update_ind.timeout =
2534 		sys_cpu_to_le16(conn->llcp_cu.timeout);
2535 
2536 #if defined(CONFIG_BT_CTLR_SCHED_ADVANCED)
2537 	/* move to offset calculation requested state */
2538 	conn->llcp_cu.state = LLCP_CUI_STATE_OFFS_REQ;
2539 
2540 	{
2541 		uint32_t retval;
2542 
		/* Calculate the window offset that places the connection in
		 * the next available slot after the existing centrals.
		 */
2546 		conn->llcp.conn_upd.ticks_anchor = ticks_at_expire;
2547 
2548 #if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
2549 		if (conn->ull.ticks_prepare_to_start & XON_BITMASK) {
2550 			uint32_t ticks_prepare_to_start =
2551 				MAX(conn->ull.ticks_active_to_start,
2552 				    conn->ull.ticks_preempt_to_start);
2553 
2554 			conn->llcp.conn_upd.ticks_anchor -=
2555 				(conn->ull.ticks_prepare_to_start &
2556 				 ~XON_BITMASK) - ticks_prepare_to_start;
2557 		}
2558 #endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
2559 
2560 		conn->llcp.conn_upd.pdu_win_offset = (uint16_t *)
2561 			&pdu_ctrl_tx->llctrl.conn_update_ind.win_offset;
2562 
2563 		mfy_sched_offset->fp = fp_mfy_select_or_use;
2564 		mfy_sched_offset->param = (void *)conn;
2565 
2566 		retval = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
2567 					TICKER_USER_ID_ULL_LOW, 1,
2568 					mfy_sched_offset);
2569 		LL_ASSERT(!retval);
2570 	}
2571 #else /* !CONFIG_BT_CTLR_SCHED_ADVANCED */
2572 	ARG_UNUSED(ticks_at_expire);
2573 	ARG_UNUSED(mfy_sched_offset);
2574 	ARG_UNUSED(fp_mfy_select_or_use);
2575 
2576 	/* move to in progress */
2577 	conn->llcp_cu.state = LLCP_CUI_STATE_INPROG;
2578 #endif /* !CONFIG_BT_CTLR_SCHED_ADVANCED */
2579 }
2580 
2581 static inline int event_conn_upd_prep(struct ll_conn *conn, uint16_t lazy,
2582 				      uint32_t ticks_at_expire)
2583 {
2584 	struct lll_conn *lll = &conn->lll;
2585 	uint16_t instant_latency;
2586 	uint16_t event_counter;
2587 
2588 	/* Calculate current event counter */
2589 	event_counter = lll->event_counter + lll->latency_prepare + lazy;
2590 
2591 	instant_latency = (event_counter - conn->llcp.conn_upd.instant) &
2592 			  0xffff;
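	/* Note: with 16-bit serial arithmetic, instant_latency values in
	 * 0..0x7FFF mean the instant has been reached or passed, while
	 * values above 0x7FFF mean the instant is still in the future.
	 */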
2593 	if (conn->llcp_cu.state != LLCP_CUI_STATE_INPROG) {
2594 		struct pdu_data *pdu_ctrl_tx;
2595 		struct node_rx_pdu *rx;
2596 		struct node_tx *tx;
2597 #if defined(CONFIG_BT_CTLR_SCHED_ADVANCED)
2598 		static memq_link_t s_link;
2599 		static struct mayfly s_mfy_sched_offset = {0, 0,
2600 			&s_link, 0, 0 };
2601 		void (*fp_mfy_select_or_use)(void *) = NULL;
2602 
2603 		switch (conn->llcp_cu.state) {
2604 		case LLCP_CUI_STATE_USE:
2605 			fp_mfy_select_or_use = ull_sched_mfy_win_offset_use;
2606 			break;
2607 
2608 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
2609 		case LLCP_CUI_STATE_SELECT:
2610 			fp_mfy_select_or_use = ull_sched_mfy_win_offset_select;
2611 			break;
2612 
2613 		case LLCP_CUI_STATE_REJECT:
2614 			/* procedure request acked */
2615 			conn->llcp_ack = conn->llcp_req;
2616 			conn->llcp_cu.ack = conn->llcp_cu.req;
2617 			conn->llcp_conn_param.ack = conn->llcp_conn_param.req;
2618 
2619 			/* Reset CPR mutex */
2620 			cpr_active_reset();
2621 
2622 			/* enqueue control PDU */
2623 			pdu_ctrl_tx =
2624 				CONTAINER_OF(conn->llcp.conn_upd.pdu_win_offset,
2625 					     struct pdu_data,
2626 					     llctrl.conn_update_ind.win_offset);
2627 			tx = CONTAINER_OF(pdu_ctrl_tx, struct node_tx, pdu);
2628 			ctrl_tx_enqueue(conn, tx);
2629 			return -ECANCELED;
2630 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
2631 
2632 		case LLCP_CUI_STATE_OFFS_REQ:
2633 			return -EBUSY;
2634 
2635 		case LLCP_CUI_STATE_OFFS_RDY:
2636 			/* set instant */
2637 			conn->llcp.conn_upd.instant = event_counter +
2638 						      conn->lll.latency + 6;
2639 			pdu_ctrl_tx =
2640 				CONTAINER_OF(conn->llcp.conn_upd.pdu_win_offset,
2641 					     struct pdu_data,
2642 					     llctrl.conn_update_ind.win_offset);
2643 			pdu_ctrl_tx->llctrl.conn_update_ind.instant =
2644 				sys_cpu_to_le16(conn->llcp.conn_upd.instant);
2645 			/* move to in progress */
2646 			conn->llcp_cu.state = LLCP_CUI_STATE_INPROG;
2647 			/* enqueue control PDU */
2648 			tx = CONTAINER_OF(pdu_ctrl_tx, struct node_tx, pdu);
2649 			ctrl_tx_enqueue(conn, tx);
2650 			return -EINPROGRESS;
2651 
2652 		default:
2653 			LL_ASSERT(0);
2654 			break;
2655 		}
#endif /* CONFIG_BT_CTLR_SCHED_ADVANCED */
2657 
2658 		rx = ll_pdu_rx_alloc_peek(1);
2659 		if (!rx) {
2660 			return -ENOBUFS;
2661 		}
2662 
2663 		tx = mem_acquire(&mem_conn_tx_ctrl.free);
2664 		if (!tx) {
2665 			return -ENOBUFS;
2666 		}
2667 
2668 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
2669 		/* Set CPR mutex */
2670 		cpr_active_check_and_set(conn);
2671 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
2672 
2673 		(void)ll_pdu_rx_alloc();
2674 		rx->hdr.link->mem = conn->llcp_rx;
2675 		conn->llcp_rx = rx;
2676 
2677 		pdu_ctrl_tx = (void *)tx->pdu;
2678 
2679 #if defined(CONFIG_BT_CTLR_SCHED_ADVANCED)
2680 		event_conn_upd_init(conn, event_counter, ticks_at_expire,
2681 				    pdu_ctrl_tx, &s_mfy_sched_offset,
2682 				    fp_mfy_select_or_use);
2683 #else /* !CONFIG_BT_CTLR_SCHED_ADVANCED */
2684 		event_conn_upd_init(conn, event_counter, ticks_at_expire,
2685 				    pdu_ctrl_tx, NULL, NULL);
2686 		/* set instant */
2687 		conn->llcp.conn_upd.instant = event_counter +
2688 					      conn->lll.latency + 6;
2689 		pdu_ctrl_tx->llctrl.conn_update_ind.instant =
2690 			sys_cpu_to_le16(conn->llcp.conn_upd.instant);
2691 		/* enqueue control PDU */
2692 		ctrl_tx_enqueue(conn, tx);
2693 #endif /* !CONFIG_BT_CTLR_SCHED_ADVANCED */
2694 	} else if (instant_latency <= 0x7FFF) {
2695 		uint32_t ticks_win_offset = 0;
2696 		uint32_t ticks_slot_overhead;
2697 		uint16_t conn_interval_old;
2698 		uint16_t conn_interval_new;
2699 		uint32_t conn_interval_us;
2700 		struct node_rx_pdu *rx;
2701 		uint8_t ticker_id_conn;
2702 		uint32_t ticker_status;
2703 		uint32_t periodic_us;
2704 		uint16_t latency;
2705 
2706 #if defined(CONFIG_BT_PERIPHERAL) && defined(CONFIG_BT_CTLR_LE_ENC)
2707 		if (conn->lll.role && (conn->periph.llcp_type != LLCP_NONE)) {
2708 			/* Local peripheral initiated connection update
2709 			 * completed while a remote central had initiated
2710 			 * encryption procedure
2711 			 */
2712 			conn->periph.llcp_type = LLCP_NONE;
2713 		} else
2714 #endif /* CONFIG_BT_PERIPHERAL && CONFIG_BT_CTLR_LE_ENC */
2715 		{
2716 			/* procedure request acked */
2717 			conn->llcp_ack = conn->llcp_req;
2718 		}
2719 
2720 		/* procedure request acked */
2721 		conn->llcp_cu.ack = conn->llcp_cu.req;
2722 
2723 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
2724 		if ((conn->llcp_conn_param.req != conn->llcp_conn_param.ack) &&
2725 		    (conn->llcp_conn_param.state == LLCP_CPR_STATE_UPD)) {
2726 			conn->llcp_conn_param.ack = conn->llcp_conn_param.req;
2727 
2728 			/* Stop procedure timeout */
2729 			conn->procedure_expire = 0U;
2730 		}
2731 
2732 		/* Reset CPR mutex */
2733 		cpr_active_check_and_reset(conn);
2734 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
2735 
2736 		lll = &conn->lll;
2737 
2738 		/* Acquire Rx node */
2739 		rx = conn->llcp_rx;
2740 		LL_ASSERT(rx && rx->hdr.link);
2741 		conn->llcp_rx = rx->hdr.link->mem;
2742 
2743 		/* Prepare the rx packet structure */
2744 		if ((conn->llcp_cu.interval != lll->interval) ||
2745 		    (conn->llcp_cu.latency != lll->latency) ||
2746 		    (RADIO_CONN_EVENTS(conn->llcp_cu.timeout * 10000U,
2747 				       lll->interval * CONN_INT_UNIT_US) !=
2748 		     conn->supervision_reload)) {
2749 			struct node_rx_cu *cu;
2750 
2751 			rx->hdr.handle = lll->handle;
2752 			rx->hdr.type = NODE_RX_TYPE_CONN_UPDATE;
2753 
2754 			/* prepare connection update complete structure */
2755 			cu = (void *)rx->pdu;
2756 			cu->status = 0x00;
2757 			cu->interval = conn->llcp_cu.interval;
2758 			cu->latency = conn->llcp_cu.latency;
2759 			cu->timeout = conn->llcp_cu.timeout;
2760 		} else {
			/* Mark buffer for release */
2762 			rx->hdr.type = NODE_RX_TYPE_RELEASE;
2763 		}
2764 
2765 		/* enqueue rx node towards Thread */
2766 		ll_rx_put(rx->hdr.link, rx);
2767 		ll_rx_sched();
2768 
2769 #if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
2770 		/* restore to normal prepare */
2771 		if (conn->ull.ticks_prepare_to_start & XON_BITMASK) {
2772 			uint32_t ticks_prepare_to_start =
2773 				MAX(conn->ull.ticks_active_to_start,
2774 				    conn->ull.ticks_preempt_to_start);
2775 
2776 			conn->ull.ticks_prepare_to_start &= ~XON_BITMASK;
2777 			ticks_at_expire -= (conn->ull.ticks_prepare_to_start -
2778 					    ticks_prepare_to_start);
2779 		}
2780 #endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
2781 
2782 		/* compensate for instant_latency due to laziness */
2783 		conn_interval_old = instant_latency * lll->interval;
2784 		latency = conn_interval_old / conn->llcp_cu.interval;
2785 		conn_interval_new = latency * conn->llcp_cu.interval;
2786 		if (conn_interval_new > conn_interval_old) {
2787 			ticks_at_expire += HAL_TICKER_US_TO_TICKS(
2788 				(conn_interval_new - conn_interval_old) *
2789 				CONN_INT_UNIT_US);
2790 		} else {
2791 			ticks_at_expire -= HAL_TICKER_US_TO_TICKS(
2792 				(conn_interval_old - conn_interval_new) *
2793 				CONN_INT_UNIT_US);
2794 		}
2795 		lll->latency_prepare += lazy;
2796 		lll->latency_prepare -= (instant_latency - latency);
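		/* Worked example (illustrative only): with old interval 8,
		 * instant_latency 3 and new interval 16 units,
		 * conn_interval_old = 24, latency = 24 / 16 = 1 and
		 * conn_interval_new = 16; hence ticks_at_expire moves back by
		 * (24 - 16) * CONN_INT_UNIT_US and latency_prepare is reduced
		 * by (3 - 1) events.
		 */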
2797 
2798 		/* calculate the offset */
2799 		if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
2800 			ticks_slot_overhead =
2801 				MAX(conn->ull.ticks_active_to_start,
2802 				    conn->ull.ticks_prepare_to_start);
2803 
2804 		} else {
2805 			ticks_slot_overhead = 0U;
2806 		}
2807 
2808 		/* calculate the window widening and interval */
2809 		conn_interval_us = conn->llcp_cu.interval *
2810 			CONN_INT_UNIT_US;
2811 		periodic_us = conn_interval_us;
2812 
2813 		if (0) {
2814 #if defined(CONFIG_BT_PERIPHERAL)
2815 		} else if (lll->role) {
2816 			lll->periph.window_widening_prepare_us -=
2817 				lll->periph.window_widening_periodic_us *
2818 				instant_latency;
2819 
2820 			lll->periph.window_widening_periodic_us =
2821 				(((lll_clock_ppm_local_get() +
2822 				   lll_clock_ppm_get(conn->periph.sca)) *
2823 				  conn_interval_us) + (1000000 - 1)) / 1000000U;
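			/* Worked example (illustrative only): with 250 ppm
			 * local and 250 ppm peer SCA at a 50 ms interval, the
			 * widening is ceil(500 * 50000 / 1000000) = 25 us per
			 * connection interval.
			 */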
2824 			lll->periph.window_widening_max_us =
2825 				(conn_interval_us >> 1) - EVENT_IFS_US;
2826 			lll->periph.window_size_prepare_us =
2827 				conn->llcp_cu.win_size * CONN_INT_UNIT_US;
2828 
2829 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
2830 			conn->periph.ticks_to_offset = 0U;
2831 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
2832 
2833 			lll->periph.window_widening_prepare_us +=
2834 				lll->periph.window_widening_periodic_us *
2835 				latency;
2836 			if (lll->periph.window_widening_prepare_us >
2837 			    lll->periph.window_widening_max_us) {
2838 				lll->periph.window_widening_prepare_us =
2839 					lll->periph.window_widening_max_us;
2840 			}
2841 
2842 			ticks_at_expire -= HAL_TICKER_US_TO_TICKS(
2843 				lll->periph.window_widening_periodic_us *
2844 				latency);
2845 			ticks_win_offset = HAL_TICKER_US_TO_TICKS(
2846 				(conn->llcp_cu.win_offset_us /
2847 				CONN_INT_UNIT_US) * CONN_INT_UNIT_US);
2848 			periodic_us -= lll->periph.window_widening_periodic_us;
2849 #endif /* CONFIG_BT_PERIPHERAL */
2850 
2851 #if defined(CONFIG_BT_CENTRAL)
2852 		} else if (!lll->role) {
2853 			ticks_win_offset = HAL_TICKER_US_TO_TICKS(
2854 				conn->llcp_cu.win_offset_us);
2855 
			/* Workaround: due to the missing remainder parameter
			 * in the ticker_start function for the first
			 * interval, add one tick so as to use the ceiled
			 * value.
			 */
2860 			ticks_win_offset += 1U;
2861 #endif /* CONFIG_BT_CENTRAL */
2862 
2863 		} else {
2864 			LL_ASSERT(0);
2865 		}
2866 
2867 		lll->interval = conn->llcp_cu.interval;
2868 		lll->latency = conn->llcp_cu.latency;
2869 
2870 		conn->supervision_reload =
2871 			RADIO_CONN_EVENTS((conn->llcp_cu.timeout * 10U * 1000U),
2872 					  conn_interval_us);
2873 		conn->procedure_reload =
2874 			RADIO_CONN_EVENTS((40 * 1000 * 1000), conn_interval_us);
2875 
2876 #if defined(CONFIG_BT_CTLR_LE_PING)
2877 		/* APTO in no. of connection events */
2878 		conn->apto_reload = RADIO_CONN_EVENTS((30 * 1000 * 1000),
2879 						      conn_interval_us);
		/* Dispatch the LE Ping PDU 6 connection events (that the peer
		 * would listen to) before the 30 s timeout.
		 * TODO: "peer listens to" can be greater than 30 s due to
		 * latency.
		 */
2884 		conn->appto_reload = (conn->apto_reload > (lll->latency + 6)) ?
2885 				     (conn->apto_reload - (lll->latency + 6)) :
2886 				     conn->apto_reload;
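		/* Worked example (illustrative only): at a 7.5 ms interval,
		 * apto_reload = 30 s / 7.5 ms = 4000 events; with latency 0,
		 * appto_reload = 4000 - 6 = 3994 events.
		 */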
2887 #endif /* CONFIG_BT_CTLR_LE_PING */
2888 
2889 		if (conn->llcp_cu.cmd) {
2890 			conn->supervision_expire = 0U;
2891 		}
2892 
2893 #if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
		/* Disable the ticker job, in order to chain the stop and
		 * start operations and avoid the RTC being stopped if no
		 * tickers are active.
		 */
2897 		uint32_t mayfly_was_enabled =
2898 			mayfly_is_enabled(TICKER_USER_ID_ULL_HIGH,
2899 					  TICKER_USER_ID_ULL_LOW);
2900 		mayfly_enable(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_ULL_LOW,
2901 			      0);
2902 #endif
2903 
2904 		/* start peripheral/central with new timings */
2905 		ticker_id_conn = TICKER_ID_CONN_BASE + ll_conn_handle_get(conn);
	ticker_status = ticker_stop(TICKER_INSTANCE_ID_CTLR,
2907 					    TICKER_USER_ID_ULL_HIGH,
2908 					    ticker_id_conn,
2909 					    ticker_stop_conn_op_cb,
2910 					    (void *)conn);
2911 		LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
2912 			  (ticker_status == TICKER_STATUS_BUSY));
2913 		ticker_status =
2914 			ticker_start(TICKER_INSTANCE_ID_CTLR,
2915 				     TICKER_USER_ID_ULL_HIGH,
2916 				     ticker_id_conn,
2917 				     ticks_at_expire, ticks_win_offset,
2918 				     HAL_TICKER_US_TO_TICKS(periodic_us),
2919 				     HAL_TICKER_REMAINDER(periodic_us),
2920 #if defined(CONFIG_BT_TICKER_LOW_LAT)
2921 				     TICKER_NULL_LAZY,
2922 #else
2923 				     TICKER_LAZY_MUST_EXPIRE_KEEP,
2924 #endif /* CONFIG_BT_TICKER_LOW_LAT */
2925 				     (ticks_slot_overhead +
2926 				      conn->ull.ticks_slot),
2927 #if defined(CONFIG_BT_PERIPHERAL) && defined(CONFIG_BT_CENTRAL)
2928 				     lll->role ? ull_periph_ticker_cb :
2929 						 ull_central_ticker_cb,
2930 #elif defined(CONFIG_BT_PERIPHERAL)
2931 				     ull_periph_ticker_cb,
2932 #else
2933 				     ull_central_ticker_cb,
2934 #endif
2935 				     conn, ticker_start_conn_op_cb,
2936 				     (void *)conn);
2937 		LL_ASSERT((ticker_status == TICKER_STATUS_SUCCESS) ||
2938 			  (ticker_status == TICKER_STATUS_BUSY));
2939 
2940 #if (CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
2941 		/* enable ticker job, if disabled in this function */
2942 		if (mayfly_was_enabled) {
2943 			mayfly_enable(TICKER_USER_ID_ULL_HIGH,
2944 				      TICKER_USER_ID_ULL_LOW, 1);
2945 		}
2946 #endif
2947 
2948 		return 0;
2949 	}
2950 
2951 	return -EINPROGRESS;
2952 }
2953 
2954 static inline void event_ch_map_prep(struct ll_conn *conn,
2955 				     uint16_t event_counter)
2956 {
2957 	if (conn->llcp.chan_map.initiate) {
2958 		struct node_tx *tx;
2959 
2960 		tx = mem_acquire(&mem_conn_tx_ctrl.free);
2961 		if (tx) {
2962 			struct pdu_data *pdu_ctrl_tx = (void *)tx->pdu;
2963 
2964 			/* reset initiate flag */
2965 			conn->llcp.chan_map.initiate = 0U;
2966 
2967 			/* set instant */
2968 			conn->llcp.chan_map.instant = event_counter +
2969 						      conn->lll.latency + 6;
2970 
2971 			/* place the channel map req packet as next in
2972 			 * tx queue
2973 			 */
2974 			pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL;
2975 			pdu_ctrl_tx->len = offsetof(struct pdu_data_llctrl,
2976 						    chan_map_ind) +
2977 				sizeof(struct pdu_data_llctrl_chan_map_ind);
2978 			pdu_ctrl_tx->llctrl.opcode =
2979 				PDU_DATA_LLCTRL_TYPE_CHAN_MAP_IND;
2980 			memcpy(&pdu_ctrl_tx->llctrl.chan_map_ind.chm[0],
2981 			       &conn->llcp.chan_map.chm[0],
2982 			       sizeof(pdu_ctrl_tx->llctrl.chan_map_ind.chm));
2983 			pdu_ctrl_tx->llctrl.chan_map_ind.instant =
2984 				sys_cpu_to_le16(conn->llcp.chan_map.instant);
2985 
2986 			ctrl_tx_enqueue(conn, tx);
2987 		}
2988 	} else if (((event_counter - conn->llcp.chan_map.instant) & 0xFFFF)
2989 			    <= 0x7FFF) {
2990 		struct lll_conn *lll = &conn->lll;
2991 
2992 #if defined(CONFIG_BT_PERIPHERAL) && defined(CONFIG_BT_CTLR_LE_ENC)
2993 		if (conn->lll.role && (conn->periph.llcp_type != LLCP_NONE)) {
2994 			/* Local peripheral initiated channel map update
2995 			 * completed while a remote central had initiated
2996 			 * encryption procedure
2997 			 */
2998 			conn->periph.llcp_type = LLCP_NONE;
2999 		} else
3000 #endif /* CONFIG_BT_PERIPHERAL && CONFIG_BT_CTLR_LE_ENC */
3001 		{
3002 			/* procedure request acked */
3003 			conn->llcp_ack = conn->llcp_req;
3004 		}
3005 
3006 		/* copy to active channel map */
3007 		memcpy(&lll->data_chan_map[0],
3008 		       &conn->llcp.chan_map.chm[0],
3009 		       sizeof(lll->data_chan_map));
3010 		lll->data_chan_count =
3011 			util_ones_count_get(&lll->data_chan_map[0],
3012 					    sizeof(lll->data_chan_map));
3013 		conn->chm_updated = 1U;
3014 	}
3016 }
3017 
3018 #if defined(CONFIG_BT_CTLR_LE_ENC)
3019 static inline void event_enc_reject_prep(struct ll_conn *conn,
3020 					 struct pdu_data *pdu)
3021 {
3022 	pdu->ll_id = PDU_DATA_LLID_CTRL;
3023 
3024 	if (conn->common.fex_valid &&
3025 	    (conn->llcp_feature.features_conn &
3026 	     BIT64(BT_LE_FEAT_BIT_EXT_REJ_IND))) {
3027 		struct pdu_data_llctrl_reject_ext_ind *p;
3028 
3029 		pdu->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND;
3030 
3031 		p = (void *)&pdu->llctrl.reject_ext_ind;
3032 		p->reject_opcode = PDU_DATA_LLCTRL_TYPE_ENC_REQ;
3033 		p->error_code = conn->llcp.encryption.error_code;
3034 
3035 		pdu->len = sizeof(struct pdu_data_llctrl_reject_ext_ind);
3036 	} else {
3037 		struct pdu_data_llctrl_reject_ind *p;
3038 
3039 		pdu->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_REJECT_IND;
3040 
3041 		p = (void *)&pdu->llctrl.reject_ind;
3042 		p->error_code =	conn->llcp.encryption.error_code;
3043 
3044 		pdu->len = sizeof(struct pdu_data_llctrl_reject_ind);
3045 	}
3046 
3047 	pdu->len += offsetof(struct pdu_data_llctrl, reject_ind);
3048 
3049 	conn->llcp.encryption.error_code = 0U;
3050 }
3051 
3052 static inline void event_enc_prep(struct ll_conn *conn)
3053 {
3054 	struct lll_conn *lll = &conn->lll;
3055 	struct pdu_data *pdu_ctrl_tx;
3056 	struct node_tx *tx;
3057 
3058 	if (conn->llcp.encryption.state) {
3059 #if defined(CONFIG_BT_PERIPHERAL) && !defined(CONFIG_BT_CTLR_FAST_ENC)
3060 		if (lll->role &&
3061 		    (conn->llcp.encryption.state == LLCP_ENC_STATE_INIT)) {
3062 			struct node_rx_pdu *rx;
3063 			struct pdu_data *pdu;
3064 			uint8_t err;
3065 
3066 			/* TODO BT Spec. text: may finalize the sending
3067 			 * of additional data channel PDUs queued in the
3068 			 * controller.
3069 			 */
3070 			err = enc_rsp_send(conn);
3071 			if (err) {
3072 				return;
3073 			}
3074 
3075 			/* get a rx node for ULL->LL */
3076 			rx = ll_pdu_rx_alloc();
3077 			if (!rx) {
3078 				return;
3079 			}
3080 
3081 			/* prepare enc req structure */
3082 			rx->hdr.handle = conn->lll.handle;
3083 			rx->hdr.type = NODE_RX_TYPE_DC_PDU;
3084 			pdu = (void *)rx->pdu;
3085 			pdu->ll_id = PDU_DATA_LLID_CTRL;
3086 			pdu->len = offsetof(struct pdu_data_llctrl, enc_req) +
3087 				   sizeof(struct pdu_data_llctrl_enc_req);
3088 			pdu->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_ENC_REQ;
3089 			memcpy(&pdu->llctrl.enc_req.rand[0],
3090 			       &conn->llcp_enc.rand[0],
3091 			       sizeof(pdu->llctrl.enc_req.rand));
3092 			pdu->llctrl.enc_req.ediv[0] = conn->llcp_enc.ediv[0];
3093 			pdu->llctrl.enc_req.ediv[1] = conn->llcp_enc.ediv[1];
3094 
3095 			/* enqueue enc req structure into rx queue */
3096 			ll_rx_put(rx->hdr.link, rx);
3097 			ll_rx_sched();
3098 
3099 			/* Wait for LTK reply */
3100 			conn->llcp.encryption.state = LLCP_ENC_STATE_LTK_WAIT;
3101 		}
3102 #endif /* CONFIG_BT_PERIPHERAL && !CONFIG_BT_CTLR_FAST_ENC */
3103 
3104 		return;
3105 	}
3106 
3107 	tx = mem_acquire(&mem_conn_tx_ctrl.free);
3108 	if (!tx) {
3109 		return;
3110 	}
3111 
3112 	pdu_ctrl_tx = (void *)tx->pdu;
3113 
	/* central sends encrypted enc start rsp at control priority */
3115 	if (!lll->role) {
3116 		/* calc the Session Key */
3117 		ecb_encrypt(&conn->llcp_enc.ltk[0],
3118 			    &conn->llcp.encryption.skd[0], NULL,
3119 			    &lll->ccm_rx.key[0]);
3120 
3121 		/* copy the Session Key */
3122 		memcpy(&lll->ccm_tx.key[0], &lll->ccm_rx.key[0],
3123 		       sizeof(lll->ccm_tx.key));
3124 
3125 		/* copy the IV */
3126 		memcpy(&lll->ccm_tx.iv[0], &lll->ccm_rx.iv[0],
3127 		       sizeof(lll->ccm_tx.iv));
3128 
3129 		/* initialise counter */
3130 		lll->ccm_rx.counter = 0;
3131 		lll->ccm_tx.counter = 0;
3132 
3133 		/* set direction: peripheral to central = 0,
3134 		 * central to peripheral = 1
3135 		 */
3136 		lll->ccm_rx.direction = 0;
3137 		lll->ccm_tx.direction = 1;
3138 
3139 		/* enable receive encryption */
3140 		lll->enc_rx = 1;
3141 
3142 		/* send enc start resp */
3143 		start_enc_rsp_send(conn, pdu_ctrl_tx);
3144 
3145 		ctrl_tx_enqueue(conn, tx);
3146 	}
3147 
	/* peripheral sends reject ind or start enc req at control priority */
3149 
3150 #if defined(CONFIG_BT_CTLR_FAST_ENC)
3151 	else {
3152 #else /* !CONFIG_BT_CTLR_FAST_ENC */
3153 	else if (!lll->enc_rx) {
3154 #endif /* !CONFIG_BT_CTLR_FAST_ENC */
3155 
3156 		/* place the reject ind packet as next in tx queue */
3157 		if (conn->llcp.encryption.error_code) {
3158 			event_enc_reject_prep(conn, pdu_ctrl_tx);
3159 
3160 			ctrl_tx_enqueue(conn, tx);
3161 
3162 			/* procedure request acked */
3163 			conn->llcp_ack = conn->llcp_req;
3164 
3165 			return;
3166 		}
3167 		/* place the start enc req packet as next in tx queue */
3168 		else {
3169 			/* calc the Session Key */
3170 			ecb_encrypt(&conn->llcp_enc.ltk[0],
3171 				    &conn->llcp.encryption.skd[0], NULL,
3172 				    &lll->ccm_rx.key[0]);
3173 
3174 			/* copy the Session Key */
3175 			memcpy(&lll->ccm_tx.key[0],
3176 			       &lll->ccm_rx.key[0],
3177 			       sizeof(lll->ccm_tx.key));
3178 
3179 			/* copy the IV */
3180 			memcpy(&lll->ccm_tx.iv[0], &lll->ccm_rx.iv[0],
3181 			       sizeof(lll->ccm_tx.iv));
3182 
3183 			/* initialise counter */
3184 			lll->ccm_rx.counter = 0U;
3185 			lll->ccm_tx.counter = 0U;
3186 
3187 			/* set direction: peripheral to central = 0,
3188 			 * central to peripheral = 1
3189 			 */
3190 			lll->ccm_rx.direction = 1U;
3191 			lll->ccm_tx.direction = 0U;
3192 
3193 			/* enable receive encryption (transmit turned
3194 			 * on when start enc resp from central is
3195 			 * received)
3196 			 */
3197 			lll->enc_rx = 1U;
3198 
3199 			/* prepare the start enc req */
3200 			pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL;
3201 			pdu_ctrl_tx->len = offsetof(struct pdu_data_llctrl,
3202 						    start_enc_req) +
3203 				sizeof(struct pdu_data_llctrl_start_enc_req);
3204 			pdu_ctrl_tx->llctrl.opcode =
3205 				PDU_DATA_LLCTRL_TYPE_START_ENC_REQ;
3206 
3207 			ctrl_tx_enqueue(conn, tx);
3208 		}
3209 
3210 #if !defined(CONFIG_BT_CTLR_FAST_ENC)
3211 	/* Peripheral sends start enc rsp after reception of start enc rsp */
3212 	} else {
3213 		start_enc_rsp_send(conn, pdu_ctrl_tx);
3214 
3215 		ctrl_tx_enqueue(conn, tx);
3216 #endif /* !CONFIG_BT_CTLR_FAST_ENC */
3217 	}
3218 
3219 	/* Wait for encryption setup to complete */
3220 	conn->llcp.encryption.state = LLCP_ENC_STATE_ENC_WAIT;
3221 }
3222 #endif /* CONFIG_BT_CTLR_LE_ENC */
3223 
3224 static inline void event_fex_prep(struct ll_conn *conn)
3225 {
3226 	struct node_tx *tx;
3227 
3228 	/* If waiting for response, do nothing */
3229 	if (!((conn->llcp_feature.ack - conn->llcp_feature.req) & 0x01)) {
3230 		return;
3231 	}
3232 
3233 	if (conn->common.fex_valid) {
3234 		struct node_rx_pdu *rx;
3235 		struct pdu_data *pdu;
3236 
3237 		/* get a rx node for ULL->LL */
3238 		rx = ll_pdu_rx_alloc();
3239 		if (!rx) {
3240 			return;
3241 		}
3242 
3243 		/* procedure request acked */
3244 		conn->llcp_feature.ack = conn->llcp_feature.req;
3245 
3246 		/* prepare feature rsp structure */
3247 		rx->hdr.handle = conn->lll.handle;
3248 		rx->hdr.type = NODE_RX_TYPE_DC_PDU;
3249 		pdu = (void *)rx->pdu;
3250 		pdu->ll_id = PDU_DATA_LLID_CTRL;
3251 		pdu->len = offsetof(struct pdu_data_llctrl, feature_rsp) +
3252 			   sizeof(struct pdu_data_llctrl_feature_rsp);
3253 		pdu->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_FEATURE_RSP;
3254 		(void)memset(&pdu->llctrl.feature_rsp.features[0], 0x00,
3255 			sizeof(pdu->llctrl.feature_rsp.features));
		sys_put_le64(conn->llcp_feature.features_peer,
			     pdu->llctrl.feature_rsp.features);
3258 
3259 		/* enqueue feature rsp structure into rx queue */
3260 		ll_rx_put(rx->hdr.link, rx);
3261 		ll_rx_sched();
3262 
3263 		return;
3264 	}
3265 
3266 	tx = mem_acquire(&mem_conn_tx_ctrl.free);
3267 	if (tx) {
3268 		struct pdu_data *pdu = (void *)tx->pdu;
3269 
3270 		/* procedure request acked, move to waiting state */
3271 		conn->llcp_feature.ack--;
3272 
3273 		/* place the feature exchange req packet as next in tx queue */
3274 		pdu->ll_id = PDU_DATA_LLID_CTRL;
3275 		pdu->len = offsetof(struct pdu_data_llctrl, feature_req) +
3276 			   sizeof(struct pdu_data_llctrl_feature_req);
3277 		pdu->llctrl.opcode = !conn->lll.role ?
3278 				     PDU_DATA_LLCTRL_TYPE_FEATURE_REQ :
3279 				     PDU_DATA_LLCTRL_TYPE_PER_INIT_FEAT_XCHG;
3280 		(void)memset(&pdu->llctrl.feature_req.features[0],
3281 			     0x00,
3282 			     sizeof(pdu->llctrl.feature_req.features));
3283 		sys_put_le64(conn->llcp_feature.features_conn,
3284 			     pdu->llctrl.feature_req.features);
3285 
3286 		ctrl_tx_enqueue(conn, tx);
3287 
3288 		/* Start Procedure Timeout (TODO: this shall not replace
3289 		 * terminate procedure)
3290 		 */
3291 		conn->procedure_expire = conn->procedure_reload;
3292 	}
3294 }
3295 
3296 static inline void event_vex_prep(struct ll_conn *conn)
3297 {
3298 	/* If waiting for response, do nothing */
3299 	if (!((conn->llcp_version.ack - conn->llcp_version.req) & 0x01)) {
3300 		return;
3301 	}
3302 
3303 	if (conn->llcp_version.tx == 0U) {
3304 		struct node_tx *tx;
3305 
3306 		tx = mem_acquire(&mem_conn_tx_ctrl.free);
3307 		if (tx) {
3308 			struct pdu_data *pdu = (void *)tx->pdu;
3309 			uint16_t cid;
3310 			uint16_t svn;
3311 
			/* procedure request acked, move to waiting state */
3313 			conn->llcp_version.ack--;
3314 
3315 			/* set version ind tx-ed flag */
3316 			conn->llcp_version.tx = 1U;
3317 
3318 			/* place the version ind packet as next in tx queue */
3319 			pdu->ll_id = PDU_DATA_LLID_CTRL;
3320 			pdu->len =
3321 				offsetof(struct pdu_data_llctrl, version_ind) +
3322 				sizeof(struct pdu_data_llctrl_version_ind);
3323 			pdu->llctrl.opcode =
3324 				PDU_DATA_LLCTRL_TYPE_VERSION_IND;
3325 			pdu->llctrl.version_ind.version_number =
3326 				LL_VERSION_NUMBER;
3327 			cid = sys_cpu_to_le16(ll_settings_company_id());
3328 			svn = sys_cpu_to_le16(ll_settings_subversion_number());
3329 			pdu->llctrl.version_ind.company_id = cid;
3330 			pdu->llctrl.version_ind.sub_version_number = svn;
3331 
3332 			ctrl_tx_enqueue(conn, tx);
3333 
3334 			/* Start Procedure Timeout (TODO: this shall not
3335 			 * replace terminate procedure)
3336 			 */
3337 			conn->procedure_expire = conn->procedure_reload;
3338 		}
3339 	} else if (conn->llcp_version.rx) {
3340 		struct node_rx_pdu *rx;
3341 		struct pdu_data *pdu;
3342 
3343 		/* get a rx node for ULL->LL */
3344 		rx = ll_pdu_rx_alloc();
3345 		if (!rx) {
3346 			return;
3347 		}
3348 
3349 		/* procedure request acked */
3350 		conn->llcp_version.ack = conn->llcp_version.req;
3351 
3352 		rx->hdr.handle = conn->lll.handle;
3353 		rx->hdr.type = NODE_RX_TYPE_DC_PDU;
3354 
3355 		/* prepare version ind structure */
3356 		pdu = (void *)rx->pdu;
3357 		pdu->ll_id = PDU_DATA_LLID_CTRL;
3358 		pdu->len = offsetof(struct pdu_data_llctrl, version_ind) +
3359 			   sizeof(struct pdu_data_llctrl_version_ind);
3360 		pdu->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_VERSION_IND;
3361 		pdu->llctrl.version_ind.version_number =
3362 			conn->llcp_version.version_number;
3363 		pdu->llctrl.version_ind.company_id =
3364 			sys_cpu_to_le16(conn->llcp_version.company_id);
3365 		pdu->llctrl.version_ind.sub_version_number =
3366 			sys_cpu_to_le16(conn->llcp_version.sub_version_number);
3367 
3368 		/* enqueue version ind structure into rx queue */
3369 		ll_rx_put(rx->hdr.link, rx);
3370 		ll_rx_sched();
3371 	} else {
3372 		/* tx-ed but no rx, and new request placed */
3373 		LL_ASSERT(0);
3374 	}
3375 }
3376 
3377 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
3378 static inline void event_conn_param_req(struct ll_conn *conn,
3379 					uint16_t event_counter,
3380 					uint32_t ticks_at_expire)
3381 {
3382 	struct pdu_data_llctrl_conn_param_req *p;
3383 	struct pdu_data *pdu_ctrl_tx;
3384 	struct node_tx *tx;
3385 
3386 	tx = mem_acquire(&mem_conn_tx_ctrl.free);
3387 	if (!tx) {
3388 		return;
3389 	}
3390 
3391 	/* place the conn param req packet as next in tx queue */
3392 	pdu_ctrl_tx = (void *)tx->pdu;
3393 	pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL;
3394 	pdu_ctrl_tx->len = offsetof(struct pdu_data_llctrl, conn_param_req) +
3395 		sizeof(struct pdu_data_llctrl_conn_param_req);
3396 	pdu_ctrl_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ;
3397 	p = (void *)&pdu_ctrl_tx->llctrl.conn_param_req;
3398 	p->interval_min = sys_cpu_to_le16(conn->llcp_conn_param.interval_min);
3399 	p->interval_max = sys_cpu_to_le16(conn->llcp_conn_param.interval_max);
3400 	p->latency = sys_cpu_to_le16(conn->llcp_conn_param.latency);
3401 	p->timeout = sys_cpu_to_le16(conn->llcp_conn_param.timeout);
3402 	p->preferred_periodicity = 0U;
3403 	p->offset0 = sys_cpu_to_le16(0x0000);
3404 	p->offset1 = sys_cpu_to_le16(0xffff);
3405 	p->offset2 = sys_cpu_to_le16(0xffff);
3406 	p->offset3 = sys_cpu_to_le16(0xffff);
3407 	p->offset4 = sys_cpu_to_le16(0xffff);
3408 	p->offset5 = sys_cpu_to_le16(0xffff);
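
	/* Offsets other than offset0 are set to 0xffff, the "not valid"
	 * marker, so only the first anchor offset (computed below when
	 * advanced scheduling is enabled, else left as zero) is proposed to
	 * the peer.
	 */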
3409 
3410 	/* Set CPR mutex */
3411 	cpr_active_set(conn);
3412 
3413 	/* Start Procedure Timeout (TODO: this shall not replace
3414 	 * terminate procedure).
3415 	 */
3416 	conn->procedure_expire = conn->procedure_reload;
3417 
3418 #if defined(CONFIG_BT_CTLR_SCHED_ADVANCED)
3419 	/* move to wait for offset calculations */
3420 	conn->llcp_conn_param.state = LLCP_CPR_STATE_OFFS_REQ;
3421 
3422 	{
3423 		static memq_link_t s_link;
3424 		static struct mayfly s_mfy_sched_offset = {0, 0, &s_link, NULL,
3425 			ull_sched_mfy_free_win_offset_calc};
3426 		uint32_t retval;
3427 
3428 		conn->llcp_conn_param.ticks_ref = ticks_at_expire;
3429 
3430 #if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
3431 		if (conn->ull.ticks_prepare_to_start & XON_BITMASK) {
3432 			uint32_t ticks_prepare_to_start =
3433 				MAX(conn->ull.ticks_active_to_start,
3434 				    conn->ull.ticks_preempt_to_start);
3435 
3436 			conn->llcp_conn_param.ticks_ref -=
3437 				(conn->ull.ticks_prepare_to_start &
3438 				 ~XON_BITMASK) - ticks_prepare_to_start;
3439 		}
3440 #endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
3441 
3442 		conn->llcp_conn_param.pdu_win_offset0 = (uint16_t *)&p->offset0;
3443 
3444 		s_mfy_sched_offset.param = (void *)conn;
3445 
3446 		retval = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
3447 					TICKER_USER_ID_ULL_LOW, 1,
3448 					&s_mfy_sched_offset);
3449 		LL_ASSERT(!retval);
3450 	}
3451 #else /* !CONFIG_BT_CTLR_SCHED_ADVANCED */
3452 	ARG_UNUSED(ticks_at_expire);
3453 
3454 	/* set reference counter value */
3455 	p->reference_conn_event_count = sys_cpu_to_le16(event_counter);
3456 	/* move to wait for conn_update_rsp/rej */
3457 	conn->llcp_conn_param.state = LLCP_CPR_STATE_RSP_WAIT;
3458 	/* enqueue control PDU */
3459 	ctrl_tx_enqueue(conn, tx);
3460 #endif /* !CONFIG_BT_CTLR_SCHED_ADVANCED */
3461 }
3462 
3463 static inline void event_conn_param_rsp(struct ll_conn *conn)
3464 {
3465 	struct pdu_data_llctrl_conn_param_rsp *rsp;
3466 	struct node_tx *tx;
3467 	struct pdu_data *pdu;
3468 
3469 	/* handle rejects */
3470 	if (conn->llcp_conn_param.status) {
3471 		struct pdu_data_llctrl_reject_ext_ind *rej;
3472 
3473 		tx = mem_acquire(&mem_conn_tx_ctrl.free);
3474 		if (!tx) {
3475 			return;
3476 		}
3477 
3478 		/* central/peripheral response with reject ext ind */
3479 		pdu = (void *)tx->pdu;
3480 		pdu->ll_id = PDU_DATA_LLID_CTRL;
3481 		pdu->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND;
3482 		pdu->len = offsetof(struct pdu_data_llctrl, reject_ext_ind) +
3483 			   sizeof(struct pdu_data_llctrl_reject_ext_ind);
3484 
3485 		rej = (void *)&pdu->llctrl.reject_ext_ind;
3486 		rej->reject_opcode = PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ;
3487 		rej->error_code = conn->llcp_conn_param.status;
3488 
3489 		ctrl_tx_enqueue(conn, tx);
3490 
3491 		/* procedure request acked */
3492 		conn->llcp_conn_param.ack = conn->llcp_conn_param.req;
3493 
3494 		/* Reset CPR mutex */
3495 		cpr_active_reset();
3496 
3497 		return;
3498 	}
3499 
3500 	/* central respond with connection update */
3501 	if (!conn->lll.role) {
3502 		uint16_t interval_max;
3503 		uint8_t preferred_periodicity;
3504 
3505 		if (conn->llcp_cu.req != conn->llcp_cu.ack) {
3506 			return;
3507 		}
3508 
3509 		/* Move to waiting for connection update completion */
3510 		conn->llcp_conn_param.state = LLCP_CPR_STATE_UPD;
3511 
3512 		/* Initiate connection update procedure */
3513 		conn->llcp_cu.win_size = 1U;
3514 		conn->llcp_cu.win_offset_us = 0U;
3515 
3516 		interval_max = conn->llcp_conn_param.interval_max;
3517 		preferred_periodicity = conn->llcp_conn_param.preferred_periodicity;
3518 		if (preferred_periodicity) {
3519 			/* Find interval with preferred periodicity by rounding down from max */
3520 			conn->llcp_cu.interval = (interval_max / preferred_periodicity) *
3521 						  preferred_periodicity;
3522 			/* Use maximum in case of underflowing minimum interval */
3523 			if (conn->llcp_cu.interval < conn->llcp_conn_param.interval_min) {
3524 				conn->llcp_cu.interval = interval_max;
3525 			}
3526 		} else {
3527 			/* Choose maximum interval as default */
3528 			conn->llcp_cu.interval = interval_max;
3529 		}
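
		/* Worked example (illustrative only): interval_min = 16,
		 * interval_max = 24 and preferred_periodicity = 10 give
		 * (24 / 10) * 10 = 20, which is used; had interval_min been
		 * 21, the rounded-down 20 would underflow it and
		 * interval_max = 24 would be used instead.
		 */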
3530 		conn->llcp_cu.latency = conn->llcp_conn_param.latency;
3531 		conn->llcp_cu.timeout = conn->llcp_conn_param.timeout;
3532 		conn->llcp_cu.state = LLCP_CUI_STATE_SELECT;
3533 		conn->llcp_cu.cmd = conn->llcp_conn_param.cmd;
3534 		conn->llcp_cu.ack--;
3535 
3536 		return;
3537 	}
3538 
3539 	/* peripheral response with connection parameter response */
3540 	tx = mem_acquire(&mem_conn_tx_ctrl.free);
3541 	if (!tx) {
3542 		return;
3543 	}
3544 
3545 	/* place the conn param rsp packet as next in tx queue */
3546 	pdu = (void *)tx->pdu;
3547 	pdu->ll_id = PDU_DATA_LLID_CTRL;
3548 	pdu->len = offsetof(struct pdu_data_llctrl, conn_param_rsp) +
3549 		sizeof(struct pdu_data_llctrl_conn_param_rsp);
3550 	pdu->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_CONN_PARAM_RSP;
3551 	rsp = (void *)&pdu->llctrl.conn_param_rsp;
3552 	rsp->interval_min =
3553 		sys_cpu_to_le16(conn->llcp_conn_param.interval_min);
3554 	rsp->interval_max =
3555 		sys_cpu_to_le16(conn->llcp_conn_param.interval_max);
3556 	rsp->latency =
3557 		sys_cpu_to_le16(conn->llcp_conn_param.latency);
3558 	rsp->timeout =
3559 		sys_cpu_to_le16(conn->llcp_conn_param.timeout);
3560 	rsp->preferred_periodicity =
3561 		conn->llcp_conn_param.preferred_periodicity;
3562 	rsp->reference_conn_event_count =
3563 		sys_cpu_to_le16(conn->llcp_conn_param.reference_conn_event_count);
3564 	rsp->offset0 = sys_cpu_to_le16(conn->llcp_conn_param.offset0);
3565 	rsp->offset1 = sys_cpu_to_le16(conn->llcp_conn_param.offset1);
3566 	rsp->offset2 = sys_cpu_to_le16(conn->llcp_conn_param.offset2);
3567 	rsp->offset3 = sys_cpu_to_le16(conn->llcp_conn_param.offset3);
3568 	rsp->offset4 = sys_cpu_to_le16(conn->llcp_conn_param.offset4);
3569 	rsp->offset5 = sys_cpu_to_le16(conn->llcp_conn_param.offset5);
3570 
3571 	ctrl_tx_enqueue(conn, tx);
3572 
3573 	/* Wait for connection update to be initiated by
3574 	 * peer central device
3575 	 */
3576 	conn->llcp_conn_param.state = LLCP_CPR_STATE_UPD_WAIT;
3577 }
3578 
3579 static inline void event_conn_param_app_req(struct ll_conn *conn)
3580 {
3581 	struct pdu_data_llctrl_conn_param_req *p;
3582 	struct node_rx_pdu *rx;
3583 	struct pdu_data *pdu;
3584 
3585 #if defined(CONFIG_BT_CTLR_LE_ENC)
3586 	/* defer until encryption setup is complete */
3587 	if (conn->llcp_enc.pause_tx) {
3588 		return;
3589 	}
3590 #endif /* CONFIG_BT_CTLR_LE_ENC */
3591 
3592 	/* wait for free rx buffer */
3593 	rx = ll_pdu_rx_alloc();
3594 	if (!rx) {
3595 		return;
3596 	}
3597 
3598 	/* move to wait for conn_update/rsp/rej */
3599 	conn->llcp_conn_param.state = LLCP_CPR_STATE_APP_WAIT;
3600 
	/* Emulate an Rx-ed CPR data channel PDU */
3602 	rx->hdr.handle = conn->lll.handle;
3603 	rx->hdr.type = NODE_RX_TYPE_DC_PDU;
3604 
3605 	/* place the conn param req packet as next in rx queue */
3606 	pdu = (void *)rx->pdu;
3607 	pdu->ll_id = PDU_DATA_LLID_CTRL;
3608 	pdu->len = offsetof(struct pdu_data_llctrl, conn_param_req) +
3609 		sizeof(struct pdu_data_llctrl_conn_param_req);
3610 	pdu->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ;
3611 	p = (void *) &pdu->llctrl.conn_param_req;
3612 	p->interval_min = sys_cpu_to_le16(conn->llcp_conn_param.interval_min);
3613 	p->interval_max = sys_cpu_to_le16(conn->llcp_conn_param.interval_max);
3614 	p->latency = sys_cpu_to_le16(conn->llcp_conn_param.latency);
3615 	p->timeout = sys_cpu_to_le16(conn->llcp_conn_param.timeout);
3616 
3617 	/* enqueue connection parameter request into rx queue */
3618 	ll_rx_put(rx->hdr.link, rx);
3619 	ll_rx_sched();
3620 }
3621 
3622 static inline void event_conn_param_prep(struct ll_conn *conn,
3623 					 uint16_t event_counter,
3624 					 uint32_t ticks_at_expire)
3625 {
3626 	/* Defer new CPR if another in progress across active connections */
3627 	if (cpr_active_is_set(conn)) {
3628 		return;
3629 	}
3630 
3631 	switch (conn->llcp_conn_param.state) {
3632 	case LLCP_CPR_STATE_REQ:
3633 		event_conn_param_req(conn, event_counter, ticks_at_expire);
3634 		break;
3635 
3636 	case LLCP_CPR_STATE_RSP:
3637 		event_conn_param_rsp(conn);
3638 		break;
3639 
3640 	case LLCP_CPR_STATE_APP_REQ:
3641 		event_conn_param_app_req(conn);
3642 		break;
3643 
3644 	case LLCP_CPR_STATE_APP_WAIT:
3645 	case LLCP_CPR_STATE_RSP_WAIT:
3646 	case LLCP_CPR_STATE_UPD_WAIT:
3647 	case LLCP_CPR_STATE_UPD:
3648 		/* Do nothing */
3649 		break;
3650 
3651 #if defined(CONFIG_BT_CTLR_SCHED_ADVANCED)
3652 	case LLCP_CPR_STATE_OFFS_REQ:
3653 		/* Do nothing */
3654 		break;
3655 
3656 	case LLCP_CPR_STATE_OFFS_RDY:
3657 	{
3658 		struct pdu_data *pdu_ctrl_tx;
3659 		struct node_tx *tx;
3660 
3661 		/* set reference counter value */
3662 		pdu_ctrl_tx =
3663 			CONTAINER_OF(conn->llcp_conn_param.pdu_win_offset0,
3664 				     struct pdu_data,
3665 				     llctrl.conn_param_req.offset0);
3666 		pdu_ctrl_tx->llctrl.conn_param_req.reference_conn_event_count =
3667 			sys_cpu_to_le16(event_counter);
3668 		/* move to wait for conn_update_rsp/rej */
3669 		conn->llcp_conn_param.state = LLCP_CPR_STATE_RSP_WAIT;
3670 		/* enqueue control PDU */
3671 		tx = CONTAINER_OF(pdu_ctrl_tx, struct node_tx, pdu);
3672 		ctrl_tx_enqueue(conn, tx);
3673 	}
3674 	break;
3675 #endif /* CONFIG_BT_CTLR_SCHED_ADVANCED */
3676 
3677 	default:
3678 		LL_ASSERT(0);
3679 		break;
3680 	}
3681 }
3682 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
3683 
3684 #if defined(CONFIG_BT_CTLR_LE_PING)
3685 static inline void event_ping_prep(struct ll_conn *conn)
3686 {
3687 	struct node_tx *tx;
3688 
3689 	tx = mem_acquire(&mem_conn_tx_ctrl.free);
3690 	if (tx) {
3691 		struct pdu_data *pdu_ctrl_tx = (void *)tx->pdu;
3692 
3693 		/* procedure request acked */
3694 		conn->llcp_ack = conn->llcp_req;
3695 
3696 		/* place the ping req packet as next in tx queue */
3697 		pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL;
3698 		pdu_ctrl_tx->len = offsetof(struct pdu_data_llctrl, ping_req) +
3699 				   sizeof(struct pdu_data_llctrl_ping_req);
3700 		pdu_ctrl_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_PING_REQ;
3701 
3702 		ctrl_tx_enqueue(conn, tx);
3703 
3704 		/* Start Procedure Timeout (TODO: this shall not replace
3705 		 * terminate procedure)
3706 		 */
3707 		conn->procedure_expire = conn->procedure_reload;
3708 	}
3709 
3710 }
3711 #endif /* CONFIG_BT_CTLR_LE_PING */
3712 
3713 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
3714 static inline void dle_max_time_get(const struct ll_conn *conn,
3715 				    uint16_t *max_rx_time,
3716 				    uint16_t *max_tx_time)
3717 {
3718 	uint64_t feature_coded_phy = 0;
3719 	uint64_t feature_phy_2m = 0;
3720 	uint16_t rx_time = 0;
3721 	uint16_t tx_time = 0;
3722 
3723 #if defined(CONFIG_BT_CTLR_PHY)
3724 #if defined(CONFIG_BT_CTLR_PHY_CODED)
3725 	feature_coded_phy = (conn->llcp_feature.features_conn &
3726 			     BIT64(BT_LE_FEAT_BIT_PHY_CODED));
3727 #else
3728 	feature_coded_phy = 0;
3729 #endif
3730 
3731 #if defined(CONFIG_BT_CTLR_PHY_2M)
3732 	feature_phy_2m = (conn->llcp_feature.features_conn &
3733 			  BIT64(BT_LE_FEAT_BIT_PHY_2M));
3734 #else
3735 	feature_phy_2m = 0;
3736 #endif
3737 #else
3738 	feature_coded_phy = 0;
3739 	feature_phy_2m = 0;
3740 #endif
3741 
3742 	if (!conn->common.fex_valid ||
3743 	    (!feature_coded_phy && !feature_phy_2m)) {
3744 		rx_time = PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, PHY_1M);
3745 #if defined(CONFIG_BT_CTLR_PHY)
3746 		tx_time = CLAMP(conn->default_tx_time,
3747 				PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M),
3748 				PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, PHY_1M));
3749 #else /* !CONFIG_BT_CTLR_PHY */
3750 		tx_time = PDU_DC_MAX_US(conn->default_tx_octets, PHY_1M);
3751 #endif /* !CONFIG_BT_CTLR_PHY */
3752 
3753 #if defined(CONFIG_BT_CTLR_PHY)
3754 #if defined(CONFIG_BT_CTLR_PHY_CODED)
3755 	} else if (feature_coded_phy) {
3756 		rx_time = MAX(PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX,
3757 					    PHY_CODED),
3758 			      PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN,
3759 					    PHY_CODED));
3760 		tx_time = MIN(PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX,
3761 					    PHY_CODED),
3762 			      conn->default_tx_time);
3763 		tx_time = MAX(PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN,
3764 					    PHY_1M), tx_time);
3765 #endif /* CONFIG_BT_CTLR_PHY_CODED */
3766 
3767 #if defined(CONFIG_BT_CTLR_PHY_2M)
3768 	} else if (feature_phy_2m) {
3769 		rx_time = MAX(PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX, PHY_2M),
3770 			      PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_2M));
3771 		tx_time = MAX(PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, PHY_1M),
3772 			      MIN(PDU_DC_MAX_US(LL_LENGTH_OCTETS_RX_MAX,
3773 						PHY_2M),
3774 				  conn->default_tx_time));
3775 #endif /* CONFIG_BT_CTLR_PHY_2M */
3776 #endif /* CONFIG_BT_CTLR_PHY */
3777 	}
3778 
3779 	/*
3780 	 * see Vol. 6 Part B chapter 4.5.10
3781 	 * minimum value for time is 328 us
3782 	 */
3783 	rx_time = MAX(PDU_DC_PAYLOAD_TIME_MIN, rx_time);
3784 	tx_time = MAX(PDU_DC_PAYLOAD_TIME_MIN, tx_time);
3785 
3786 	*max_rx_time = rx_time;
3787 	*max_tx_time = tx_time;
3788 }
3789 
3790 static inline void event_len_prep(struct ll_conn *conn)
3791 {
3792 	switch (conn->llcp_length.state) {
3793 	case LLCP_LENGTH_STATE_REQ:
3794 	{
3795 		struct pdu_data_llctrl_length_req *lr;
3796 		struct pdu_data *pdu_ctrl_tx;
3797 		struct node_tx *tx;
3798 		/*
3799 		 * initialize to 0 to eliminate compiler warnings
3800 		 */
3801 		uint16_t rx_time = 0;
3802 		uint16_t tx_time = 0;
3803 
3804 		tx = mem_acquire(&mem_conn_tx_ctrl.free);
3805 		if (!tx) {
3806 			return;
3807 		}
3808 
3809 		/* wait for resp before completing the procedure */
3810 		conn->llcp_length.state = LLCP_LENGTH_STATE_REQ_ACK_WAIT;
3811 
3812 		/* set the default tx octets/time to requested value */
3813 		conn->default_tx_octets = conn->llcp_length.tx_octets;
3814 
3815 #if defined(CONFIG_BT_CTLR_PHY)
3816 		conn->default_tx_time = conn->llcp_length.tx_time;
3817 #endif
3818 
3819 		/* place the length req packet as next in tx queue */
3820 		pdu_ctrl_tx = (void *) tx->pdu;
3821 		pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL;
3822 		pdu_ctrl_tx->len =
3823 			offsetof(struct pdu_data_llctrl, length_req) +
3824 			sizeof(struct pdu_data_llctrl_length_req);
3825 		pdu_ctrl_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_LENGTH_REQ;
3826 
3827 		lr = &pdu_ctrl_tx->llctrl.length_req;
3828 		lr->max_rx_octets = sys_cpu_to_le16(LL_LENGTH_OCTETS_RX_MAX);
3829 		lr->max_tx_octets = sys_cpu_to_le16(conn->default_tx_octets);
3830 
3831 		dle_max_time_get(conn, &rx_time, &tx_time);
3832 		lr->max_rx_time = sys_cpu_to_le16(rx_time);
3833 		lr->max_tx_time = sys_cpu_to_le16(tx_time);
3834 
3835 		ctrl_tx_enqueue(conn, tx);
3836 
3837 		/* Start Procedure Timeout (TODO: this shall not replace
3838 		 * terminate procedure).
3839 		 */
3840 		conn->procedure_expire = conn->procedure_reload;
3841 	}
3842 	break;
3843 
3844 	case LLCP_LENGTH_STATE_RESIZE:
3845 	case LLCP_LENGTH_STATE_RESIZE_RSP:
3846 	{
3847 		struct pdu_data_llctrl_length_rsp *lr;
3848 		struct pdu_data *pdu_ctrl_rx;
3849 		struct node_rx_pdu *rx;
3850 		struct lll_conn *lll;
3851 		uint16_t tx_octets;
3852 
3853 		lll = &conn->lll;
3854 
3855 		/* Use the new rx octets/time in the connection */
3856 		lll->max_rx_octets = conn->llcp_length.rx_octets;
3857 
3858 		/* backup tx_octets */
3859 		tx_octets = conn->llcp_length.tx_octets;
3860 
3861 #if defined(CONFIG_BT_CTLR_PHY)
3862 		/* Use the new rx time in the connection */
3863 		lll->max_rx_time = conn->llcp_length.rx_time;
3864 
3865 		/* backup tx time */
3866 		uint16_t tx_time = conn->llcp_length.tx_time;
3867 #endif /* CONFIG_BT_CTLR_PHY */
3868 
		/* Switch states: wait for ack, request cached values, or
		 * complete the procedure.
		 */
3872 		if (conn->llcp_length.state == LLCP_LENGTH_STATE_RESIZE) {
3873 			/* check cache */
3874 			if (!conn->llcp_length.cache.tx_octets) {
3875 				/* Procedure complete */
3876 				conn->llcp_length.ack = conn->llcp_length.req;
3877 				conn->procedure_expire = 0U;
3878 			} else {
3879 				/* Initiate cached procedure */
3880 				conn->llcp_length.tx_octets =
3881 					conn->llcp_length.cache.tx_octets;
3882 				conn->llcp_length.cache.tx_octets = 0;
3883 #if defined(CONFIG_BT_CTLR_PHY)
3884 				conn->llcp_length.tx_time =
3885 					conn->llcp_length.cache.tx_time;
3886 #endif /* CONFIG_BT_CTLR_PHY */
3887 				conn->llcp_length.state = LLCP_LENGTH_STATE_REQ;
3888 			}
3889 		} else {
3890 			conn->llcp_length.state =
3891 				LLCP_LENGTH_STATE_RESIZE_RSP_ACK_WAIT;
3892 		}
3893 
3894 		/* Prepare the rx packet structure */
3895 		rx = conn->llcp_rx;
3896 		LL_ASSERT(rx && rx->hdr.link);
3897 		conn->llcp_rx = rx->hdr.link->mem;
3898 
3899 		rx->hdr.handle = conn->lll.handle;
3900 		rx->hdr.type = NODE_RX_TYPE_DC_PDU;
3901 
3902 		/* prepare length rsp structure */
3903 		pdu_ctrl_rx = (void *)rx->pdu;
3904 		pdu_ctrl_rx->ll_id = PDU_DATA_LLID_CTRL;
3905 		pdu_ctrl_rx->len =
3906 			offsetof(struct pdu_data_llctrl, length_rsp) +
3907 			sizeof(struct pdu_data_llctrl_length_rsp);
3908 		pdu_ctrl_rx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_LENGTH_RSP;
3909 
3910 		lr = &pdu_ctrl_rx->llctrl.length_rsp;
3911 		lr->max_rx_octets = sys_cpu_to_le16(lll->max_rx_octets);
3912 		lr->max_tx_octets = sys_cpu_to_le16(tx_octets);
3913 #if !defined(CONFIG_BT_CTLR_PHY)
3914 		lr->max_rx_time =
3915 			sys_cpu_to_le16(PDU_DC_MAX_US(lll->max_rx_octets,
3916 						      PHY_1M));
3917 		lr->max_tx_time = sys_cpu_to_le16(PDU_DC_MAX_US(tx_octets,
3918 								PHY_1M));
3919 #else /* CONFIG_BT_CTLR_PHY */
3920 		lr->max_rx_time = sys_cpu_to_le16(lll->max_rx_time);
3921 		lr->max_tx_time = sys_cpu_to_le16(tx_time);
3922 #endif /* CONFIG_BT_CTLR_PHY */
3923 
3924 		/* enqueue rx node towards Thread */
3925 		ll_rx_put(rx->hdr.link, rx);
3926 		ll_rx_sched();
3927 	}
3928 	break;
3929 
3930 	case LLCP_LENGTH_STATE_REQ_ACK_WAIT:
3931 	case LLCP_LENGTH_STATE_RSP_WAIT:
3932 	case LLCP_LENGTH_STATE_RSP_ACK_WAIT:
3933 	case LLCP_LENGTH_STATE_RESIZE_RSP_ACK_WAIT:
		/* Do nothing */
3935 		break;
3936 
3937 	default:
3938 		LL_ASSERT(0);
3939 		break;
3940 	}
3941 }
3942 
3943 #if defined(CONFIG_BT_CTLR_PHY)
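/* Clamp the effective PDU time for max_octets on the given PHY: the airtime
 * is floored at the 328 us minimum, capped at the default (host preferred)
 * time and, on builds with Coded PHY support, floored again at the airtime
 * of a minimum-size PDU on that PHY.
 */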
3944 static uint16_t calc_eff_time(uint8_t max_octets, uint8_t phy, uint16_t default_time)
3945 {
3946 	uint16_t eff_time;
3947 
3948 	eff_time = MAX(PDU_DC_PAYLOAD_TIME_MIN, PDU_DC_MAX_US(max_octets, phy));
3949 	eff_time = MIN(eff_time, default_time);
3950 #if defined(CONFIG_BT_CTLR_PHY_CODED)
3951 	eff_time = MAX(eff_time, PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, phy));
3952 #endif
3953 
3954 	return eff_time;
3955 }
3956 #endif /* CONFIG_BT_CTLR_PHY */
3957 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
3958 
3959 #if defined(CONFIG_BT_CTLR_PHY)
3960 static inline void event_phy_req_prep(struct ll_conn *conn)
3961 {
3962 	switch (conn->llcp_phy.state) {
3963 	case LLCP_PHY_STATE_REQ:
3964 	{
3965 		struct pdu_data_llctrl_phy_req *pr;
3966 		struct pdu_data *pdu_ctrl_tx;
3967 		struct node_tx *tx;
3968 
3969 		tx = mem_acquire(&mem_conn_tx_ctrl.free);
3970 		if (!tx) {
3971 			break;
3972 		}
3973 
3974 		conn->llcp_phy.state = LLCP_PHY_STATE_ACK_WAIT;
3975 
3976 		/* update preferred phy */
3977 		conn->phy_pref_tx = conn->llcp_phy.tx;
3978 		conn->phy_pref_rx = conn->llcp_phy.rx;
3979 		conn->lll.phy_flags = conn->llcp_phy.flags;
3980 
3981 		/* place the phy req packet as next in tx queue */
3982 		pdu_ctrl_tx = (void *)tx->pdu;
3983 		pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL;
3984 		pdu_ctrl_tx->len =
3985 			offsetof(struct pdu_data_llctrl, phy_req) +
3986 			sizeof(struct pdu_data_llctrl_phy_req);
3987 		pdu_ctrl_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_PHY_REQ;
3988 
3989 		pr = &pdu_ctrl_tx->llctrl.phy_req;
3990 		pr->tx_phys = conn->llcp_phy.tx;
3991 		pr->rx_phys = conn->llcp_phy.rx;
3992 
3993 		ctrl_tx_enqueue(conn, tx);
3994 
3995 		/* Start Procedure Timeout (TODO: this shall not replace
3996 		 * terminate procedure).
3997 		 */
3998 		conn->procedure_expire = conn->procedure_reload;
3999 	}
4000 	break;
4001 
4002 	case LLCP_PHY_STATE_UPD:
4003 	{
4004 		/* Defer if another procedure in progress */
4005 		if (conn->llcp_ack != conn->llcp_req) {
4006 			return;
4007 		}
4008 
4009 		/* Procedure complete */
4010 		conn->llcp_phy.ack = conn->llcp_phy.req;
4011 
4012 		/* select only one tx phy, prefer 2M */
4013 		if (conn->llcp_phy.tx & PHY_2M) {
4014 			conn->llcp_phy.tx = PHY_2M;
4015 		} else if (conn->llcp_phy.tx & PHY_1M) {
4016 			conn->llcp_phy.tx = PHY_1M;
4017 		} else if (conn->llcp_phy.tx & PHY_CODED) {
4018 			conn->llcp_phy.tx = PHY_CODED;
4019 		} else {
4020 			conn->llcp_phy.tx = 0U;
4021 		}
4022 
4023 		/* select only one rx phy, prefer 2M */
4024 		if (conn->llcp_phy.rx & PHY_2M) {
4025 			conn->llcp_phy.rx = PHY_2M;
4026 		} else if (conn->llcp_phy.rx & PHY_1M) {
4027 			conn->llcp_phy.rx = PHY_1M;
4028 		} else if (conn->llcp_phy.rx & PHY_CODED) {
4029 			conn->llcp_phy.rx = PHY_CODED;
4030 		} else {
4031 			conn->llcp_phy.rx = 0U;
4032 		}
4033 
4034 		/* Initiate PHY Update Ind */
4035 		if (conn->llcp_phy.tx != conn->lll.phy_tx) {
4036 			conn->llcp.phy_upd_ind.tx = conn->llcp_phy.tx;
4037 		} else {
4038 			conn->llcp.phy_upd_ind.tx = 0U;
4039 		}
4040 		if (conn->llcp_phy.rx != conn->lll.phy_rx) {
4041 			conn->llcp.phy_upd_ind.rx = conn->llcp_phy.rx;
4042 		} else {
4043 			conn->llcp.phy_upd_ind.rx = 0U;
4044 		}
4045 		/* conn->llcp.phy_upd_ind.instant = 0; */
4046 		conn->llcp.phy_upd_ind.initiate = 1U;
4047 		conn->llcp.phy_upd_ind.cmd = conn->llcp_phy.cmd;
4048 
4049 		conn->llcp_type = LLCP_PHY_UPD;
4050 		conn->llcp_ack -= 2U;
4051 	}
4052 	break;
4053 
4054 	case LLCP_PHY_STATE_ACK_WAIT:
4055 	case LLCP_PHY_STATE_RSP_WAIT:
		/* Do nothing */
4057 		break;
4058 
4059 	default:
4060 		LL_ASSERT(0);
4061 		break;
4062 	}
4063 }
4064 
4065 static inline void event_phy_upd_ind_prep(struct ll_conn *conn,
4066 					  uint16_t event_counter)
4067 {
4068 	struct node_rx_pu *upd;
4069 
4070 	if (conn->llcp.phy_upd_ind.initiate) {
4071 		struct pdu_data_llctrl_phy_upd_ind *ind;
4072 		struct pdu_data *pdu_ctrl_tx;
4073 		struct node_rx_pdu *rx;
4074 		struct node_tx *tx;
4075 
		/* Delay until all pending Tx in LLL is acknowledged; while
		 * conn->llcp_phy.pause_tx is true, new Tx PDUs will not be
		 * enqueued until we proceed to initiate the PHY update.
		 * This is required to ensure the PDU carrying the instant
		 * can be transmitted before the instant expires.
		 */
4082 		if (memq_peek(conn->lll.memq_tx.head, conn->lll.memq_tx.tail,
4083 			      NULL)) {
4084 			return;
4085 		}
4086 
4087 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
4088 		rx = ll_pdu_rx_alloc_peek(2);
4089 #else /* !CONFIG_BT_CTLR_DATA_LENGTH */
4090 		rx = ll_pdu_rx_alloc_peek(1);
4091 #endif /* !CONFIG_BT_CTLR_DATA_LENGTH */
4092 		if (!rx) {
4093 			return;
4094 		}
4095 
4096 		tx = mem_acquire(&mem_conn_tx_ctrl.free);
4097 		if (!tx) {
4098 			return;
4099 		}
4100 
4101 		/* reset initiate flag */
4102 		conn->llcp.phy_upd_ind.initiate = 0U;
4103 
4104 		/* Check if both tx and rx PHY unchanged */
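		/* (the three LSBs of the PHY mask: 1M, 2M and Coded) */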
4105 		if (!((conn->llcp.phy_upd_ind.tx |
4106 		       conn->llcp.phy_upd_ind.rx) & 0x07)) {
4107 			/* Procedure complete */
4108 			conn->llcp_ack = conn->llcp_req;
4109 
4110 			/* 0 instant */
4111 			conn->llcp.phy_upd_ind.instant = 0U;
4112 
4113 			/* generate phy update event */
4114 			if (conn->llcp.phy_upd_ind.cmd) {
4115 				struct lll_conn *lll = &conn->lll;
4116 
4117 				(void)ll_pdu_rx_alloc();
4118 
4119 				rx->hdr.handle = lll->handle;
4120 				rx->hdr.type = NODE_RX_TYPE_PHY_UPDATE;
4121 
4122 				upd = (void *)rx->pdu;
4123 				upd->status = 0U;
4124 				upd->tx = lll->phy_tx;
4125 				upd->rx = lll->phy_rx;
4126 
4127 				/* Enqueue Rx node */
4128 				ll_rx_put(rx->hdr.link, rx);
4129 				ll_rx_sched();
4130 			}
4131 		} else {
4132 			struct lll_conn *lll = &conn->lll;
4133 
4134 			/* set instant */
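			/* The +6 below keeps the instant at least six
			 * connection events in the future, in addition to the
			 * peripheral latency, so that a latency-applying
			 * peripheral can still receive the PDU before the
			 * instant is reached.
			 */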
4135 			conn->llcp.phy_upd_ind.instant = event_counter +
4136 							 lll->latency +
4137 							 6;
4138 			/* reserve rx node for event generation at instant */
4139 			(void)ll_pdu_rx_alloc();
4140 			rx->hdr.link->mem = conn->llcp_rx;
4141 			conn->llcp_rx = rx;
4142 
4143 			/* reserve rx node for DLE event generation */
4144 			if (IS_ENABLED(CONFIG_BT_CTLR_DATA_LENGTH)) {
4145 				rx = ll_pdu_rx_alloc();
4146 				rx->hdr.link->mem = conn->llcp_rx;
4147 				conn->llcp_rx = rx;
4148 			}
4149 		}
4150 
4151 		/* place the phy update ind packet as next in
4152 		 * tx queue
4153 		 */
4154 		pdu_ctrl_tx = (void *)tx->pdu;
4155 		pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL;
4156 		pdu_ctrl_tx->len =
4157 			offsetof(struct pdu_data_llctrl, phy_upd_ind) +
4158 			sizeof(struct pdu_data_llctrl_phy_upd_ind);
4159 		pdu_ctrl_tx->llctrl.opcode =
4160 			PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND;
4161 		ind = &pdu_ctrl_tx->llctrl.phy_upd_ind;
4162 		ind->c_to_p_phy = conn->llcp.phy_upd_ind.tx;
4163 		ind->p_to_c_phy = conn->llcp.phy_upd_ind.rx;
4164 		ind->instant = sys_cpu_to_le16(conn->llcp.phy_upd_ind.instant);
4165 
4166 		ctrl_tx_enqueue(conn, tx);
4167 	} else if (((event_counter - conn->llcp.phy_upd_ind.instant) &
4168 		    0xFFFF) <= 0x7FFF) {
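		/* Note on the guard above: with 16-bit wraparound arithmetic
		 * the instant is treated as reached or passed once
		 * (event_counter - instant) mod 65536 is at most 0x7FFF,
		 * i.e. within half the sequence space.
		 */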
4169 		struct lll_conn *lll = &conn->lll;
4170 		struct node_rx_pdu *rx;
4171 		uint8_t old_tx, old_rx;
4172 		uint8_t phy_bitmask;
4173 
4174 		/* Acquire additional rx node for Data length notification as
4175 		 * a peripheral.
4176 		 */
4177 		if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
4178 		    IS_ENABLED(CONFIG_BT_CTLR_DATA_LENGTH) &&
4179 		    conn->lll.role) {
4180 			rx = ll_pdu_rx_alloc();
4181 			if (!rx) {
4182 				return;
4183 			}
4184 
4185 			rx->hdr.link->mem = conn->llcp_rx;
4186 			conn->llcp_rx = rx;
4187 		}
4188 
4189 #if defined(CONFIG_BT_PERIPHERAL) && defined(CONFIG_BT_CTLR_LE_ENC)
4190 		if (conn->lll.role && (conn->periph.llcp_type != LLCP_NONE)) {
			/* Local peripheral initiated PHY update completed
			 * while the remote central had initiated an
			 * encryption procedure.
			 */
4194 			conn->periph.llcp_type = LLCP_NONE;
4195 		} else
4196 #endif /* CONFIG_BT_PERIPHERAL && CONFIG_BT_CTLR_LE_ENC */
4197 		{
4198 			/* procedure request acked */
4199 			conn->llcp_ack = conn->llcp_req;
4200 		}
4201 
4202 		/* supported PHYs mask */
4203 		phy_bitmask = PHY_1M;
4204 		if (IS_ENABLED(CONFIG_BT_CTLR_PHY_2M)) {
4205 			phy_bitmask |= PHY_2M;
4206 		}
4207 		if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
4208 			phy_bitmask |= PHY_CODED;
4209 		}
4210 
4211 		/* apply new phy */
4212 		old_tx = lll->phy_tx;
4213 		old_rx = lll->phy_rx;
4214 
4215 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
4216 		uint16_t eff_tx_time = lll->max_tx_time;
4217 		uint16_t eff_rx_time = lll->max_rx_time;
4218 		uint16_t max_rx_time, max_tx_time;
4219 
4220 		dle_max_time_get(conn, &max_rx_time, &max_tx_time);
4221 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
4222 
4223 		if (conn->llcp.phy_upd_ind.tx) {
4224 			if (conn->llcp.phy_upd_ind.tx & phy_bitmask) {
4225 				lll->phy_tx = conn->llcp.phy_upd_ind.tx &
4226 					      phy_bitmask;
4227 			}
4228 
4229 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
4230 			eff_tx_time = calc_eff_time(lll->max_tx_octets,
4231 						    lll->phy_tx,
4232 						    max_tx_time);
4233 
4234 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
4235 		}
4236 		if (conn->llcp.phy_upd_ind.rx) {
4237 			if (conn->llcp.phy_upd_ind.rx & phy_bitmask) {
4238 				lll->phy_rx = conn->llcp.phy_upd_ind.rx &
4239 					      phy_bitmask;
4240 			}
4241 
4242 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
4243 			eff_rx_time =
4244 				calc_eff_time(lll->max_rx_octets, lll->phy_rx,
4245 					      max_rx_time);
4246 
4247 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
4248 		}
4249 
4250 		/* Acquire Rx node */
4251 		rx = conn->llcp_rx;
4252 		LL_ASSERT(rx && rx->hdr.link);
4253 		conn->llcp_rx = rx->hdr.link->mem;
4254 
4255 		/* generate event if phy changed or initiated by cmd */
4256 		if (!conn->llcp.phy_upd_ind.cmd && (lll->phy_tx == old_tx) &&
4257 		    (lll->phy_rx == old_rx)) {
			/* Mark buffer for release */
4259 			rx->hdr.type = NODE_RX_TYPE_RELEASE;
4260 
4261 			/* enqueue rx node towards Thread */
4262 			ll_rx_put(rx->hdr.link, rx);
4263 
4264 			/* Release rx node that was reserved for Data Length
4265 			 * notification.
4266 			 */
4267 			if (IS_ENABLED(CONFIG_BT_CTLR_DATA_LENGTH)) {
4268 				/* Get the DLE rx node reserved for ULL->LL */
4269 				rx = conn->llcp_rx;
4270 				LL_ASSERT(rx && rx->hdr.link);
4271 				conn->llcp_rx = rx->hdr.link->mem;
4272 
				/* Mark buffer for release */
4274 				rx->hdr.type = NODE_RX_TYPE_RELEASE;
4275 
4276 				/* enqueue rx node towards Thread */
4277 				ll_rx_put(rx->hdr.link, rx);
4278 			}
4279 
4280 			ll_rx_sched();
4281 
4282 			return;
4283 		}
4284 
4285 		rx->hdr.handle = lll->handle;
4286 		rx->hdr.type = NODE_RX_TYPE_PHY_UPDATE;
4287 
4288 		upd = (void *)rx->pdu;
4289 		upd->status = 0U;
4290 		upd->tx = lll->phy_tx;
4291 		upd->rx = lll->phy_rx;
4292 
4293 		/* enqueue rx node towards Thread */
4294 		ll_rx_put(rx->hdr.link, rx);
4295 
4296 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
4297 		/* get a rx node for ULL->LL */
4298 		rx = conn->llcp_rx;
4299 		LL_ASSERT(rx && rx->hdr.link);
4300 		conn->llcp_rx = rx->hdr.link->mem;
4301 
4302 		/* Update max tx and/or max rx if changed */
4303 		if ((eff_tx_time <= lll->max_tx_time) &&
4304 		    (lll->max_tx_time <= max_tx_time) &&
4305 		    (eff_rx_time <= lll->max_rx_time) &&
4306 		    (lll->max_rx_time <= max_rx_time)) {
4307 			/* Mark buffer for release */
4308 			rx->hdr.type = NODE_RX_TYPE_RELEASE;
4309 
4310 			/* enqueue rx node towards Thread */
4311 			ll_rx_put(rx->hdr.link, rx);
4312 			ll_rx_sched();
4313 			return;
4314 		}
4315 		lll->max_tx_time = eff_tx_time;
4316 		lll->max_rx_time = eff_rx_time;
4317 
4318 		/* prepare length rsp structure */
4319 		rx->hdr.handle = lll->handle;
4320 		rx->hdr.type = NODE_RX_TYPE_DC_PDU;
4321 
4322 		struct pdu_data *pdu_rx = (void *)rx->pdu;
4323 
4324 		pdu_rx->ll_id = PDU_DATA_LLID_CTRL;
4325 		pdu_rx->len = offsetof(struct pdu_data_llctrl, length_rsp) +
4326 			      sizeof(struct pdu_data_llctrl_length_rsp);
4327 		pdu_rx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_LENGTH_RSP;
4328 
4329 		struct pdu_data_llctrl_length_req *lr =
4330 			(void *)&pdu_rx->llctrl.length_rsp;
4331 
4332 		lr->max_rx_octets = sys_cpu_to_le16(lll->max_rx_octets);
4333 		lr->max_tx_octets = sys_cpu_to_le16(lll->max_tx_octets);
4334 		lr->max_rx_time = sys_cpu_to_le16(lll->max_rx_time);
4335 		lr->max_tx_time = sys_cpu_to_le16(lll->max_tx_time);
4336 
4337 		/* enqueue rx node towards Thread */
4338 		ll_rx_put(rx->hdr.link, rx);
4339 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
4340 
4341 		ll_rx_sched();
4342 	}
4343 }
4344 #endif /* CONFIG_BT_CTLR_PHY */
4345 
4346 #if defined(CONFIG_BT_PERIPHERAL)
4347 static uint8_t conn_upd_recv(struct ll_conn *conn, memq_link_t *link,
4348 			  struct node_rx_pdu **rx, struct pdu_data *pdu)
4349 {
4350 	uint16_t instant;
4351 
4352 	instant = sys_le16_to_cpu(pdu->llctrl.conn_update_ind.instant);
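	/* With 16-bit wraparound arithmetic, a difference of more than 0x7FFF
	 * between the instant and the current event counter means the instant
	 * already lies in the past.
	 */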
4353 	if (((instant - conn->lll.event_counter) & 0xFFFF) > 0x7FFF) {
		/* Mark buffer for release */
4355 		(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
4356 
4357 		return BT_HCI_ERR_INSTANT_PASSED;
4358 	}
4359 
4360 	/* different transaction collision */
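	/* A (req - ack) difference of 2 (mod 4) means a locally initiated
	 * procedure has already been committed (ack is decremented by 2 on
	 * initiation), so this peer-initiated update collides with it.
	 */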
4361 	if (((conn->llcp_req - conn->llcp_ack) & 0x03) == 0x02) {
		/* Mark buffer for release */
4363 		(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
4364 
4365 		return BT_HCI_ERR_DIFF_TRANS_COLLISION;
4366 	}
4367 
4368 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
	/* Set the CPR mutex if not already set. As central the mutex shall
	 * already be set; as peripheral we accept it as a new 'set' of the
	 * mutex.
	 */
4372 	cpr_active_check_and_set(conn);
4373 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
4374 
4375 	conn->llcp_cu.win_size = pdu->llctrl.conn_update_ind.win_size;
4376 	conn->llcp_cu.win_offset_us =
4377 		sys_le16_to_cpu(pdu->llctrl.conn_update_ind.win_offset) *
4378 			CONN_INT_UNIT_US;
4379 	conn->llcp_cu.interval =
4380 		sys_le16_to_cpu(pdu->llctrl.conn_update_ind.interval);
4381 	conn->llcp_cu.latency =
4382 		sys_le16_to_cpu(pdu->llctrl.conn_update_ind.latency);
4383 	conn->llcp_cu.timeout =
4384 		sys_le16_to_cpu(pdu->llctrl.conn_update_ind.timeout);
4385 	conn->llcp.conn_upd.instant = instant;
4386 	conn->llcp_cu.state = LLCP_CUI_STATE_INPROG;
4387 	conn->llcp_cu.cmd = 1U;
4388 	conn->llcp_cu.ack--;
4389 
4390 	link->mem = conn->llcp_rx;
4391 	(*rx)->hdr.link = link;
4392 	conn->llcp_rx = *rx;
4393 	*rx = NULL;
4394 
4395 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
4396 	if ((conn->llcp_conn_param.req != conn->llcp_conn_param.ack) &&
4397 	    ((conn->llcp_conn_param.state == LLCP_CPR_STATE_RSP_WAIT) ||
4398 	     (conn->llcp_conn_param.state == LLCP_CPR_STATE_UPD_WAIT))) {
4399 		conn->llcp_conn_param.ack = conn->llcp_conn_param.req;
4400 	}
4401 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
4402 
4403 	return 0;
4404 }
4405 
4406 static uint8_t chan_map_upd_recv(struct ll_conn *conn, struct node_rx_pdu *rx,
4407 			      struct pdu_data *pdu)
4408 {
4409 	uint8_t err = 0U;
4410 	uint16_t instant;
4411 
4412 	instant = sys_le16_to_cpu(pdu->llctrl.chan_map_ind.instant);
4413 	if (((instant - conn->lll.event_counter) & 0xffff) > 0x7fff) {
4414 		err = BT_HCI_ERR_INSTANT_PASSED;
4415 
4416 		goto chan_map_upd_recv_exit;
4417 	}
4418 
4419 	/* different transaction collision */
4420 	if (((conn->llcp_req - conn->llcp_ack) & 0x03) == 0x02) {
4421 		err = BT_HCI_ERR_DIFF_TRANS_COLLISION;
4422 
4423 		goto chan_map_upd_recv_exit;
4424 	}
4425 
4426 
4427 	memcpy(&conn->llcp.chan_map.chm[0], &pdu->llctrl.chan_map_ind.chm[0],
4428 	       sizeof(conn->llcp.chan_map.chm));
4429 	conn->llcp.chan_map.instant = instant;
4430 	conn->llcp.chan_map.initiate = 0U;
4431 
4432 	conn->llcp_type = LLCP_CHAN_MAP;
4433 	conn->llcp_ack -= 2U;
4434 
4435 chan_map_upd_recv_exit:
	/* Mark buffer for release */
4437 	rx->hdr.type = NODE_RX_TYPE_RELEASE;
4438 
4439 	return err;
4440 }
4441 #endif /* CONFIG_BT_PERIPHERAL */
4442 
4443 static void terminate_ind_recv(struct ll_conn *conn, struct node_rx_pdu *rx,
4444 			      struct pdu_data *pdu)
4445 {
4446 	/* Ack and then terminate */
4447 	conn->llcp_terminate.reason_final =
4448 		pdu->llctrl.terminate_ind.error_code;
4449 
	/* Mark buffer for release */
4451 	rx->hdr.type = NODE_RX_TYPE_RELEASE;
4452 }
4453 
4454 #if defined(CONFIG_BT_CTLR_LE_ENC)
4455 #if defined(CONFIG_BT_CENTRAL)
4456 static void enc_req_reused_send(struct ll_conn *conn, struct node_tx **tx)
4457 {
4458 	struct pdu_data *pdu_ctrl_tx;
4459 
4460 	pdu_ctrl_tx = (void *)(*tx)->pdu;
4461 	pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL;
4462 	pdu_ctrl_tx->len = offsetof(struct pdu_data_llctrl, enc_req) +
4463 			   sizeof(struct pdu_data_llctrl_enc_req);
4464 	pdu_ctrl_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_ENC_REQ;
4465 	memcpy(&pdu_ctrl_tx->llctrl.enc_req.rand[0], &conn->llcp_enc.rand[0],
4466 	       sizeof(pdu_ctrl_tx->llctrl.enc_req.rand));
4467 	pdu_ctrl_tx->llctrl.enc_req.ediv[0] = conn->llcp_enc.ediv[0];
4468 	pdu_ctrl_tx->llctrl.enc_req.ediv[1] = conn->llcp_enc.ediv[1];
4469 
4470 	/*
4471 	 * Take advantage of the fact that ivm and skdm fields, which both have
	 * to be filled with random data, are adjacent, and use a single call
	 * to
4473 	 * the entropy driver.
4474 	 */
4475 	BUILD_ASSERT(offsetof(__typeof(pdu_ctrl_tx->llctrl.enc_req), ivm) ==
4476 		     (offsetof(__typeof(pdu_ctrl_tx->llctrl.enc_req), skdm) +
4477 		     sizeof(pdu_ctrl_tx->llctrl.enc_req.skdm)));
4478 
4479 	/* NOTE: if not sufficient random numbers, ignore waiting */
4480 	lll_csrand_isr_get(pdu_ctrl_tx->llctrl.enc_req.skdm,
4481 			   sizeof(pdu_ctrl_tx->llctrl.enc_req.skdm) +
4482 			   sizeof(pdu_ctrl_tx->llctrl.enc_req.ivm));
4483 
4484 	ctrl_tx_enqueue(conn, *tx);
4485 
	/* don't release ctrl PDU memory */
4487 	*tx = NULL;
4488 }
4489 #endif /* CONFIG_BT_CENTRAL */
4490 
4491 #if defined(CONFIG_BT_PERIPHERAL)
4492 static int enc_rsp_send(struct ll_conn *conn)
4493 {
4494 	struct pdu_data *pdu_ctrl_tx;
4495 	struct node_tx *tx;
4496 
4497 	/* acquire tx mem */
4498 	tx = mem_acquire(&mem_conn_tx_ctrl.free);
4499 	if (!tx) {
4500 		return -ENOBUFS;
4501 	}
4502 
4503 	pdu_ctrl_tx = (void *)tx->pdu;
4504 	pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL;
4505 	pdu_ctrl_tx->len = offsetof(struct pdu_data_llctrl, enc_rsp) +
4506 			   sizeof(struct pdu_data_llctrl_enc_rsp);
4507 	pdu_ctrl_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_ENC_RSP;
4508 
4509 	/*
4510 	 * Take advantage of the fact that ivs and skds fields, which both have
	 * to be filled with random data, are adjacent, and use a single call
	 * to
4512 	 * the entropy driver.
4513 	 */
4514 	BUILD_ASSERT(offsetof(__typeof(pdu_ctrl_tx->llctrl.enc_rsp), ivs) ==
4515 		     (offsetof(__typeof(pdu_ctrl_tx->llctrl.enc_rsp), skds) +
4516 		     sizeof(pdu_ctrl_tx->llctrl.enc_rsp.skds)));
4517 
4518 	/* NOTE: if not sufficient random numbers, ignore waiting */
4519 	lll_csrand_isr_get(pdu_ctrl_tx->llctrl.enc_rsp.skds,
4520 			   sizeof(pdu_ctrl_tx->llctrl.enc_rsp.skds) +
4521 			   sizeof(pdu_ctrl_tx->llctrl.enc_rsp.ivs));
4522 
	/* Store values from the peripheral for session key calculation */
4524 	memcpy(&conn->llcp.encryption.skd[8],
4525 	       &pdu_ctrl_tx->llctrl.enc_rsp.skds[0], 8);
4526 	memcpy(&conn->lll.ccm_rx.iv[4],
4527 	       &pdu_ctrl_tx->llctrl.enc_rsp.ivs[0], 4);
4528 
4529 	ctrl_tx_enqueue(conn, tx);
4530 
4531 	return 0;
4532 }
4533 #endif /* CONFIG_BT_PERIPHERAL */
4534 
4535 static int start_enc_rsp_send(struct ll_conn *conn,
4536 			      struct pdu_data *pdu_ctrl_tx)
4537 {
4538 	struct node_tx *tx = NULL;
4539 
4540 	if (!pdu_ctrl_tx) {
4541 		/* acquire tx mem */
4542 		tx = mem_acquire(&mem_conn_tx_ctrl.free);
4543 		if (!tx) {
4544 			return -ENOBUFS;
4545 		}
4546 
4547 		pdu_ctrl_tx = (void *)tx->pdu;
4548 	}
4549 
4550 	/* enable transmit encryption */
4551 	conn->lll.enc_tx = 1;
4552 
4553 	pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL;
4554 	pdu_ctrl_tx->len = offsetof(struct pdu_data_llctrl, enc_rsp);
4555 	pdu_ctrl_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_START_ENC_RSP;
4556 
4557 	if (tx) {
4558 		ctrl_tx_enqueue(conn, tx);
4559 	}
4560 
4561 	return 0;
4562 }
4563 
4564 static inline bool ctrl_is_unexpected(struct ll_conn *conn, uint8_t opcode)
4565 {
4566 	return (!conn->lll.role &&
4567 		((!conn->llcp_enc.refresh &&
4568 		  (opcode != PDU_DATA_LLCTRL_TYPE_TERMINATE_IND) &&
4569 		  (opcode != PDU_DATA_LLCTRL_TYPE_START_ENC_REQ) &&
4570 		  (opcode != PDU_DATA_LLCTRL_TYPE_START_ENC_RSP) &&
4571 		  (opcode != PDU_DATA_LLCTRL_TYPE_REJECT_IND) &&
4572 		  (opcode != PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND)) ||
4573 		 (conn->llcp_enc.refresh &&
4574 		  (opcode != PDU_DATA_LLCTRL_TYPE_TERMINATE_IND) &&
4575 		  (opcode != PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_RSP) &&
4576 		  (opcode != PDU_DATA_LLCTRL_TYPE_ENC_RSP) &&
4577 		  (opcode != PDU_DATA_LLCTRL_TYPE_START_ENC_REQ) &&
4578 		  (opcode != PDU_DATA_LLCTRL_TYPE_START_ENC_RSP) &&
4579 		  (opcode != PDU_DATA_LLCTRL_TYPE_REJECT_IND) &&
4580 		  (opcode != PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND)))) ||
4581 	       (conn->lll.role &&
4582 		((!conn->llcp_enc.refresh &&
4583 		  (opcode != PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP) &&
4584 		  (opcode != PDU_DATA_LLCTRL_TYPE_TERMINATE_IND) &&
4585 		  (opcode != PDU_DATA_LLCTRL_TYPE_START_ENC_RSP) &&
4586 		  (opcode != PDU_DATA_LLCTRL_TYPE_REJECT_IND) &&
4587 		  (opcode != PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND)) ||
4588 		 (conn->llcp_enc.refresh &&
4589 		  (opcode != PDU_DATA_LLCTRL_TYPE_TERMINATE_IND) &&
4590 		  (opcode != PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_RSP) &&
4591 		  (opcode != PDU_DATA_LLCTRL_TYPE_ENC_REQ) &&
4592 		  (opcode != PDU_DATA_LLCTRL_TYPE_START_ENC_RSP) &&
4593 		  (opcode != PDU_DATA_LLCTRL_TYPE_REJECT_IND) &&
4594 		  (opcode != PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND))));
4595 }
4596 
4597 #endif /* CONFIG_BT_CTLR_LE_ENC */
4598 
4599 static int unknown_rsp_send(struct ll_conn *conn, struct node_rx_pdu *rx,
4600 			    uint8_t type)
4601 {
4602 	struct pdu_data *pdu;
4603 	struct node_tx *tx;
4604 	int err;
4605 
4606 	/* Check transaction violation and get free ctrl tx PDU */
4607 	tx = ctrl_tx_rsp_mem_acquire(conn, rx, &err);
4608 	if (!tx) {
4609 		return err;
4610 	}
4611 
4612 	pdu = (void *)tx->pdu;
4613 	pdu->ll_id = PDU_DATA_LLID_CTRL;
4614 	pdu->len = offsetof(struct pdu_data_llctrl, unknown_rsp) +
4615 			   sizeof(struct pdu_data_llctrl_unknown_rsp);
4616 	pdu->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP;
4617 	pdu->llctrl.unknown_rsp.type = type;
4618 
4619 	ctrl_tx_enqueue(conn, tx);
4620 
	/* Mark buffer for release */
4622 	rx->hdr.type = NODE_RX_TYPE_RELEASE;
4623 
4624 	return 0;
4625 }
4626 
4627 static inline uint64_t feat_get(uint8_t *features)
4628 {
4629 	uint64_t feat;
4630 
4631 	feat = sys_get_le64(features) | ~LL_FEAT_BIT_MASK_VALID;
4632 	feat &= LL_FEAT_BIT_MASK;
4633 
4634 	return feat;
4635 }
4636 
4637 /*
4638  * Perform a logical and on octet0 and keep the remaining bits of the
4639  * first input parameter
4640  */
4641 static inline uint64_t feat_land_octet0(uint64_t feat_to_keep,
4642 					uint64_t feat_octet0)
4643 {
4644 	uint64_t feat_result;
4645 
4646 	feat_result = feat_to_keep & feat_octet0;
4647 	feat_result &= 0xFF;
4648 	feat_result |= feat_to_keep & LL_FEAT_FILTER_OCTET0;
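	/* Illustrative example, assuming LL_FEAT_FILTER_OCTET0 masks out
	 * octet 0 as described above: feat_to_keep = 0x0103 and
	 * feat_octet0 = 0x01 yield (0x0103 & 0x01) & 0xFF = 0x01, plus the
	 * kept upper bit 0x0100, i.e. a result of 0x0101.
	 */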
4649 
4650 	return feat_result;
4651 }
4652 
4653 #if defined(CONFIG_BT_PERIPHERAL) || \
4654 	(defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG))
4655 static int feature_rsp_send(struct ll_conn *conn, struct node_rx_pdu *rx,
4656 			    struct pdu_data *pdu_rx)
4657 {
4658 	struct pdu_data_llctrl_feature_req *req;
4659 	struct pdu_data *pdu_tx;
4660 	struct node_tx *tx;
4661 	uint64_t feat;
4662 	int err;
4663 
4664 	/* Check transaction violation and get free ctrl tx PDU */
4665 	tx = ctrl_tx_rsp_mem_acquire(conn, rx, &err);
4666 	if (!tx) {
4667 		return err;
4668 	}
4669 
4670 	/* AND the feature set to get Feature USED */
4671 	req = &pdu_rx->llctrl.feature_req;
4672 	conn->llcp_feature.features_conn &= feat_get(&req->features[0]);
4673 	/*
4674 	 * Get all the features of peer, except octet 0.
4675 	 * Octet 0 is the actual features used on the link
4676 	 * See BTCore V5.2, Vol. 6, Part B, chapter 5.1.4
4677 	 */
4678 	conn->llcp_feature.features_peer =
4679 		feat_land_octet0(feat_get(&req->features[0]), ll_feat_get());
4680 
4681 	/* features exchanged */
4682 	conn->common.fex_valid = 1U;
4683 
4684 	/* Enqueue feature response */
4685 	pdu_tx = (void *)tx->pdu;
4686 	pdu_tx->ll_id = PDU_DATA_LLID_CTRL;
4687 	pdu_tx->len = offsetof(struct pdu_data_llctrl, feature_rsp) +
4688 		sizeof(struct pdu_data_llctrl_feature_rsp);
4689 	pdu_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_FEATURE_RSP;
4690 	(void)memset(&pdu_tx->llctrl.feature_rsp.features[0], 0x00,
4691 		     sizeof(pdu_tx->llctrl.feature_rsp.features));
4692 	/*
4693 	 * On feature response we send the local supported features.
	 * See BTCore V5.2, Vol. 6, Part B, chapter 5.1.4
4695 	 */
4696 	feat = feat_land_octet0(ll_feat_get(),
4697 				conn->llcp_feature.features_conn);
4698 	sys_put_le64(feat, pdu_tx->llctrl.feature_rsp.features);
4699 
4700 	ctrl_tx_sec_enqueue(conn, tx);
4701 
	/* Mark buffer for release */
4703 	rx->hdr.type = NODE_RX_TYPE_RELEASE;
4704 
4705 	return 0;
4706 }
4707 #endif /* PERIPHERAL || (CENTRAL && PER_INIT_FEAT_XCHG) */
4708 
4709 #if defined(CONFIG_BT_CENTRAL) || defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG)
4710 static void feature_rsp_recv(struct ll_conn *conn, struct pdu_data *pdu_rx)
4711 {
4712 	struct pdu_data_llctrl_feature_rsp *rsp;
4713 
4714 	rsp = &pdu_rx->llctrl.feature_rsp;
4715 
4716 	/* AND the feature set to get Feature USED */
4717 	conn->llcp_feature.features_conn &= feat_get(&rsp->features[0]);
4718 	/*
4719 	 * Get all the features of peer, except octet 0.
4720 	 * Octet 0 is the actual features used on the link
4721 	 * See BTCore V5.2, Vol. 6, Part B, chapter 5.1.4
4722 	 */
4723 	conn->llcp_feature.features_peer =
4724 		feat_land_octet0(feat_get(&rsp->features[0]), ll_feat_get());
4725 
4726 	/* features exchanged */
4727 	conn->common.fex_valid = 1U;
4728 
4729 	/* Procedure complete */
4730 	conn->llcp_feature.ack = conn->llcp_feature.req;
4731 	conn->procedure_expire = 0U;
4732 }
4733 #endif /* CONFIG_BT_CENTRAL || CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG */
4734 
4735 #if defined(CONFIG_BT_CTLR_LE_ENC)
4736 static int pause_enc_rsp_send(struct ll_conn *conn, struct node_rx_pdu *rx,
4737 			      uint8_t req)
4738 {
4739 	struct pdu_data *pdu_ctrl_tx;
4740 	struct node_tx *tx;
4741 
4742 	if (req) {
4743 		/* acquire tx mem */
4744 		tx = mem_acquire(&mem_conn_tx_ctrl.free);
4745 		if (!tx) {
4746 			return -ENOBUFS;
4747 		}
4748 
4749 		/* key refresh */
4750 		conn->llcp_enc.refresh = 1U;
4751 	} else if (!conn->lll.role) {
4752 		/* acquire tx mem */
4753 		tx = mem_acquire(&mem_conn_tx_ctrl.free);
4754 		if (!tx) {
4755 			return -ENOBUFS;
4756 		}
4757 
4758 		/* disable transmit encryption */
4759 		conn->lll.enc_tx = 0;
4760 	} else {
4761 		/* disable transmit encryption */
4762 		conn->lll.enc_tx = 0;
4763 
4764 		goto pause_enc_rsp_send_exit;
4765 	}
4766 
4767 	/* pause data packet rx */
4768 	conn->llcp_enc.pause_rx = 1U;
4769 
4770 	/* disable receive encryption */
4771 	conn->lll.enc_rx = 0;
4772 
4773 	/* Enqueue pause enc rsp */
4774 	pdu_ctrl_tx = (void *)tx->pdu;
4775 	pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL;
4776 	pdu_ctrl_tx->len = offsetof(struct pdu_data_llctrl, enc_rsp);
4777 	pdu_ctrl_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_RSP;
4778 
4779 	ctrl_tx_enqueue(conn, tx);
4780 
4781 pause_enc_rsp_send_exit:
	/* Mark buffer for release */
4783 	rx->hdr.type = NODE_RX_TYPE_RELEASE;
4784 
4785 	return 0;
4786 }
4787 #endif /* CONFIG_BT_CTLR_LE_ENC */
4788 
4789 static int version_ind_send(struct ll_conn *conn, struct node_rx_pdu *rx,
4790 			    struct pdu_data *pdu_rx)
4791 {
4792 	struct pdu_data_llctrl_version_ind *v;
4793 	struct pdu_data *pdu_tx;
4794 	struct node_tx *tx;
4795 
4796 	if (!conn->llcp_version.tx) {
4797 		tx = mem_acquire(&mem_conn_tx_ctrl.free);
4798 		if (!tx) {
4799 			return -ENOBUFS;
4800 		}
4801 		conn->llcp_version.tx = 1U;
4802 
4803 		pdu_tx = (void *)tx->pdu;
4804 		pdu_tx->ll_id = PDU_DATA_LLID_CTRL;
4805 		pdu_tx->len =
4806 			offsetof(struct pdu_data_llctrl, version_ind) +
4807 			sizeof(struct pdu_data_llctrl_version_ind);
4808 		pdu_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_VERSION_IND;
4809 		v = &pdu_tx->llctrl.version_ind;
4810 		v->version_number = LL_VERSION_NUMBER;
		v->company_id = sys_cpu_to_le16(ll_settings_company_id());
4812 		v->sub_version_number =
4813 			sys_cpu_to_le16(ll_settings_subversion_number());
4814 
4815 		ctrl_tx_sec_enqueue(conn, tx);
4816 
		/* Mark buffer for release */
4818 		rx->hdr.type = NODE_RX_TYPE_RELEASE;
4819 	} else if (!conn->llcp_version.rx) {
4820 		/* procedure request acked */
4821 		conn->llcp_version.ack = conn->llcp_version.req;
4822 
4823 		/* Procedure complete */
4824 		conn->procedure_expire = 0U;
4825 	} else {
4826 		/* Tx-ed and Rx-ed before, ignore this invalid Rx. */
4827 
		/* Mark buffer for release */
4829 		rx->hdr.type = NODE_RX_TYPE_RELEASE;
4830 
4831 		return 0;
4832 	}
4833 
4834 	v = &pdu_rx->llctrl.version_ind;
4835 	conn->llcp_version.version_number = v->version_number;
4836 	conn->llcp_version.company_id = sys_le16_to_cpu(v->company_id);
4837 	conn->llcp_version.sub_version_number =
4838 		sys_le16_to_cpu(v->sub_version_number);
4839 	conn->llcp_version.rx = 1U;
4840 
4841 	return 0;
4842 }
4843 
4844 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) || defined(CONFIG_BT_CTLR_PHY)
4845 static int reject_ext_ind_send(struct ll_conn *conn, struct node_rx_pdu *rx,
4846 			       uint8_t reject_opcode, uint8_t error_code)
4847 {
4848 	struct pdu_data *pdu_ctrl_tx;
4849 	struct node_tx *tx;
4850 	int err;
4851 
4852 	/* Check transaction violation and get free ctrl tx PDU */
4853 	tx = ctrl_tx_rsp_mem_acquire(conn, rx, &err);
4854 	if (!tx) {
4855 		return err;
4856 	}
4857 
4858 	pdu_ctrl_tx = (void *)tx->pdu;
4859 	pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL;
4860 	pdu_ctrl_tx->len = offsetof(struct pdu_data_llctrl, reject_ext_ind) +
4861 		sizeof(struct pdu_data_llctrl_reject_ext_ind);
4862 	pdu_ctrl_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND;
4863 	pdu_ctrl_tx->llctrl.reject_ext_ind.reject_opcode = reject_opcode;
4864 	pdu_ctrl_tx->llctrl.reject_ext_ind.error_code = error_code;
4865 
4866 	ctrl_tx_enqueue(conn, tx);
4867 
	/* Mark buffer for release */
4869 	rx->hdr.type = NODE_RX_TYPE_RELEASE;
4870 
4871 	return 0;
4872 }
4873 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ  || PHY */
4874 
4875 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
4876 static inline int reject_ind_conn_upd_recv(struct ll_conn *conn,
4877 					   struct node_rx_pdu *rx,
4878 					   struct pdu_data *pdu_rx)
4879 {
4880 	struct pdu_data_llctrl_reject_ext_ind *rej_ext_ind;
4881 	struct node_rx_cu *cu;
4882 	struct lll_conn *lll;
4883 
4884 	/* Unsupported remote feature */
4885 	lll = &conn->lll;
4886 	rej_ext_ind = (void *)&pdu_rx->llctrl.reject_ext_ind;
4887 	if (!lll->role && (rej_ext_ind->error_code ==
4888 			   BT_HCI_ERR_UNSUPP_REMOTE_FEATURE)) {
4889 		LL_ASSERT(conn->llcp_cu.req == conn->llcp_cu.ack);
4890 
4891 		conn->llcp_conn_param.state = LLCP_CPR_STATE_UPD;
4892 
4893 		conn->llcp_cu.win_size = 1U;
4894 		conn->llcp_cu.win_offset_us = 0U;
4895 		conn->llcp_cu.interval = conn->llcp_conn_param.interval_max;
4896 		conn->llcp_cu.latency = conn->llcp_conn_param.latency;
4897 		conn->llcp_cu.timeout = conn->llcp_conn_param.timeout;
4898 		conn->llcp_cu.state = LLCP_CUI_STATE_USE;
4899 		conn->llcp_cu.cmd = conn->llcp_conn_param.cmd;
4900 		conn->llcp_cu.ack--;
4901 
4902 		return -EINVAL;
4903 	}
4904 	/* FIXME: handle unsupported LL parameters error */
4905 	else if (rej_ext_ind->error_code != BT_HCI_ERR_LL_PROC_COLLISION) {
4906 #if defined(CONFIG_BT_PERIPHERAL)
4907 		/* update to next ticks offset */
4908 		if (lll->role) {
4909 			conn->periph.ticks_to_offset =
4910 			    conn->llcp_conn_param.ticks_to_offset_next;
4911 		}
4912 #endif /* CONFIG_BT_PERIPHERAL */
4913 	}
4914 
4915 	if (conn->llcp_conn_param.state == LLCP_CPR_STATE_RSP_WAIT) {
4916 		/* Reset CPR mutex */
4917 		cpr_active_reset();
4918 
4919 		/* Procedure complete */
4920 		conn->llcp_conn_param.ack = conn->llcp_conn_param.req;
4921 
4922 		/* Stop procedure timeout */
4923 		conn->procedure_expire = 0U;
4924 	}
4925 
4926 	/* skip event generation if not cmd initiated */
4927 	if (!conn->llcp_conn_param.cmd) {
4928 		return -EINVAL;
4929 	}
4930 
4931 	/* generate conn update complete event with error code */
4932 	rx->hdr.type = NODE_RX_TYPE_CONN_UPDATE;
4933 
4934 	/* prepare connection update complete structure */
4935 	cu = (void *)pdu_rx;
4936 	cu->status = rej_ext_ind->error_code;
4937 	cu->interval = lll->interval;
4938 	cu->latency = lll->latency;
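	/* Convert to the HCI 10 ms unit: connection events times the
	 * interval in 1.25 ms units, scaled by 1.25 / 10 = 125 / 1000.
	 */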
4939 	cu->timeout = conn->supervision_reload *
4940 		      lll->interval * 125U / 1000;
4941 
4942 	return 0;
4943 }
4944 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
4945 
4946 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
4947 static inline int reject_ind_dle_recv(struct ll_conn *conn,
4948 				      struct pdu_data *pdu_rx)
4949 {
4950 	struct pdu_data_llctrl_length_req *lr;
4951 
4952 	/* Procedure complete */
4953 	conn->llcp_length.ack = conn->llcp_length.req;
4954 	conn->procedure_expire = 0U;
4955 
4956 	/* prepare length rsp structure */
4957 	pdu_rx->len = offsetof(struct pdu_data_llctrl, length_rsp) +
4958 		      sizeof(struct pdu_data_llctrl_length_rsp);
4959 	pdu_rx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_LENGTH_RSP;
4960 
4961 	lr = (void *)&pdu_rx->llctrl.length_req;
4962 	lr->max_rx_octets = sys_cpu_to_le16(conn->lll.max_rx_octets);
4963 	lr->max_tx_octets = sys_cpu_to_le16(conn->lll.max_tx_octets);
4964 #if !defined(CONFIG_BT_CTLR_PHY)
4965 	lr->max_rx_time =
4966 		sys_cpu_to_le16(PDU_DC_MAX_US(conn->lll.max_rx_octets, PHY_1M));
4967 	lr->max_tx_time =
4968 		sys_cpu_to_le16(PDU_DC_MAX_US(conn->lll.max_tx_octets, PHY_1M));
4969 #else /* CONFIG_BT_CTLR_PHY */
4970 	lr->max_rx_time = sys_cpu_to_le16(conn->lll.max_rx_time);
4971 	lr->max_tx_time = sys_cpu_to_le16(conn->lll.max_tx_time);
4972 #endif /* CONFIG_BT_CTLR_PHY */
4973 
4974 	return 0;
4975 }
4976 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
4977 
4978 #if defined(CONFIG_BT_CTLR_PHY)
4979 static inline int reject_ind_phy_upd_recv(struct ll_conn *conn,
4980 					  struct node_rx_pdu *rx,
4981 					  struct pdu_data *pdu_rx)
4982 {
4983 	struct pdu_data_llctrl_reject_ext_ind *rej_ext_ind;
4984 	struct node_rx_pu *p;
4985 
4986 	/* Same Procedure or Different Procedure Collision */
4987 
4988 	/* If not same procedure, stop procedure timeout, else
4989 	 * continue timer until phy upd ind is received.
4990 	 */
4991 	rej_ext_ind = (void *)&pdu_rx->llctrl.reject_ext_ind;
4992 	if (rej_ext_ind->error_code != BT_HCI_ERR_LL_PROC_COLLISION) {
4993 		/* Procedure complete */
4994 		conn->llcp_phy.ack = conn->llcp_phy.req;
4995 
4996 		/* Reset packet timing restrictions */
4997 		conn->lll.phy_tx_time = conn->lll.phy_tx;
4998 		conn->llcp_phy.pause_tx = 0U;
4999 
5000 		/* Stop procedure timeout */
5001 		conn->procedure_expire = 0U;
5002 	}
5003 
5004 	/* skip event generation if not cmd initiated */
5005 	if (!conn->llcp_phy.cmd) {
5006 		return -EINVAL;
5007 	}
5008 
5009 	/* generate phy update complete event with error code */
5010 	rx->hdr.type = NODE_RX_TYPE_PHY_UPDATE;
5011 
5012 	p = (void *)pdu_rx;
5013 	p->status = rej_ext_ind->error_code;
5014 	p->tx = conn->lll.phy_tx;
5015 	p->rx = conn->lll.phy_rx;
5016 
5017 	return 0;
5018 }
5019 #endif /* CONFIG_BT_CTLR_PHY */
5020 
5021 #if defined(CONFIG_BT_CTLR_LE_ENC)
5022 static inline int reject_ind_enc_recv(struct ll_conn *conn)
5023 {
5024 	/* resume data packet rx and tx */
5025 	conn->llcp_enc.pause_rx = 0U;
5026 	conn->llcp_enc.pause_tx = 0U;
5027 
5028 	/* Procedure complete */
5029 	conn->llcp_ack = conn->llcp_req;
5030 	conn->procedure_expire = 0U;
5031 
5032 	return 0;
5033 }
5034 
5035 static inline int reject_ext_ind_enc_recv(struct ll_conn *conn,
5036 					  struct pdu_data *pdu_rx)
5037 {
5038 	struct pdu_data_llctrl_reject_ext_ind *rej_ext_ind;
5039 
5040 	reject_ind_enc_recv(conn);
5041 
5042 	/* enqueue as if it were a reject ind */
5043 	rej_ext_ind = (void *)&pdu_rx->llctrl.reject_ext_ind;
5044 	pdu_rx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_REJECT_IND;
5045 	pdu_rx->llctrl.reject_ind.error_code = rej_ext_ind->error_code;
5046 
5047 	return 0;
5048 }
5049 #endif /* CONFIG_BT_CTLR_LE_ENC */
5050 
5051 static inline void reject_ind_recv(struct ll_conn *conn, struct node_rx_pdu *rx,
5052 				   struct pdu_data *pdu_rx)
5053 {
	int err = -EINVAL;

5057 	if (0) {
5058 
5059 #if defined(CONFIG_BT_CTLR_LE_ENC)
5060 	} else if ((conn->llcp_ack != conn->llcp_req) &&
5061 		   (conn->llcp_type == LLCP_ENCRYPTION)) {
5062 		err = reject_ind_enc_recv(conn);
5063 #endif /* CONFIG_BT_CTLR_LE_ENC */
5064 
5065 #if defined(CONFIG_BT_CTLR_PHY)
5066 	} else if (conn->llcp_phy.ack != conn->llcp_phy.req) {
5067 		struct pdu_data_llctrl_reject_ext_ind *rej_ext_ind;
5068 		struct pdu_data_llctrl_reject_ind *rej_ind;
5069 
5070 		rej_ext_ind = (void *)&pdu_rx->llctrl.reject_ext_ind;
5071 		rej_ind = (void *)&pdu_rx->llctrl.reject_ind;
		/* NOTE: Do not modify reject_opcode field which overlaps with
5073 		 *       error_code field in reject ind PDU structure. Only copy
5074 		 *       error_code from reject ind to reject ext ind PDU
5075 		 *       structure.
5076 		 */
5077 		rej_ext_ind->error_code = rej_ind->error_code;
5078 		err = reject_ind_phy_upd_recv(conn, rx, pdu_rx);
5079 #endif /* CONFIG_BT_CTLR_PHY */
5080 
5081 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
5082 	} else if (conn->llcp_conn_param.ack != conn->llcp_conn_param.req) {
5083 		struct pdu_data_llctrl_reject_ext_ind *rej_ext_ind;
5084 		struct pdu_data_llctrl_reject_ind *rej_ind;
5085 
5086 		rej_ext_ind = (void *)&pdu_rx->llctrl.reject_ext_ind;
5087 		rej_ind = (void *)&pdu_rx->llctrl.reject_ind;
		/* NOTE: Do not modify reject_opcode field which overlaps with
5089 		 *       error_code field in reject ind PDU structure. Only copy
5090 		 *       error_code from reject ind to reject ext ind PDU
5091 		 *       structure.
5092 		 */
5093 		rej_ext_ind->error_code = rej_ind->error_code;
5094 		err = reject_ind_conn_upd_recv(conn, rx, pdu_rx);
5095 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
5096 
5097 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
5098 	} else if (conn->llcp_length.ack != conn->llcp_length.req) {
5099 		err = reject_ind_dle_recv(conn, pdu_rx);
5100 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
5101 	}
5102 
5103 	if (err) {
		/* Mark buffer for release */
5105 		rx->hdr.type = NODE_RX_TYPE_RELEASE;
5106 	}
5107 }
5108 
5109 static inline void reject_ext_ind_recv(struct ll_conn *conn,
5110 				       struct node_rx_pdu *rx,
5111 				       struct pdu_data *pdu_rx)
5112 {
5113 	struct pdu_data_llctrl_reject_ext_ind *rej_ext_ind;
5114 	int err = -EINVAL;
5115 
5116 	rej_ext_ind = (void *)&pdu_rx->llctrl.reject_ext_ind;
5117 
5118 	switch (rej_ext_ind->reject_opcode) {
5119 #if defined(CONFIG_BT_CTLR_LE_ENC)
5120 	case PDU_DATA_LLCTRL_TYPE_ENC_REQ:
5121 		if ((conn->llcp_ack != conn->llcp_req) &&
5122 		    (conn->llcp_type == LLCP_ENCRYPTION)) {
5123 			err = reject_ext_ind_enc_recv(conn, pdu_rx);
5124 		}
5125 		break;
5126 #endif /* CONFIG_BT_CTLR_LE_ENC */
5127 
5128 #if defined(CONFIG_BT_CTLR_PHY)
5129 	case PDU_DATA_LLCTRL_TYPE_PHY_REQ:
5130 		if (conn->llcp_phy.ack != conn->llcp_phy.req) {
5131 			err = reject_ind_phy_upd_recv(conn, rx, pdu_rx);
5132 		}
5133 		break;
5134 #endif /* CONFIG_BT_CTLR_PHY */
5135 
5136 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
5137 	case PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ:
5138 		if (conn->llcp_conn_param.ack != conn->llcp_conn_param.req) {
5139 			err = reject_ind_conn_upd_recv(conn, rx, pdu_rx);
5140 		}
5141 		break;
5142 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
5143 
5144 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
5145 	case PDU_DATA_LLCTRL_TYPE_LENGTH_REQ:
5146 		if (conn->llcp_length.ack != conn->llcp_length.req) {
5147 			err = reject_ind_dle_recv(conn, pdu_rx);
5148 		}
5149 		break;
5150 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
5151 	default:
5152 		/* Ignore */
5153 		break;
5154 	}
5155 
5156 	if (err) {
		/* Mark buffer for release */
5158 		rx->hdr.type = NODE_RX_TYPE_RELEASE;
5159 	}
5160 }
5161 
5162 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
5163 #if !defined(CONFIG_BT_CTLR_PHY)
5164 static void length_resp_send(struct ll_conn *conn, struct node_tx *tx,
5165 			     uint16_t eff_rx_octets, uint16_t eff_tx_octets)
5166 #else /* CONFIG_BT_CTLR_PHY */
5167 static void length_resp_send(struct ll_conn *conn, struct node_tx *tx,
5168 			     uint16_t eff_rx_octets, uint16_t eff_rx_time,
5169 			     uint16_t eff_tx_octets, uint16_t eff_tx_time)
5170 #endif /* CONFIG_BT_CTLR_PHY */
5171 {
5172 	struct pdu_data *pdu_tx;
5173 
5174 	pdu_tx = (void *)tx->pdu;
5175 	pdu_tx->ll_id = PDU_DATA_LLID_CTRL;
5176 	pdu_tx->len = offsetof(struct pdu_data_llctrl, length_rsp) +
5177 		sizeof(struct pdu_data_llctrl_length_rsp);
5178 	pdu_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_LENGTH_RSP;
5179 	pdu_tx->llctrl.length_rsp.max_rx_octets =
5180 		sys_cpu_to_le16(eff_rx_octets);
5181 	pdu_tx->llctrl.length_rsp.max_tx_octets =
5182 		sys_cpu_to_le16(eff_tx_octets);
5183 
5184 #if !defined(CONFIG_BT_CTLR_PHY)
5185 	pdu_tx->llctrl.length_rsp.max_rx_time =
5186 		sys_cpu_to_le16(PDU_DC_MAX_US(eff_rx_octets, PHY_1M));
5187 	pdu_tx->llctrl.length_rsp.max_tx_time =
5188 		sys_cpu_to_le16(PDU_DC_MAX_US(eff_tx_octets, PHY_1M));
5189 #else /* CONFIG_BT_CTLR_PHY */
5190 	pdu_tx->llctrl.length_rsp.max_rx_time = sys_cpu_to_le16(eff_rx_time);
5191 	pdu_tx->llctrl.length_rsp.max_tx_time = sys_cpu_to_le16(eff_tx_time);
5192 #endif /* CONFIG_BT_CTLR_PHY */
5193 
5194 	ctrl_tx_sec_enqueue(conn, tx);
5195 }
5196 
5197 static inline int length_req_rsp_recv(struct ll_conn *conn, memq_link_t *link,
5198 				      struct node_rx_pdu **rx,
5199 				      struct pdu_data *pdu_rx)
5200 {
5201 	struct node_tx *tx = NULL;
5202 	uint16_t eff_rx_octets;
5203 	uint16_t eff_tx_octets;
5204 #if defined(CONFIG_BT_CTLR_PHY)
5205 	uint16_t eff_rx_time;
5206 	uint16_t eff_tx_time;
5207 #endif /* CONFIG_BT_CTLR_PHY */
5208 
5209 	/* Check for free ctrl tx PDU */
5210 	if (pdu_rx->llctrl.opcode == PDU_DATA_LLCTRL_TYPE_LENGTH_REQ) {
5211 		int err;
5212 
5213 		/* Check transaction violation and get free ctrl tx PDU */
5214 		tx = ctrl_tx_rsp_mem_acquire(conn, *rx, &err);
5215 		if (!tx) {
5216 			return err;
5217 		}
5218 	}
5219 
5220 	eff_rx_octets = conn->lll.max_rx_octets;
5221 	eff_tx_octets = conn->lll.max_tx_octets;
5222 
5223 #if defined(CONFIG_BT_CTLR_PHY)
5224 	eff_rx_time = conn->lll.max_rx_time;
5225 	eff_tx_time = conn->lll.max_tx_time;
5226 #endif /* CONFIG_BT_CTLR_PHY */
5227 
5228 	if (/* Local idle, and Peer request then complete the Peer procedure
5229 	     * with response.
5230 	     */
5231 	    ((conn->llcp_length.req == conn->llcp_length.ack) && tx) ||
5232 	    /* or Local has active... */
5233 	    ((conn->llcp_length.req != conn->llcp_length.ack) &&
5234 	     /* with Local requested and Peer request then complete the
5235 	      * Peer procedure with response.
5236 	      */
5237 	     ((((conn->llcp_length.state == LLCP_LENGTH_STATE_REQ) ||
5238 		(conn->llcp_length.state == LLCP_LENGTH_STATE_REQ_ACK_WAIT)) &&
5239 	       tx) ||
5240 	      /* with Local waiting for response, and Peer response then
5241 	       * complete the Local procedure or Peer request then complete the
5242 	       * Peer procedure with response.
5243 	       */
5244 	      (conn->llcp_length.state == LLCP_LENGTH_STATE_RSP_WAIT)))) {
5245 		struct pdu_data_llctrl_length_req *lr;
5246 		uint16_t max_rx_octets;
5247 		uint16_t max_tx_octets;
5248 
5249 		lr = &pdu_rx->llctrl.length_req;
5250 
5251 		/* use the minimal of our default_tx_octets and
5252 		 * peer max_rx_octets
5253 		 */
5254 		max_rx_octets = sys_le16_to_cpu(lr->max_rx_octets);
5255 		if (max_rx_octets >= PDU_DC_PAYLOAD_SIZE_MIN) {
5256 			eff_tx_octets = MIN(max_rx_octets,
5257 					    conn->default_tx_octets);
5258 		}
5259 
5260 		/* use the minimal of our max supported and
5261 		 * peer max_tx_octets
5262 		 */
5263 		max_tx_octets = sys_le16_to_cpu(lr->max_tx_octets);
5264 		if (max_tx_octets >= PDU_DC_PAYLOAD_SIZE_MIN) {
5265 			eff_rx_octets = MIN(max_tx_octets,
5266 					    LL_LENGTH_OCTETS_RX_MAX);
5267 		}
5268 
5269 #if defined(CONFIG_BT_CTLR_PHY)
5270 		uint16_t max_rx_time;
5271 		uint16_t max_tx_time;
5272 		uint16_t lr_rx_time, lr_tx_time;
5273 
5274 		dle_max_time_get(conn, &max_rx_time, &max_tx_time);
5275 
		/* use the minimum of our default_tx_time and
		 * peer max_rx_time
		 */
5279 
5280 		lr_rx_time = sys_le16_to_cpu(lr->max_rx_time);
5281 		lr_tx_time = sys_le16_to_cpu(lr->max_tx_time);
5282 
5283 		if (lr_rx_time >= PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN,
5284 						PHY_1M)) {
5285 			eff_tx_time = MIN(lr_rx_time, max_tx_time);
5286 #if defined(CONFIG_BT_CTLR_PHY_CODED)
5287 			eff_tx_time = MAX(eff_tx_time,
5288 					  PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN,
5289 							conn->lll.phy_tx));
5290 #endif /* CONFIG_BT_CTLR_PHY_CODED */
5291 		}
5292 
		/* use the minimum of our max supported and
		 * peer max_tx_time
		 */
5296 		if (lr_tx_time >= PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN,
5297 						PHY_1M)) {
5298 			eff_rx_time = MIN(lr_tx_time, max_rx_time);
5299 #if defined(CONFIG_BT_CTLR_PHY_CODED)
5300 			eff_rx_time = MAX(eff_rx_time,
5301 					  PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN,
5302 							conn->lll.phy_rx));
#endif /* CONFIG_BT_CTLR_PHY_CODED */
5304 		}
5305 #endif /* CONFIG_BT_CTLR_PHY */
5306 
5307 		/* check if change in rx octets */
5308 		if (eff_rx_octets != conn->lll.max_rx_octets) {
5309 			/* FIXME: If we want to resize Rx Pool, decide to
5310 			 *        nack as required when implementing. Also,
5311 			 *        closing the current event may be needed.
5312 			 */
5313 
5314 			/* trigger or retain the ctrl procedure so as
5315 			 * to resize the rx buffers.
5316 			 */
5317 			conn->llcp_length.rx_octets = eff_rx_octets;
5318 			conn->llcp_length.tx_octets = eff_tx_octets;
5319 
5320 #if defined(CONFIG_BT_CTLR_PHY)
5321 			conn->llcp_length.rx_time = eff_rx_time;
5322 			conn->llcp_length.tx_time = eff_tx_time;
5323 #endif /* CONFIG_BT_CTLR_PHY */
5324 
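			/* Setting ack = req - 1 marks the length procedure
			 * as locally active until the resize completes.
			 */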
5325 			conn->llcp_length.ack = conn->llcp_length.req - 1;
5326 
5327 			if (tx) {
5328 				conn->llcp_length.state =
5329 					LLCP_LENGTH_STATE_RESIZE_RSP;
5330 			} else {
5331 				/* accept the effective tx */
5332 				conn->lll.max_tx_octets = eff_tx_octets;
5333 #if defined(CONFIG_BT_CTLR_PHY)
5334 				/* accept the effective tx time */
5335 				conn->lll.max_tx_time = eff_tx_time;
5336 #endif /* CONFIG_BT_CTLR_PHY */
5337 				conn->llcp_length.state =
5338 					LLCP_LENGTH_STATE_RESIZE;
5339 			}
5340 
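			/* Retain the Rx-ed length PDU in the connection
			 * context by appending it to the LLCP node rx list;
			 * it is used for the procedure notification once the
			 * resize has been performed.
			 */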
5341 			link->mem = conn->llcp_rx;
5342 			(*rx)->hdr.link = link;
5343 			conn->llcp_rx = *rx;
5344 			*rx = NULL;
5345 		} else {
5346 			/* Procedure complete */
5347 			conn->llcp_length.ack = conn->llcp_length.req;
5348 			conn->procedure_expire = 0U;
5349 
5350 			/* No change in effective octets or time */
5351 			if (eff_tx_octets == conn->lll.max_tx_octets &&
5352 #if defined(CONFIG_BT_CTLR_PHY)
5353 			    eff_tx_time == conn->lll.max_tx_time &&
5354 			    eff_rx_time == conn->lll.max_rx_time &&
5355 #endif /* CONFIG_BT_CTLR_PHY */
5356 			    (1)) {
				/* Mark buffer for release */
5358 				(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
5359 
5360 				goto send_length_resp;
5361 			}
5362 
5363 #if defined(CONFIG_BT_CTLR_PHY)
5364 			/* accept the effective rx time */
5365 			conn->lll.max_rx_time = eff_rx_time;
5366 #endif /* CONFIG_BT_CTLR_PHY */
5367 
5368 			if (tx) {
5369 				/* trigger or retain the ctrl procedure so as
5370 				 * to resize the rx buffers.
5371 				 */
5372 				conn->llcp_length.rx_octets = eff_rx_octets;
5373 				conn->llcp_length.tx_octets = eff_tx_octets;
5374 
5375 #if defined(CONFIG_BT_CTLR_PHY)
5376 				conn->llcp_length.rx_time = eff_rx_time;
5377 				conn->llcp_length.tx_time = eff_tx_time;
5378 #endif /* CONFIG_BT_CTLR_PHY */
5379 
				/* Wait for rsp ack before tx change */
5381 				conn->llcp_length.ack =
5382 					(conn->llcp_length.req - 1);
5383 				conn->llcp_length.state =
5384 					LLCP_LENGTH_STATE_RSP_ACK_WAIT;
5385 			} else {
5386 				/* accept the effective tx */
5387 				conn->lll.max_tx_octets = eff_tx_octets;
5388 
5389 #if defined(CONFIG_BT_CTLR_PHY)
5390 				/* accept the effective tx time */
5391 				conn->lll.max_tx_time = eff_tx_time;
5392 #endif /* CONFIG_BT_CTLR_PHY */
5393 			}
5394 
5395 			/* prepare event parameters */
5396 			lr->max_rx_octets = sys_cpu_to_le16(eff_rx_octets);
5397 			lr->max_tx_octets = sys_cpu_to_le16(eff_tx_octets);
5398 
5399 #if !defined(CONFIG_BT_CTLR_PHY)
5400 			lr->max_rx_time =
5401 				sys_cpu_to_le16(PDU_DC_MAX_US(eff_rx_octets,
5402 							      PHY_1M));
5403 			lr->max_tx_time =
5404 				sys_cpu_to_le16(PDU_DC_MAX_US(eff_tx_octets,
5405 							      PHY_1M));
5406 #else /* CONFIG_BT_CTLR_PHY */
5407 			lr->max_rx_time = sys_cpu_to_le16(eff_rx_time);
5408 			lr->max_tx_time = sys_cpu_to_le16(eff_tx_time);
5409 #endif /* CONFIG_BT_CTLR_PHY */
5410 		}
5411 	} else {
		/* Drop a response that has no locally initiated request, and
		 * drop duplicate requests.
		 */
5415 		if (pdu_rx->llctrl.opcode != PDU_DATA_LLCTRL_TYPE_LENGTH_RSP) {
5416 			mem_release(tx, &mem_conn_tx_ctrl.free);
5417 
			/* Release the transaction lock, as the ctrl tx PDU
			 * is not being enqueued.
			 */
5421 			conn->common.txn_lock = 0U;
5422 
5423 			/* Defer new request if previous in resize state */
5424 			if (conn->llcp_length.state ==
5425 			    LLCP_LENGTH_STATE_RESIZE) {
5426 				return -EBUSY;
5427 			}
5428 		}
5429 
5430 		return 0;
5431 	}
5432 
5433 send_length_resp:
5434 	if (tx) {
5435 		/* FIXME: if nack-ing is implemented then release tx instead
5436 		 *        of sending resp.
5437 		 */
5438 #if !defined(CONFIG_BT_CTLR_PHY)
5439 		length_resp_send(conn, tx, eff_rx_octets,
5440 				 eff_tx_octets);
5441 #else /* CONFIG_BT_CTLR_PHY */
5442 		length_resp_send(conn, tx, eff_rx_octets,
5443 				 eff_rx_time, eff_tx_octets,
5444 				 eff_tx_time);
5445 #endif /* CONFIG_BT_CTLR_PHY */
5446 	}
5447 
5448 	return 0;
5449 }
5450 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
5451 
5452 #if defined(CONFIG_BT_CTLR_LE_PING)
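/* Respond to an LL_PING_REQ: enqueue an LL_PING_RSP and mark the rx node
 * for release.
 */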
5453 static int ping_resp_send(struct ll_conn *conn, struct node_rx_pdu *rx)
5454 {
5455 	struct pdu_data *pdu_tx;
5456 	struct node_tx *tx;
5457 	int err;
5458 
5459 	/* Check transaction violation and get free ctrl tx PDU */
5460 	tx = ctrl_tx_rsp_mem_acquire(conn, rx, &err);
5461 	if (!tx) {
5462 		return err;
5463 	}
5464 
5465 	pdu_tx = (void *)tx->pdu;
5466 	pdu_tx->ll_id = PDU_DATA_LLID_CTRL;
5467 	pdu_tx->len = offsetof(struct pdu_data_llctrl, ping_rsp) +
5468 		      sizeof(struct pdu_data_llctrl_ping_rsp);
5469 	pdu_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_PING_RSP;
5470 
5471 	ctrl_tx_sec_enqueue(conn, tx);
5472 
	/* Mark buffer for release */
5474 	rx->hdr.type = NODE_RX_TYPE_RELEASE;
5475 
5476 	return 0;
5477 }
5478 #endif /* CONFIG_BT_CTLR_LE_PING */
5479 
5480 #if defined(CONFIG_BT_CTLR_PHY)
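/* Build and enqueue an LL_PHY_RSP carrying the local PHY preferences, and
 * arm the local procedure state to wait for the central's PHY_UPDATE_IND.
 */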
5481 static int phy_rsp_send(struct ll_conn *conn, struct node_rx_pdu *rx,
5482 			struct pdu_data *pdu_rx)
5483 {
5484 	struct pdu_data_llctrl_phy_req *p;
5485 	struct pdu_data *pdu_ctrl_tx;
5486 	struct node_tx *tx;
5487 	int err;
5488 
5489 	/* Check transaction violation and get free ctrl tx PDU */
5490 	tx = ctrl_tx_rsp_mem_acquire(conn, rx, &err);
5491 	if (!tx) {
5492 		return err;
5493 	}
5494 
5495 	/* Wait for peer central to complete the procedure */
5496 	conn->llcp_phy.state = LLCP_PHY_STATE_RSP_WAIT;
	if (conn->llcp_phy.ack == conn->llcp_phy.req) {
		conn->llcp_phy.ack--;

		conn->llcp_phy.cmd = 0U;

		conn->llcp_phy.tx = conn->phy_pref_tx;
		conn->llcp_phy.rx = conn->phy_pref_rx;

		/* Start Procedure Timeout (TODO: this shall not replace
		 * terminate procedure).
		 */
		conn->procedure_expire = conn->procedure_reload;
	}
5514 
5515 	p = &pdu_rx->llctrl.phy_req;
5516 
5517 	conn->llcp_phy.tx &= p->rx_phys;
5518 	conn->llcp_phy.rx &= p->tx_phys;
5519 
5520 	pdu_ctrl_tx = (void *)tx->pdu;
5521 	pdu_ctrl_tx->ll_id = PDU_DATA_LLID_CTRL;
5522 	pdu_ctrl_tx->len = offsetof(struct pdu_data_llctrl, phy_rsp) +
5523 			   sizeof(struct pdu_data_llctrl_phy_rsp);
5524 	pdu_ctrl_tx->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_PHY_RSP;
5525 	pdu_ctrl_tx->llctrl.phy_rsp.tx_phys = conn->phy_pref_tx;
5526 	pdu_ctrl_tx->llctrl.phy_rsp.rx_phys = conn->phy_pref_rx;
5527 
5528 	ctrl_tx_enqueue(conn, tx);
5529 
	/* Mark buffer for release */
5531 	rx->hdr.type = NODE_RX_TYPE_RELEASE;
5532 
5533 	return 0;
5534 }
5535 
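/* Handle an LL_PHY_UPDATE_IND received in the peripheral role: a no-change
 * indication completes the procedure immediately, otherwise the PDU is
 * retained and the new PHYs are applied at the indicated instant.
 */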
static inline uint8_t phy_upd_ind_recv(struct ll_conn *conn, memq_link_t *link,
				       struct node_rx_pdu **rx,
				       struct pdu_data *pdu_rx)
5539 {
5540 	struct pdu_data_llctrl_phy_upd_ind *ind = &pdu_rx->llctrl.phy_upd_ind;
5541 	uint16_t instant;
5542 	uint8_t phy;
5543 
5544 	/* Both tx and rx PHY unchanged */
5545 	if (!((ind->c_to_p_phy | ind->p_to_c_phy) & 0x07)) {
5546 		struct node_rx_pu *p;
5547 
5548 		/* Not in PHY Update Procedure or PDU in wrong state */
5549 		if ((conn->llcp_phy.ack == conn->llcp_phy.req) ||
5550 		    (conn->llcp_phy.state != LLCP_PHY_STATE_RSP_WAIT)) {
			/* Mark buffer for release */
5552 			(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
5553 
5554 			return 0;
5555 		}
5556 
5557 		/* Procedure complete */
5558 		conn->llcp_phy.ack = conn->llcp_phy.req;
5559 		conn->llcp_phy.pause_tx = 0U;
5560 		conn->procedure_expire = 0U;
5561 
5562 		/* Reset packet timing restrictions */
5563 		conn->lll.phy_tx_time = conn->lll.phy_tx;
5564 
5565 		/* Ignore event generation if not local cmd initiated */
5566 		if (!conn->llcp_phy.cmd) {
			/* Mark buffer for release */
5568 			(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
5569 
5570 			return 0;
5571 		}
5572 
5573 		/* generate phy update complete event */
5574 		(*rx)->hdr.type = NODE_RX_TYPE_PHY_UPDATE;
5575 
5576 		p = (void *)pdu_rx;
5577 		p->status = 0U;
5578 		p->tx = conn->lll.phy_tx;
5579 		p->rx = conn->lll.phy_rx;
5580 
5581 		return 0;
5582 	}
5583 
	/* Fail if more than one PHY is specified */
5585 	phy = ind->c_to_p_phy;
5586 	if (util_ones_count_get(&phy, sizeof(phy)) > 1U) {
		/* Mark buffer for release */
5588 		(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
5589 
5590 		return BT_HCI_ERR_INVALID_LL_PARAM;
5591 	}
5592 	phy = ind->p_to_c_phy;
5593 	if (util_ones_count_get(&phy, sizeof(phy)) > 1U) {
		/* Mark buffer for release */
5595 		(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
5596 
5597 		return BT_HCI_ERR_INVALID_LL_PARAM;
5598 	}
5599 
	/* Instant passed: with a 16-bit event counter, a difference of more
	 * than 32767 (mod 65536) places the instant in the past.
	 */
5601 	instant = sys_le16_to_cpu(ind->instant);
5602 	if (((instant - conn->lll.event_counter) & 0xffff) > 0x7fff) {
		/* Mark buffer for release */
5604 		(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
5605 
5606 		return BT_HCI_ERR_INSTANT_PASSED;
5607 	}
5608 
	/* Different transaction collision: (llcp_req - llcp_ack) & 0x03 being
	 * 0x02 means another control procedure is already committed.
	 */
5610 	if (((conn->llcp_req - conn->llcp_ack) & 0x03) == 0x02) {
		/* Mark buffer for release */
5612 		(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
5613 
5614 		return BT_HCI_ERR_DIFF_TRANS_COLLISION;
5615 	}
5616 
5617 	if ((conn->llcp_phy.ack != conn->llcp_phy.req) &&
5618 	    (conn->llcp_phy.state == LLCP_PHY_STATE_RSP_WAIT)) {
5619 		/* Procedure complete, just wait for instant */
5620 		conn->llcp_phy.ack = conn->llcp_phy.req;
5621 		conn->llcp_phy.pause_tx = 0U;
5622 		conn->procedure_expire = 0U;
5623 
5624 		conn->llcp.phy_upd_ind.cmd = conn->llcp_phy.cmd;
5625 	}
5626 
5627 	conn->llcp.phy_upd_ind.tx = ind->p_to_c_phy;
5628 	conn->llcp.phy_upd_ind.rx = ind->c_to_p_phy;
5629 	conn->llcp.phy_upd_ind.instant = instant;
5630 	conn->llcp.phy_upd_ind.initiate = 0U;
5631 
5632 	/* Reserve the Rx-ed PHY Update Indication PDU in the connection
5633 	 * context, by appending to the LLCP node rx list. We do not mark it
5634 	 * for release in ULL, i.e., by returning *rx as NULL.
5635 	 * PHY Update notification to HCI layer will use node rx from this
5636 	 * list when at the instant.
5637 	 * If data length update is supported in the Controller, then, at the
5638 	 * instant we attempt to acquire an additional free node rx for Data
5639 	 * Length Update notification.
5640 	 */
5641 	link->mem = conn->llcp_rx;
5642 	(*rx)->hdr.link = link;
5643 	conn->llcp_rx = *rx;
5644 	*rx = NULL;
5645 
	/* Transition to PHY Update Ind received state and wait for the
	 * instant.
	 */
5649 	conn->llcp_type = LLCP_PHY_UPD;
5650 	conn->llcp_ack -= 2U;
5651 
5652 	/* Enforce packet timing restrictions until the instant */
5653 	if (conn->llcp.phy_upd_ind.tx) {
5654 		conn->lll.phy_tx_time = conn->llcp.phy_upd_ind.tx;
5655 	}
5656 
5657 	return 0;
5658 }
5659 #endif /* CONFIG_BT_CTLR_PHY */
5660 
5661 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
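/* Enqueue an LL_CIS_RSP conveying the locally acceptable CIS offset window,
 * then wait for the central's LL_CIS_IND.
 */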
5662 void event_send_cis_rsp(struct ll_conn *conn)
5663 {
5664 	struct node_tx *tx;
5665 
5666 	tx = mem_acquire(&mem_conn_tx_ctrl.free);
5667 	if (tx) {
5668 		struct pdu_data *pdu = (void *)tx->pdu;
5669 
5670 		pdu->ll_id = PDU_DATA_LLID_CTRL;
5671 		pdu->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_CIS_RSP;
5672 
5673 		sys_put_le24(conn->llcp_cis.cis_offset_min,
5674 			     pdu->llctrl.cis_rsp.cis_offset_min);
5675 		sys_put_le24(conn->llcp_cis.cis_offset_max,
5676 			     pdu->llctrl.cis_rsp.cis_offset_max);
5677 		pdu->llctrl.cis_rsp.conn_event_count =
5678 			sys_cpu_to_le16(conn->llcp_cis.conn_event_count);
5679 
		pdu->len = offsetof(struct pdu_data_llctrl, cis_rsp) +
			   sizeof(struct pdu_data_llctrl_cis_rsp);
5682 
5683 		conn->llcp_cis.state = LLCP_CIS_STATE_IND_WAIT;
5684 
5685 		ctrl_tx_enqueue(conn, tx);
5686 	}
5687 }
5688 
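/* At the connection event agreed in the CIS handshake, start the CIS and
 * conclude the LLCP CIS creation procedure.
 */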
5689 void event_peripheral_iso_prep(struct ll_conn *conn, uint16_t event_counter,
5690 			       uint32_t ticks_at_expire)
5691 {
5692 	if (event_counter == conn->llcp_cis.conn_event_count) {
5693 		ull_peripheral_iso_start(conn, ticks_at_expire);
5694 
5695 		conn->llcp_cis.state = LLCP_CIS_STATE_REQ;
5696 		conn->llcp_cis.ack = conn->llcp_cis.req;
5697 	}
5698 }
5699 
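/* Handle an LL_CIS_REQ: store the request parameters, acquire resources for
 * the new CIS and generate a CIS Request notification towards the host.
 */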
5700 static uint8_t cis_req_recv(struct ll_conn *conn, memq_link_t *link,
5701 			    struct node_rx_pdu **rx, struct pdu_data *pdu)
5702 {
5703 	struct pdu_data_llctrl_cis_req *req = &pdu->llctrl.cis_req;
5704 	struct node_rx_conn_iso_req *conn_iso_req;
5705 	uint16_t cis_handle;
5706 	uint8_t err;
5707 
5708 	conn->llcp_cis.cig_id = req->cig_id;
5709 	conn->llcp_cis.framed = req->framed;
5710 	conn->llcp_cis.c_max_sdu = sys_le16_to_cpu(req->c_max_sdu);
5711 	conn->llcp_cis.p_max_sdu = sys_le16_to_cpu(req->p_max_sdu);
5712 	conn->llcp_cis.cis_offset_min = sys_get_le24(req->cis_offset_min);
5713 	conn->llcp_cis.cis_offset_max = sys_get_le24(req->cis_offset_max);
5714 	conn->llcp_cis.conn_event_count =
5715 		sys_le16_to_cpu(req->conn_event_count);
5716 
5717 	/* Acquire resources for new CIS */
5718 	err = ull_peripheral_iso_acquire(conn, &pdu->llctrl.cis_req, &cis_handle);
5719 	if (err) {
5720 		return err;
5721 	}
5722 
5723 	conn->llcp_cis.cis_handle = cis_handle;
5724 	conn->llcp_cis.state = LLCP_CIS_STATE_RSP_WAIT;
5725 	conn->llcp_cis.ack -= 2U;
5726 
5727 	(*rx)->hdr.type = NODE_RX_TYPE_CIS_REQUEST;
5728 
5729 	conn_iso_req = (void *)pdu;
5730 	conn_iso_req->cig_id = req->cig_id;
5731 	conn_iso_req->cis_id = req->cis_id;
5732 	conn_iso_req->cis_handle = sys_le16_to_cpu(cis_handle);
5733 
5734 	return 0;
5735 }
5736 
5737 static uint8_t cis_ind_recv(struct ll_conn *conn, memq_link_t *link,
5738 			    struct node_rx_pdu **rx, struct pdu_data *pdu)
5739 {
5740 	struct pdu_data_llctrl_cis_ind *ind = &pdu->llctrl.cis_ind;
5741 	uint8_t err;
5742 
5743 	conn->llcp_cis.conn_event_count =
5744 		sys_le16_to_cpu(ind->conn_event_count);
5745 
5746 	/* Setup CIS connection */
5747 	err = ull_peripheral_iso_setup(&pdu->llctrl.cis_ind,
5748 				       conn->llcp_cis.cig_id,
5749 				       conn->llcp_cis.cis_handle);
5750 
5751 	conn->llcp_cis.state = LLCP_CIS_STATE_INST_WAIT;
5752 
	/* Mark buffer for release */
5754 	(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
5755 
5756 	return err;
5757 }
5758 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */
5759 
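/* Side effects applied for specific control PDUs ahead of their tx ack:
 * PDUs that are part of the encryption or PHY update handshakes pause data
 * packet transmission; the pause is lifted when the respective procedure
 * completes.
 */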
5760 static inline void ctrl_tx_pre_ack(struct ll_conn *conn,
5761 				   struct pdu_data *pdu_tx)
5762 {
5763 	switch (pdu_tx->llctrl.opcode) {
5764 #if defined(CONFIG_BT_CTLR_LE_ENC)
5765 	case PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_RSP:
5766 		if (!conn->lll.role) {
5767 			break;
5768 		}
5769 		__fallthrough;
5770 #if defined(CONFIG_BT_CENTRAL)
5771 	case PDU_DATA_LLCTRL_TYPE_ENC_REQ:
5772 	case PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_REQ:
5773 #endif /* CONFIG_BT_CENTRAL */
5774 #if defined(CONFIG_BT_PERIPHERAL)
5775 	case PDU_DATA_LLCTRL_TYPE_ENC_RSP:
5776 #endif /* CONFIG_BT_PERIPHERAL */
5777 		/* pause data packet tx */
5778 		conn->llcp_enc.pause_tx = 1U;
5779 		break;
5780 #endif /* CONFIG_BT_CTLR_LE_ENC */
5781 
5782 #if defined(CONFIG_BT_CTLR_PHY)
5783 	case PDU_DATA_LLCTRL_TYPE_PHY_REQ:
5784 #if defined(CONFIG_BT_PERIPHERAL)
5785 	case PDU_DATA_LLCTRL_TYPE_PHY_RSP:
5786 #endif /* CONFIG_BT_PERIPHERAL */
5787 		/* pause data packet tx */
5788 		conn->llcp_phy.pause_tx = 1U;
5789 		break;
5790 #endif /* CONFIG_BT_CTLR_PHY */
5791 
5792 	default:
5793 		/* Do nothing for other ctrl packet ack */
5794 		break;
5795 	}
5796 }
5797 
5798 static inline void ctrl_tx_ack(struct ll_conn *conn, struct node_tx **tx,
5799 			       struct pdu_data *pdu_tx)
5800 {
5801 	switch (pdu_tx->llctrl.opcode) {
5802 	case PDU_DATA_LLCTRL_TYPE_TERMINATE_IND:
5803 	{
5804 		if (pdu_tx->llctrl.terminate_ind.error_code ==
5805 		    BT_HCI_ERR_REMOTE_USER_TERM_CONN) {
5806 			conn->llcp_terminate.reason_final =
5807 				BT_HCI_ERR_LOCALHOST_TERM_CONN;
5808 		} else {
5809 			conn->llcp_terminate.reason_final =
5810 			      pdu_tx->llctrl.terminate_ind.error_code;
5811 		}
5812 
5813 		/* Make (req - ack) == 3, a state indicating terminate_ind has
5814 		 * been ack-ed.
5815 		 */
5816 		conn->llcp_terminate.ack--;
5817 	}
5818 	break;
5819 
5820 	case PDU_DATA_LLCTRL_TYPE_FEATURE_RSP:
5821 	case PDU_DATA_LLCTRL_TYPE_PING_RSP:
5822 	case PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP:
5823 		/* Reset the transaction lock */
5824 		conn->common.txn_lock = 0U;
5825 		break;
5826 
5827 #if defined(CONFIG_BT_CTLR_LE_ENC)
5828 #if defined(CONFIG_BT_CENTRAL)
5829 	case PDU_DATA_LLCTRL_TYPE_ENC_REQ:
5830 		/* things from central stored for session key calculation */
5831 		memcpy(&conn->llcp.encryption.skd[0],
5832 		       &pdu_tx->llctrl.enc_req.skdm[0], 8);
5833 		memcpy(&conn->lll.ccm_rx.iv[0],
5834 		       &pdu_tx->llctrl.enc_req.ivm[0], 4);
5835 
5836 		/* pause data packet tx */
5837 		conn->llcp_enc.pause_tx = 1U;
5838 
		/* Start Procedure Timeout (this will not replace the
		 * terminate procedure, which always takes place before any
		 * packets go out, hence safe by design).
		 */
5843 		conn->procedure_expire = conn->procedure_reload;
5844 
5845 		/* Reset enc req queued state */
5846 		conn->llcp_enc.ack = conn->llcp_enc.req;
5847 		break;
5848 #endif /* CONFIG_BT_CENTRAL */
5849 
5850 #if defined(CONFIG_BT_PERIPHERAL)
5851 	case PDU_DATA_LLCTRL_TYPE_ENC_RSP:
5852 		/* pause data packet tx */
5853 		conn->llcp_enc.pause_tx = 1U;
5854 		break;
5855 
5856 	case PDU_DATA_LLCTRL_TYPE_START_ENC_REQ:
		/* Remember that we may have received an encrypted
		 * START_ENC_RSP along with this tx ack at this point in
		 * time.
		 */
5860 		conn->llcp.encryption.state = LLCP_ENC_STATE_ENC_WAIT;
5861 		break;
5862 #endif /* CONFIG_BT_PERIPHERAL */
5863 
5864 	case PDU_DATA_LLCTRL_TYPE_START_ENC_RSP:
5865 		if (conn->lll.role) {
5866 			/* resume data packet rx and tx */
5867 			conn->llcp_enc.pause_rx = 0U;
5868 			conn->llcp_enc.pause_tx = 0U;
5869 
5870 			/* Procedure complete */
5871 			conn->procedure_expire = 0U;
5872 
5873 			/* procedure request acked */
5874 			conn->llcp_ack = conn->llcp_req;
5875 		} else {
5876 			conn->llcp.encryption.state = LLCP_ENC_STATE_ENC_WAIT;
5877 		}
5878 		break;
5879 
5880 #if defined(CONFIG_BT_CENTRAL)
5881 	case PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_REQ:
5882 		/* pause data packet tx */
5883 		conn->llcp_enc.pause_tx = 1U;
5884 
5885 		/* key refresh */
5886 		conn->llcp_enc.refresh = 1U;
5887 
		/* Start Procedure Timeout (this will not replace the
		 * terminate procedure, which always takes place before any
		 * packets go out, hence safe by design).
		 */
5892 		conn->procedure_expire = conn->procedure_reload;
5893 
5894 		/* Reset enc req queued state */
5895 		conn->llcp_enc.ack = conn->llcp_enc.req;
5896 		break;
5897 #endif /* CONFIG_BT_CENTRAL */
5898 
5899 	case PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_RSP:
5900 #if defined(CONFIG_BT_CENTRAL)
5901 		if (!conn->lll.role) {
			/* reuse the tx-ed PDU to send the enc req */
5903 			enc_req_reused_send(conn, tx);
5904 		} else
5905 #endif /* CONFIG_BT_CENTRAL */
5906 		{
5907 			/* pause data packet tx */
5908 			conn->llcp_enc.pause_tx = 1U;
5909 		}
5910 		break;
5911 
5912 	case PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND:
5913 		if (pdu_tx->llctrl.reject_ext_ind.reject_opcode !=
5914 		    PDU_DATA_LLCTRL_TYPE_ENC_REQ) {
5915 			/* Reset the transaction lock set by connection
5916 			 * parameter request and PHY update procedure when
5917 			 * sending the Reject Ext Ind PDU.
5918 			 */
5919 			conn->common.txn_lock = 0U;
5920 
5921 			break;
5922 		}
5923 		__fallthrough;
5924 
5925 	case PDU_DATA_LLCTRL_TYPE_REJECT_IND:
5926 		/* resume data packet rx and tx */
5927 		conn->llcp_enc.pause_rx = 0U;
5928 		conn->llcp_enc.pause_tx = 0U;
5929 
5930 		/* Procedure complete */
5931 		conn->procedure_expire = 0U;
5932 		break;
5933 #endif /* CONFIG_BT_CTLR_LE_ENC */
5934 
5935 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
5936 	case PDU_DATA_LLCTRL_TYPE_LENGTH_REQ:
5937 		/* wait for response */
5938 		if (conn->llcp_length.state == LLCP_LENGTH_STATE_REQ_ACK_WAIT) {
5939 			conn->llcp_length.state = LLCP_LENGTH_STATE_RSP_WAIT;
5940 		}
5941 		break;
5942 
5943 	case PDU_DATA_LLCTRL_TYPE_LENGTH_RSP:
5944 		/* Reset the transaction lock */
5945 		conn->common.txn_lock = 0U;
5946 
5947 		if (conn->llcp_length.req != conn->llcp_length.ack) {
5948 			switch (conn->llcp_length.state) {
5949 			case LLCP_LENGTH_STATE_RSP_ACK_WAIT:
5950 			case LLCP_LENGTH_STATE_RESIZE_RSP:
5951 			case LLCP_LENGTH_STATE_RESIZE_RSP_ACK_WAIT:
5952 				/* accept the effective tx */
5953 				conn->lll.max_tx_octets =
5954 					conn->llcp_length.tx_octets;
5955 
5956 #if defined(CONFIG_BT_CTLR_PHY)
5957 				/* accept the effective tx time */
5958 				conn->lll.max_tx_time =
5959 					conn->llcp_length.tx_time;
5960 #endif /* CONFIG_BT_CTLR_PHY */
5961 
5962 				if (conn->llcp_length.state ==
5963 				    LLCP_LENGTH_STATE_RESIZE_RSP) {
5964 					conn->llcp_length.state =
5965 						LLCP_LENGTH_STATE_RESIZE;
5966 
5967 					break;
5968 				}
5969 
				/* check for a cached, pending length request */
5971 				if (!conn->llcp_length.cache.tx_octets) {
5972 					/* Procedure complete */
5973 					conn->llcp_length.ack =
5974 						conn->llcp_length.req;
5975 					conn->procedure_expire = 0U;
5976 
5977 					break;
5978 				}
5979 
5980 				/* Initiate cached procedure */
5981 				conn->llcp_length.tx_octets =
5982 					conn->llcp_length.cache.tx_octets;
5983 				conn->llcp_length.cache.tx_octets = 0;
5984 #if defined(CONFIG_BT_CTLR_PHY)
5985 				conn->llcp_length.tx_time =
5986 					conn->llcp_length.cache.tx_time;
5987 #endif /* CONFIG_BT_CTLR_PHY */
5988 				conn->llcp_length.state = LLCP_LENGTH_STATE_REQ;
5989 				break;
5990 
5991 			default:
5992 				break;
5993 			}
5994 		}
5995 		break;
5996 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
5997 
5998 #if defined(CONFIG_BT_CTLR_PHY)
5999 	case PDU_DATA_LLCTRL_TYPE_PHY_REQ:
6000 		conn->llcp_phy.state = LLCP_PHY_STATE_RSP_WAIT;
6001 		__fallthrough;
6002 
6003 #if defined(CONFIG_BT_PERIPHERAL)
6004 	case PDU_DATA_LLCTRL_TYPE_PHY_RSP:
6005 		if (conn->lll.role) {
6006 			/* select the probable PHY with longest Tx time, which
6007 			 * will be restricted to fit current
6008 			 * connEffectiveMaxTxTime.
6009 			 */
6010 			uint8_t phy_tx_time[8] = {PHY_1M, PHY_1M, PHY_2M,
6011 						  PHY_1M, PHY_CODED, PHY_CODED,
6012 						  PHY_CODED, PHY_CODED};
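			/* Index is the union of PHY bit masks (bit 0: 1M,
			 * bit 1: 2M, bit 2: Coded); each entry is the PHY in
			 * that set with the longest packet time.
			 */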
6013 			struct lll_conn *lll;
6014 			uint8_t phys;
6015 
6016 			/* Reset the transaction lock when PHY update response
6017 			 * sent by peripheral is acknowledged.
6018 			 */
6019 			if (pdu_tx->llctrl.opcode ==
6020 			    PDU_DATA_LLCTRL_TYPE_PHY_RSP) {
6021 				conn->common.txn_lock = 0U;
6022 			}
6023 
6024 			lll = &conn->lll;
6025 			phys = conn->llcp_phy.tx | lll->phy_tx;
6026 			lll->phy_tx_time = phy_tx_time[phys];
6027 		}
6028 
6029 		/* resume data packet tx */
6030 		conn->llcp_phy.pause_tx = 0U;
6031 		break;
6032 #endif /* CONFIG_BT_PERIPHERAL */
6033 
6034 #if defined(CONFIG_BT_CENTRAL)
6035 	case PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND:
6036 		conn->lll.phy_tx_time = conn->llcp.phy_upd_ind.tx;
6037 		/* resume data packet tx */
6038 		conn->llcp_phy.pause_tx = 0U;
6039 		break;
6040 #endif /* CONFIG_BT_CENTRAL */
6041 #endif /* CONFIG_BT_CTLR_PHY */
6042 
6043 	default:
6044 		/* Do nothing for other ctrl packet ack */
6045 		break;
6046 	}
6047 }
6048 
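/* Dispatch a received LL control PDU: validate the PDU length and role,
 * then run the per-opcode procedure handling. A non-zero (nack) return
 * leaves the PDU unacknowledged, e.g. when no ctrl tx buffer is free for
 * the response.
 */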
6049 static inline int ctrl_rx(memq_link_t *link, struct node_rx_pdu **rx,
6050 			  struct pdu_data *pdu_rx, struct ll_conn *conn)
6051 {
6052 	int nack = 0;
6053 	uint8_t opcode;
6054 
6055 	opcode = pdu_rx->llctrl.opcode;
6056 
6057 #if defined(CONFIG_BT_CTLR_LE_ENC)
	/* FIXME: do the check in each individual case to reduce CPU time */
6059 	if (conn->llcp_enc.pause_rx && ctrl_is_unexpected(conn, opcode)) {
6060 		conn->llcp_terminate.reason_final =
6061 			BT_HCI_ERR_TERM_DUE_TO_MIC_FAIL;
6062 
		/* Mark buffer for release */
6064 		(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
6065 
6066 		return 0;
6067 	}
6068 #endif /* CONFIG_BT_CTLR_LE_ENC */
6069 
6070 	switch (opcode) {
6071 #if defined(CONFIG_BT_PERIPHERAL)
6072 	case PDU_DATA_LLCTRL_TYPE_CONN_UPDATE_IND:
6073 	{
6074 		uint8_t err;
6075 
6076 		if (!conn->lll.role ||
6077 		    PDU_DATA_LLCTRL_LEN(conn_update_ind) != pdu_rx->len) {
6078 			goto ull_conn_rx_unknown_rsp_send;
6079 		}
6080 
6081 		err = conn_upd_recv(conn, link, rx, pdu_rx);
6082 		if (err) {
6083 			conn->llcp_terminate.reason_final = err;
6084 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
6085 		} else {
6086 			/* conn param req procedure, if any, is complete */
6087 			conn->procedure_expire = 0U;
6088 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
6089 		}
6090 	}
6091 	break;
6092 
6093 	case PDU_DATA_LLCTRL_TYPE_CHAN_MAP_IND:
6094 	{
6095 		uint8_t err;
6096 
6097 		if (!conn->lll.role ||
6098 		    PDU_DATA_LLCTRL_LEN(chan_map_ind) != pdu_rx->len) {
6099 			goto ull_conn_rx_unknown_rsp_send;
6100 		}
6101 
6102 		err = chan_map_upd_recv(conn, *rx, pdu_rx);
6103 		if (err) {
6104 			conn->llcp_terminate.reason_final = err;
6105 		}
6106 	}
6107 	break;
6108 #endif /* CONFIG_BT_PERIPHERAL */
6109 
6110 	case PDU_DATA_LLCTRL_TYPE_TERMINATE_IND:
6111 		if (PDU_DATA_LLCTRL_LEN(terminate_ind) != pdu_rx->len) {
6112 			goto ull_conn_rx_unknown_rsp_send;
6113 		}
6114 
6115 		terminate_ind_recv(conn, *rx, pdu_rx);
6116 		break;
6117 
6118 #if defined(CONFIG_BT_CTLR_LE_ENC)
6119 #if defined(CONFIG_BT_PERIPHERAL)
6120 	case PDU_DATA_LLCTRL_TYPE_ENC_REQ:
6121 		if (!conn->lll.role ||
6122 		    PDU_DATA_LLCTRL_LEN(enc_req) != pdu_rx->len) {
6123 			goto ull_conn_rx_unknown_rsp_send;
6124 		}
6125 
6126 #if defined(CONFIG_BT_CTLR_PHY)
6127 		/* LL_ENC_REQ was received while local peripheral initiated
6128 		 * procedure is in progress.
6129 		 */
6130 		if (unlikely(((conn->llcp_req - conn->llcp_ack) & 0x03) ==
6131 			     0x02)) {
6132 			/* Adjust ack due to decrement below, to prevent
6133 			 * failures
6134 			 */
6135 			conn->llcp_ack += 2U;
6136 
6137 			/* Store the local peripheral initiated procedure */
6138 			LL_ASSERT(conn->periph.llcp_type == LLCP_NONE);
6139 			conn->periph.llcp_type = conn->llcp_type;
6140 		}
6141 #endif /* CONFIG_BT_CTLR_PHY */
6142 
6143 #if defined(CONFIG_BT_CTLR_FAST_ENC)
6144 		/* TODO: BT Spec. text: may finalize the sending of additional
6145 		 * data channel PDUs queued in the controller.
6146 		 */
6147 		nack = enc_rsp_send(conn);
6148 		if (nack) {
6149 			break;
6150 		}
6151 
6152 		/* Start Enc Req to be scheduled by LL api */
6153 		conn->llcp.encryption.state = LLCP_ENC_STATE_LTK_WAIT;
6154 #else /* CONFIG_BT_CTLR_FAST_ENC */
6155 		/* back up rand and ediv for deferred generation of Enc Req */
6156 		memcpy(&conn->llcp_enc.rand[0],
6157 		       &pdu_rx->llctrl.enc_req.rand[0],
6158 		       sizeof(conn->llcp_enc.rand));
6159 		conn->llcp_enc.ediv[0] = pdu_rx->llctrl.enc_req.ediv[0];
6160 		conn->llcp_enc.ediv[1] = pdu_rx->llctrl.enc_req.ediv[1];
6161 
		/* Enc rsp to be scheduled in peripheral prepare */
6163 		conn->llcp.encryption.state = LLCP_ENC_STATE_INIT;
6164 
		/* Mark buffer for release */
6166 		(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
6167 #endif /* CONFIG_BT_CTLR_FAST_ENC */
6168 
6169 		/* Enc Setup state machine active */
6170 		conn->llcp_type = LLCP_ENCRYPTION;
6171 		conn->llcp_ack -= 2U;
6172 
6173 		/* things from central stored for session key calculation */
6174 		memcpy(&conn->llcp.encryption.skd[0],
6175 		       &pdu_rx->llctrl.enc_req.skdm[0], 8);
6176 		memcpy(&conn->lll.ccm_rx.iv[0],
6177 		       &pdu_rx->llctrl.enc_req.ivm[0], 4);
6178 
6179 		/* pause rx data packets */
6180 		conn->llcp_enc.pause_rx = 1U;
6181 
6182 		/* Start Procedure Timeout (TODO: this shall not replace
6183 		 * terminate procedure).
6184 		 */
6185 		conn->procedure_expire = conn->procedure_reload;
6186 
6187 		break;
6188 #endif /* CONFIG_BT_PERIPHERAL */
6189 
6190 #if defined(CONFIG_BT_CENTRAL)
6191 	case PDU_DATA_LLCTRL_TYPE_ENC_RSP:
6192 		if (conn->lll.role ||
6193 		    PDU_DATA_LLCTRL_LEN(enc_rsp) != pdu_rx->len) {
6194 			goto ull_conn_rx_unknown_rsp_send;
6195 		}
6196 
6197 		/* things sent by peripheral stored for session key calculation */
6198 		memcpy(&conn->llcp.encryption.skd[8],
6199 		       &pdu_rx->llctrl.enc_rsp.skds[0], 8);
6200 		memcpy(&conn->lll.ccm_rx.iv[4],
6201 		       &pdu_rx->llctrl.enc_rsp.ivs[0], 4);
6202 
6203 		/* pause rx data packets */
6204 		conn->llcp_enc.pause_rx = 1U;
6205 
		/* Mark buffer for release */
6207 		(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
6208 
6209 		break;
6210 
6211 	case PDU_DATA_LLCTRL_TYPE_START_ENC_REQ:
6212 		if (conn->lll.role || (conn->llcp_req == conn->llcp_ack) ||
6213 		    (conn->llcp_type != LLCP_ENCRYPTION) ||
6214 		    PDU_DATA_LLCTRL_LEN(start_enc_req) != pdu_rx->len) {
6215 			goto ull_conn_rx_unknown_rsp_send;
6216 		}
6217 
6218 		/* start enc rsp to be scheduled in central prepare */
6219 		conn->llcp.encryption.state = LLCP_ENC_STATE_INPROG;
6220 
		/* Mark buffer for release */
6222 		(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
6223 
6224 		break;
6225 #endif /* CONFIG_BT_CENTRAL */
6226 
6227 	case PDU_DATA_LLCTRL_TYPE_START_ENC_RSP:
6228 		if ((conn->llcp_req == conn->llcp_ack) ||
6229 		    (conn->llcp_type != LLCP_ENCRYPTION) ||
6230 		    (PDU_DATA_LLCTRL_LEN(start_enc_rsp) != pdu_rx->len)) {
6231 			goto ull_conn_rx_unknown_rsp_send;
6232 		}
6233 
6234 		if (conn->lll.role) {
6235 #if !defined(CONFIG_BT_CTLR_FAST_ENC)
6236 			/* start enc rsp to be scheduled in peripheral prepare */
6237 			conn->llcp.encryption.state = LLCP_ENC_STATE_INPROG;
6238 
6239 #else /* CONFIG_BT_CTLR_FAST_ENC */
6240 			nack = start_enc_rsp_send(conn, NULL);
6241 			if (nack) {
6242 				break;
6243 			}
6244 #endif /* CONFIG_BT_CTLR_FAST_ENC */
6245 
6246 		} else {
6247 			/* resume data packet rx and tx */
6248 			conn->llcp_enc.pause_rx = 0U;
6249 			conn->llcp_enc.pause_tx = 0U;
6250 
6251 			/* Procedure complete */
6252 			conn->procedure_expire = 0U;
6253 
6254 			/* procedure request acked */
6255 			conn->llcp_ack = conn->llcp_req;
6256 		}
6257 
6258 		/* enqueue the start enc resp (encryption change/refresh) */
6259 		if (conn->llcp_enc.refresh) {
6260 			conn->llcp_enc.refresh = 0U;
6261 
6262 			/* key refresh event */
6263 			(*rx)->hdr.type = NODE_RX_TYPE_ENC_REFRESH;
6264 		}
6265 		break;
6266 #endif /* CONFIG_BT_CTLR_LE_ENC */
6267 
6268 #if defined(CONFIG_BT_PERIPHERAL)
6269 	case PDU_DATA_LLCTRL_TYPE_FEATURE_REQ:
6270 		if (!conn->lll.role ||
6271 		    PDU_DATA_LLCTRL_LEN(feature_req) != pdu_rx->len) {
6272 			goto ull_conn_rx_unknown_rsp_send;
6273 		}
6274 
6275 		nack = feature_rsp_send(conn, *rx, pdu_rx);
6276 		break;
6277 #endif /* CONFIG_BT_PERIPHERAL */
6278 
6279 #if defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG)
6280 	case PDU_DATA_LLCTRL_TYPE_PER_INIT_FEAT_XCHG:
6281 		if (conn->lll.role ||
6282 		    PDU_DATA_LLCTRL_LEN(per_init_feat_xchg) != pdu_rx->len) {
6283 			goto ull_conn_rx_unknown_rsp_send;
6284 		}
6285 
6286 		nack = feature_rsp_send(conn, *rx, pdu_rx);
6287 		break;
6288 #endif /* CONFIG_BT_CENTRAL && CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG */
6289 
6290 #if defined(CONFIG_BT_CENTRAL) || defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG)
6291 	case PDU_DATA_LLCTRL_TYPE_FEATURE_RSP:
6292 		if ((!IS_ENABLED(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG) &&
6293 		     conn->lll.role) ||
6294 		    PDU_DATA_LLCTRL_LEN(feature_rsp) != pdu_rx->len) {
6295 			goto ull_conn_rx_unknown_rsp_send;
6296 		}
6297 
6298 		feature_rsp_recv(conn, pdu_rx);
6299 		break;
6300 #endif /* CONFIG_BT_CENTRAL || CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG */
6301 
6302 #if defined(CONFIG_BT_CTLR_LE_ENC)
6303 #if defined(CONFIG_BT_PERIPHERAL)
6304 	case PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_REQ:
6305 		if (!conn->lll.role ||
6306 		    PDU_DATA_LLCTRL_LEN(pause_enc_req) != pdu_rx->len) {
6307 			goto ull_conn_rx_unknown_rsp_send;
6308 		}
6309 
6310 		nack = pause_enc_rsp_send(conn, *rx, 1);
6311 		break;
6312 #endif /* CONFIG_BT_PERIPHERAL */
6313 
6314 	case PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_RSP:
6315 		if (PDU_DATA_LLCTRL_LEN(pause_enc_rsp) != pdu_rx->len) {
6316 			goto ull_conn_rx_unknown_rsp_send;
6317 		}
6318 
6319 		nack = pause_enc_rsp_send(conn, *rx, 0);
6320 		break;
6321 #endif /* CONFIG_BT_CTLR_LE_ENC */
6322 
6323 	case PDU_DATA_LLCTRL_TYPE_VERSION_IND:
6324 		if (PDU_DATA_LLCTRL_LEN(version_ind) != pdu_rx->len) {
6325 			goto ull_conn_rx_unknown_rsp_send;
6326 		}
6327 
6328 		nack = version_ind_send(conn, *rx, pdu_rx);
6329 		break;
6330 
6331 #if defined(CONFIG_BT_CTLR_LE_ENC)
6332 	case PDU_DATA_LLCTRL_TYPE_REJECT_IND:
6333 		if (PDU_DATA_LLCTRL_LEN(reject_ind) != pdu_rx->len) {
6334 			goto ull_conn_rx_unknown_rsp_send;
6335 		}
6336 
6337 		reject_ind_recv(conn, *rx, pdu_rx);
6338 		break;
6339 #endif /* CONFIG_BT_CTLR_LE_ENC */
6340 
6341 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
6342 	case PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ:
6343 		if (PDU_DATA_LLCTRL_LEN(conn_param_req) != pdu_rx->len) {
6344 			goto ull_conn_rx_unknown_rsp_send;
6345 		}
6346 
6347 
6348 		/* check CUI/CPR mutex for other connections having CPR in
6349 		 * progress.
6350 		 */
6351 		if (cpr_active_is_set(conn)) {
6352 			/* Unsupported LL Parameter Value */
6353 			nack = reject_ext_ind_send(conn, *rx,
6354 					PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ,
6355 					BT_HCI_ERR_UNSUPP_LL_PARAM_VAL);
6356 			break;
6357 		}
6358 
6359 		if (!conn->lll.role) {
6360 			if ((conn->llcp_conn_param.req !=
6361 					conn->llcp_conn_param.ack) &&
6362 			    ((conn->llcp_conn_param.state ==
6363 			      LLCP_CPR_STATE_REQ) ||
6364 			     (conn->llcp_conn_param.state ==
6365 			      LLCP_CPR_STATE_RSP_WAIT) ||
6366 			     (conn->llcp_conn_param.state ==
6367 			      LLCP_CPR_STATE_UPD))) {
				/* Same procedure collision */
6369 				nack = reject_ext_ind_send(conn, *rx,
6370 					PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ,
6371 					BT_HCI_ERR_LL_PROC_COLLISION);
6372 #if defined(CONFIG_BT_CTLR_PHY)
6373 #if defined(CONFIG_BT_CTLR_LE_ENC)
6374 			} else if (((((conn->llcp_req - conn->llcp_ack) &
6375 				      0x03) == 0x02) &&
6376 				    (conn->llcp_type != LLCP_ENCRYPTION)) ||
6377 				   (conn->llcp_phy.req != conn->llcp_phy.ack)) {
6378 #else /* !CONFIG_BT_CTLR_LE_ENC */
6379 			} else if ((((conn->llcp_req - conn->llcp_ack) &
6380 				     0x03) == 0x02) &&
6381 				   (conn->llcp_phy.req != conn->llcp_phy.ack)) {
6382 #endif /* !CONFIG_BT_CTLR_LE_ENC */
6383 #else /* !CONFIG_BT_CTLR_PHY */
6384 #if defined(CONFIG_BT_CTLR_LE_ENC)
6385 			} else if ((((conn->llcp_req - conn->llcp_ack) &
6386 				     0x03) == 0x02) &&
6387 				   (conn->llcp_type != LLCP_ENCRYPTION)) {
6388 #else /* !CONFIG_BT_CTLR_LE_ENC */
6389 			} else if (((conn->llcp_req - conn->llcp_ack) &
6390 				      0x03) == 0x02) {
6391 #endif /* !CONFIG_BT_CTLR_LE_ENC */
6392 #endif /* !CONFIG_BT_CTLR_PHY */
6393 				/* Different procedure collision */
6394 				nack = reject_ext_ind_send(conn, *rx,
6395 					PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ,
6396 					BT_HCI_ERR_DIFF_TRANS_COLLISION);
6397 			} else {
6398 				struct pdu_data_llctrl_conn_param_req *cpr = (void *)
6399 					&pdu_rx->llctrl.conn_param_req;
6400 				struct lll_conn *lll = &conn->lll;
6401 
6402 				/* Extract parameters */
6403 				uint16_t interval_min =
6404 					sys_le16_to_cpu(cpr->interval_min);
6405 				uint16_t interval_max =
6406 					sys_le16_to_cpu(cpr->interval_max);
6407 				uint16_t latency =
6408 					sys_le16_to_cpu(cpr->latency);
6409 				uint16_t timeout =
6410 					sys_le16_to_cpu(cpr->timeout);
6411 				uint16_t preferred_periodicity =
6412 					cpr->preferred_periodicity;
6413 
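				/* Units: interval in 1.25 ms steps (3200 =
				 * 4 s), latency in events (max 499), timeout
				 * in 10 ms steps (10..3200 = 100 ms..32 s).
				 * The (timeout * 4) > ((latency + 1) *
				 * interval_max) test below is the rule
				 * "supervision timeout > (1 + latency) *
				 * interval * 2" rescaled to these units.
				 */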
6414 				/* Invalid parameters */
6415 				if ((interval_min < CONN_INTERVAL_MIN(conn)) ||
6416 				    (interval_max > 3200) ||
6417 				    (interval_min > interval_max) ||
6418 				    (latency > 499) ||
6419 				    (timeout < 10) ||
6420 				    (timeout > 3200) ||
6421 				    ((timeout * 4U) <=
6422 				     ((latency + 1) * interval_max)) ||
6423 				    (preferred_periodicity > interval_max)) {
6424 					nack = reject_ext_ind_send(conn, *rx,
6425 						PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ,
6426 						BT_HCI_ERR_INVALID_LL_PARAM);
6427 					break;
6428 				}
6429 
6430 				/* save parameters to be used to select offset
6431 				 */
6432 				conn->llcp_conn_param.interval_min =
6433 					interval_min;
6434 				conn->llcp_conn_param.interval_max =
6435 					interval_max;
				conn->llcp_conn_param.latency = latency;
				conn->llcp_conn_param.timeout = timeout;
6438 				conn->llcp_conn_param.preferred_periodicity =
6439 					preferred_periodicity;
6440 				conn->llcp_conn_param.reference_conn_event_count =
6441 					sys_le16_to_cpu(cpr->reference_conn_event_count);
6442 				conn->llcp_conn_param.offset0 =
6443 					sys_le16_to_cpu(cpr->offset0);
6444 				conn->llcp_conn_param.offset1 =
6445 					sys_le16_to_cpu(cpr->offset1);
6446 				conn->llcp_conn_param.offset2 =
6447 					sys_le16_to_cpu(cpr->offset2);
6448 				conn->llcp_conn_param.offset3 =
6449 					sys_le16_to_cpu(cpr->offset3);
6450 				conn->llcp_conn_param.offset4 =
6451 					sys_le16_to_cpu(cpr->offset4);
6452 				conn->llcp_conn_param.offset5 =
6453 					sys_le16_to_cpu(cpr->offset5);
6454 
6455 				/* enqueue the conn param req, if parameters
6456 				 * changed, else respond.
6457 				 */
6458 				if ((conn->llcp_conn_param.interval_max !=
6459 				     lll->interval) ||
6460 				    (conn->llcp_conn_param.latency !=
6461 				     lll->latency) ||
6462 				    (RADIO_CONN_EVENTS(conn->llcp_conn_param.timeout *
6463 						       10000U,
6464 						       lll->interval *
6465 						       CONN_INT_UNIT_US) !=
6466 				     conn->supervision_reload)) {
6467 #if defined(CONFIG_BT_CTLR_LE_ENC)
6468 					/* postpone CP request event if under
6469 					 * encryption setup
6470 					 */
6471 					if (conn->llcp_enc.pause_tx) {
6472 						conn->llcp_conn_param.state =
6473 							LLCP_CPR_STATE_APP_REQ;
6474 
						/* Mark buffer for release */
6476 						(*rx)->hdr.type =
6477 							NODE_RX_TYPE_RELEASE;
6478 					} else
6479 #endif /* CONFIG_BT_CTLR_LE_ENC */
6480 					{
6481 						conn->llcp_conn_param.state =
6482 							LLCP_CPR_STATE_APP_WAIT;
6483 					}
6484 				} else {
6485 					conn->llcp_conn_param.status = 0U;
6486 					conn->llcp_conn_param.cmd = 0U;
6487 					conn->llcp_conn_param.state =
6488 						LLCP_CPR_STATE_RSP;
6489 
					/* Mark buffer for release */
6491 					(*rx)->hdr.type =
6492 						NODE_RX_TYPE_RELEASE;
6493 				}
6494 
6495 				conn->llcp_conn_param.ack--;
6496 
6497 				/* Set CPR mutex */
6498 				cpr_active_check_and_set(conn);
6499 			}
6500 		} else if ((conn->llcp_conn_param.req ==
6501 			    conn->llcp_conn_param.ack) ||
6502 			   (conn->llcp_conn_param.state ==
6503 			    LLCP_CPR_STATE_REQ) ||
6504 			   (conn->llcp_conn_param.state ==
6505 			    LLCP_CPR_STATE_RSP_WAIT)) {
6506 			struct pdu_data_llctrl_conn_param_req *cpr = (void *)
6507 				&pdu_rx->llctrl.conn_param_req;
6508 			struct lll_conn *lll = &conn->lll;
6509 
6510 			/* Extract parameters */
6511 			uint16_t interval_min = sys_le16_to_cpu(cpr->interval_min);
6512 			uint16_t interval_max = sys_le16_to_cpu(cpr->interval_max);
6513 			uint16_t latency = sys_le16_to_cpu(cpr->latency);
6514 			uint16_t timeout = sys_le16_to_cpu(cpr->timeout);
6515 			uint16_t preferred_periodicity =
6516 				cpr->preferred_periodicity;
6517 
6518 			/* Invalid parameters */
6519 			if ((interval_min < CONN_INTERVAL_MIN(conn)) ||
6520 			    (interval_max > 3200) ||
6521 			    (interval_min > interval_max) ||
6522 			    (latency > 499) ||
6523 			    (timeout < 10) || (timeout > 3200) ||
6524 			    ((timeout * 4U) <=
6525 			     ((latency + 1) * interval_max)) ||
6526 			    (preferred_periodicity > interval_max)) {
6527 				nack = reject_ext_ind_send(conn, *rx,
6528 					PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ,
6529 					BT_HCI_ERR_INVALID_LL_PARAM);
6530 				break;
6531 			}
6532 
6533 			/* resp to be generated by app, for now save
6534 			 * parameters
6535 			 */
6536 			conn->llcp_conn_param.interval_min = interval_min;
6537 			conn->llcp_conn_param.interval_max = interval_max;
			conn->llcp_conn_param.latency = latency;
			conn->llcp_conn_param.timeout = timeout;
6540 			conn->llcp_conn_param.preferred_periodicity =
6541 				preferred_periodicity;
6542 			conn->llcp_conn_param.reference_conn_event_count =
6543 				sys_le16_to_cpu(cpr->reference_conn_event_count);
6544 			conn->llcp_conn_param.offset0 =
6545 				sys_le16_to_cpu(cpr->offset0);
6546 			conn->llcp_conn_param.offset1 =
6547 				sys_le16_to_cpu(cpr->offset1);
6548 			conn->llcp_conn_param.offset2 =
6549 				sys_le16_to_cpu(cpr->offset2);
6550 			conn->llcp_conn_param.offset3 =
6551 				sys_le16_to_cpu(cpr->offset3);
6552 			conn->llcp_conn_param.offset4 =
6553 				sys_le16_to_cpu(cpr->offset4);
6554 			conn->llcp_conn_param.offset5 =
6555 				sys_le16_to_cpu(cpr->offset5);
6556 
6557 			/* enqueue the conn param req, if parameters changed,
6558 			 * else respond
6559 			 */
6560 			if ((conn->llcp_conn_param.interval_max !=
6561 			     lll->interval) ||
6562 			    (conn->llcp_conn_param.latency != lll->latency) ||
6563 			    (RADIO_CONN_EVENTS(conn->llcp_conn_param.timeout *
6564 					       10000U,
6565 					       lll->interval *
6566 					       CONN_INT_UNIT_US) !=
6567 			     conn->supervision_reload)) {
6568 				conn->llcp_conn_param.state =
6569 					LLCP_CPR_STATE_APP_WAIT;
6570 			} else {
6571 				conn->llcp_conn_param.status = 0U;
6572 				conn->llcp_conn_param.cmd = 0U;
6573 				conn->llcp_conn_param.state =
6574 					LLCP_CPR_STATE_RSP;
6575 
				/* Mark buffer for release */
6577 				(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
6578 			}
6579 
6580 			conn->llcp_conn_param.ack--;
6581 
6582 			/* Set CPR mutex */
6583 			cpr_active_check_and_set(conn);
6584 		} else {
6585 			/* Ignore duplicate request as peripheral is busy
6586 			 * processing the previously initiated connection
6587 			 * update request procedure.
6588 			 */
			/* Mark buffer for release */
6590 			(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
6591 		}
6592 		break;
6593 
6594 #if defined(CONFIG_BT_CENTRAL)
6595 	case PDU_DATA_LLCTRL_TYPE_CONN_PARAM_RSP:
6596 		if (conn->lll.role ||
6597 		    PDU_DATA_LLCTRL_LEN(conn_param_rsp) != pdu_rx->len) {
6598 			goto ull_conn_rx_unknown_rsp_send;
6599 		}
6600 
6601 		if (!conn->lll.role &&
6602 		    (conn->llcp_conn_param.req !=
6603 		     conn->llcp_conn_param.ack) &&
6604 		    (conn->llcp_conn_param.state ==
6605 		     LLCP_CPR_STATE_RSP_WAIT)) {
6606 			struct pdu_data_llctrl_conn_param_req *cpr = (void *)
6607 				&pdu_rx->llctrl.conn_param_req;
6608 
6609 			/* Extract parameters */
6610 			uint16_t interval_min = sys_le16_to_cpu(cpr->interval_min);
6611 			uint16_t interval_max = sys_le16_to_cpu(cpr->interval_max);
6612 			uint16_t latency = sys_le16_to_cpu(cpr->latency);
6613 			uint16_t timeout = sys_le16_to_cpu(cpr->timeout);
6614 			uint16_t preferred_periodicity =
6615 				cpr->preferred_periodicity;
6616 
6617 			/* Invalid parameters */
6618 			if ((interval_min < CONN_INTERVAL_MIN(conn)) ||
6619 			    (interval_max > 3200) ||
6620 			    (interval_min > interval_max) ||
6621 			    (latency > 499) ||
6622 			    (timeout < 10) || (timeout > 3200) ||
6623 			    ((timeout * 4U) <=
6624 			     ((latency + 1) * interval_max)) ||
6625 			    (preferred_periodicity > interval_max)) {
6626 				nack = reject_ext_ind_send(conn, *rx,
6627 					PDU_DATA_LLCTRL_TYPE_CONN_PARAM_RSP,
6628 					BT_HCI_ERR_INVALID_LL_PARAM);
6629 				break;
6630 			}
6631 
6632 			/* Stop procedure timeout */
6633 			conn->procedure_expire = 0U;
6634 
6635 			/* save parameters to be used to select offset
6636 			 */
6637 			conn->llcp_conn_param.interval_min = interval_min;
6638 			conn->llcp_conn_param.interval_max = interval_max;
			conn->llcp_conn_param.latency = latency;
			conn->llcp_conn_param.timeout = timeout;
6641 			conn->llcp_conn_param.preferred_periodicity =
6642 				preferred_periodicity;
6643 			conn->llcp_conn_param.reference_conn_event_count =
6644 				sys_le16_to_cpu(cpr->reference_conn_event_count);
6645 			conn->llcp_conn_param.offset0 =
6646 				sys_le16_to_cpu(cpr->offset0);
6647 			conn->llcp_conn_param.offset1 =
6648 				sys_le16_to_cpu(cpr->offset1);
6649 			conn->llcp_conn_param.offset2 =
6650 				sys_le16_to_cpu(cpr->offset2);
6651 			conn->llcp_conn_param.offset3 =
6652 				sys_le16_to_cpu(cpr->offset3);
6653 			conn->llcp_conn_param.offset4 =
6654 				sys_le16_to_cpu(cpr->offset4);
6655 			conn->llcp_conn_param.offset5 =
6656 				sys_le16_to_cpu(cpr->offset5);
6657 
6658 			/* Perform connection update */
6659 			conn->llcp_conn_param.state = LLCP_CPR_STATE_RSP;
6660 		}
6661 
		/* Mark buffer for release */
6663 		(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
6664 
6665 		break;
6666 #endif /* CONFIG_BT_CENTRAL */
6667 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
6668 
6669 	case PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND:
6670 		if (PDU_DATA_LLCTRL_LEN(reject_ext_ind) != pdu_rx->len) {
6671 			goto ull_conn_rx_unknown_rsp_send;
6672 		}
6673 
6674 		reject_ext_ind_recv(conn, *rx, pdu_rx);
6675 		break;
6676 
6677 #if defined(CONFIG_BT_CTLR_LE_PING)
6678 	case PDU_DATA_LLCTRL_TYPE_PING_REQ:
6679 		if (PDU_DATA_LLCTRL_LEN(ping_req) != pdu_rx->len) {
6680 			goto ull_conn_rx_unknown_rsp_send;
6681 		}
6682 
6683 		nack = ping_resp_send(conn, *rx);
6684 		break;
6685 
6686 	case PDU_DATA_LLCTRL_TYPE_PING_RSP:
6687 		if (PDU_DATA_LLCTRL_LEN(ping_rsp) != pdu_rx->len) {
6688 			goto ull_conn_rx_unknown_rsp_send;
6689 		}
6690 
6691 		/* Procedure complete */
6692 		conn->procedure_expire = 0U;
6693 
		/* Mark buffer for release */
6695 		(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
6696 
6697 		break;
6698 #endif /* CONFIG_BT_CTLR_LE_PING */
6699 
6700 	case PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP:
6701 		if (PDU_DATA_LLCTRL_LEN(unknown_rsp) != pdu_rx->len) {
6702 			goto ull_conn_rx_unknown_rsp_send;
6703 		}
6704 
6705 		struct pdu_data_llctrl *llctrl = (void *)&pdu_rx->llctrl;
6706 
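		/* The if (0) head lets each conditionally compiled branch
		 * below be an "else if", keeping the chain well-formed for
		 * any combination of Kconfig options.
		 */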
6707 		if (0) {
6708 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
6709 		} else if ((conn->llcp_conn_param.ack !=
6710 			    conn->llcp_conn_param.req) &&
6711 			   (llctrl->unknown_rsp.type ==
6712 			    PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ)) {
6713 			struct lll_conn *lll = &conn->lll;
6714 			struct node_rx_cu *cu;
6715 
6716 			/* Mark CPR as unsupported */
6717 			conn->llcp_conn_param.disabled = 1U;
6718 
6719 			/* TODO: check for unsupported remote feature reason */
6720 			if (!conn->lll.role) {
6721 				LL_ASSERT(conn->llcp_cu.req ==
6722 					  conn->llcp_cu.ack);
6723 
6724 				conn->llcp_conn_param.state =
6725 					LLCP_CPR_STATE_UPD;
6726 
6727 				conn->llcp_cu.win_size = 1U;
6728 				conn->llcp_cu.win_offset_us = 0U;
6729 				conn->llcp_cu.interval =
6730 					conn->llcp_conn_param.interval_max;
6731 				conn->llcp_cu.latency =
6732 					conn->llcp_conn_param.latency;
6733 				conn->llcp_cu.timeout =
6734 					conn->llcp_conn_param.timeout;
6735 				conn->llcp_cu.state = LLCP_CUI_STATE_USE;
6736 				conn->llcp_cu.cmd = conn->llcp_conn_param.cmd;
6737 				conn->llcp_cu.ack--;
6738 
				/* Mark buffer for release */
6740 				(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
6741 
6742 				break;
6743 			}
6744 
6745 			/* Reset CPR mutex */
6746 			cpr_active_reset();
6747 
6748 			/* Procedure complete */
6749 			conn->llcp_conn_param.ack = conn->llcp_conn_param.req;
6750 
6751 			/* skip event generation if not cmd initiated */
6752 			if (!conn->llcp_conn_param.cmd) {
				/* Mark buffer for release */
6754 				(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
6755 
6756 				break;
6757 			}
6758 
6759 			/* generate conn upd complete event with error code */
6760 			(*rx)->hdr.type = NODE_RX_TYPE_CONN_UPDATE;
6761 
6762 			/* prepare connection update complete structure */
6763 			cu = (void *)pdu_rx;
6764 			cu->status = BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
6765 			cu->interval = lll->interval;
6766 			cu->latency = lll->latency;
6767 			cu->timeout = conn->supervision_reload *
6768 				      lll->interval * 125U / 1000;
6769 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
6770 
6771 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
6772 		} else if ((conn->llcp_length.req != conn->llcp_length.ack) &&
6773 			   (llctrl->unknown_rsp.type ==
6774 			    PDU_DATA_LLCTRL_TYPE_LENGTH_REQ)) {
6775 			/* Mark length update as unsupported */
6776 			conn->llcp_length.disabled = 1U;
6777 
6778 			/* Procedure complete */
6779 			conn->llcp_length.ack = conn->llcp_length.req;
6780 
			/* propagate the data length procedure response to
			 * the host
			 */
6784 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
6785 
6786 #if defined(CONFIG_BT_CTLR_PHY)
6787 		} else if ((conn->llcp_phy.req != conn->llcp_phy.ack) &&
6788 			   (llctrl->unknown_rsp.type ==
6789 			    PDU_DATA_LLCTRL_TYPE_PHY_REQ)) {
6790 			struct lll_conn *lll = &conn->lll;
6791 
6792 			/* Mark phy update as unsupported */
6793 			conn->llcp_phy.disabled = 1U;
6794 
6795 			/* Procedure complete */
6796 			conn->llcp_phy.ack = conn->llcp_phy.req;
6797 			conn->llcp_phy.pause_tx = 0U;
6798 
6799 			/* Reset packet timing restrictions */
6800 			lll->phy_tx_time = lll->phy_tx;
6801 
			/* skip event generation if not cmd initiated */
6803 			if (conn->llcp_phy.cmd) {
6804 				struct node_rx_pu *p;
6805 
6806 				/* generate phy update complete event */
6807 				(*rx)->hdr.type = NODE_RX_TYPE_PHY_UPDATE;
6808 
6809 				p = (void *)pdu_rx;
6810 				p->status = 0U;
6811 				p->tx = lll->phy_tx;
6812 				p->rx = lll->phy_rx;
6813 			} else {
				/* Mark buffer for release */
6815 				(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
6816 			}
6817 #endif /* CONFIG_BT_CTLR_PHY */
6818 
6819 		} else {
6820 			switch (llctrl->unknown_rsp.type) {
6821 
6822 #if defined(CONFIG_BT_CTLR_LE_PING)
6823 			case PDU_DATA_LLCTRL_TYPE_PING_REQ:
6824 				/* unknown rsp to LE Ping Req completes the
6825 				 * procedure; nothing to do here.
6826 				 */
6827 
				/* Mark buffer for release */
6829 				(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
6830 				break;
6831 #endif /* CONFIG_BT_CTLR_LE_PING */
6832 
6833 			default:
6834 				/* TODO: enqueue the error and let HCI handle
6835 				 *       it.
6836 				 */
6837 				break;
6838 			}
6839 		}
6840 
6841 		/* Procedure complete */
6842 		conn->procedure_expire = 0U;
6843 		break;
6844 
6845 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
6846 	case PDU_DATA_LLCTRL_TYPE_LENGTH_RSP:
6847 	case PDU_DATA_LLCTRL_TYPE_LENGTH_REQ:
6848 		if (PDU_DATA_LLCTRL_LEN(length_req) != pdu_rx->len) {
6849 			goto ull_conn_rx_unknown_rsp_send;
6850 		}
6851 
6852 		nack = length_req_rsp_recv(conn, link, rx, pdu_rx);
6853 		break;
6854 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
6855 
6856 #if defined(CONFIG_BT_CTLR_PHY)
6857 	case PDU_DATA_LLCTRL_TYPE_PHY_REQ:
6858 		if (PDU_DATA_LLCTRL_LEN(phy_req) != pdu_rx->len) {
6859 			goto ull_conn_rx_unknown_rsp_send;
6860 		}
6861 
6862 		if (!conn->lll.role) {
6863 			if ((conn->llcp_phy.ack !=
6864 			     conn->llcp_phy.req) &&
6865 			    ((conn->llcp_phy.state ==
6866 			      LLCP_PHY_STATE_ACK_WAIT) ||
6867 			     (conn->llcp_phy.state ==
6868 			      LLCP_PHY_STATE_RSP_WAIT) ||
6869 			     (conn->llcp_phy.state ==
6870 			      LLCP_PHY_STATE_UPD))) {
				/* Same procedure collision */
6872 				nack = reject_ext_ind_send(conn, *rx,
6873 					PDU_DATA_LLCTRL_TYPE_PHY_REQ,
6874 					BT_HCI_ERR_LL_PROC_COLLISION);
6875 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
6876 #if defined(CONFIG_BT_CTLR_LE_ENC)
6877 			} else if (((((conn->llcp_req - conn->llcp_ack) &
6878 				      0x03) == 0x02) &&
6879 				    (conn->llcp_type !=
6880 				     LLCP_ENCRYPTION)) ||
6881 				   (conn->llcp_conn_param.req !=
6882 				    conn->llcp_conn_param.ack)) {
6883 #else /* !CONFIG_BT_CTLR_LE_ENC */
6884 			} else if ((((conn->llcp_req - conn->llcp_ack) &
6885 				     0x03) == 0x02) &&
6886 				   (conn->llcp_conn_param.req !=
6887 				    conn->llcp_conn_param.ack)) {
6888 #endif /* !CONFIG_BT_CTLR_LE_ENC */
6889 #else /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
6890 #if defined(CONFIG_BT_CTLR_LE_ENC)
6891 			} else if ((((conn->llcp_req - conn->llcp_ack) &
6892 				     0x03) == 0x02) &&
6893 				   (conn->llcp_type !=
6894 				    LLCP_ENCRYPTION)) {
6895 #else /* !CONFIG_BT_CTLR_LE_ENC */
6896 			} else if (((conn->llcp_req - conn->llcp_ack) &
6897 				    0x03) == 0x02) {
6898 #endif /* !CONFIG_BT_CTLR_LE_ENC */
6899 #endif /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
6900 				/* Different procedure collision */
6901 				nack = reject_ext_ind_send(conn, *rx,
6902 					PDU_DATA_LLCTRL_TYPE_PHY_REQ,
6903 					BT_HCI_ERR_DIFF_TRANS_COLLISION);
6904 			} else {
6905 				struct pdu_data_llctrl *c = &pdu_rx->llctrl;
6906 				struct pdu_data_llctrl_phy_req *p =
6907 					&c->phy_req;
6908 
				conn->llcp_phy.state = LLCP_PHY_STATE_UPD;

				if (conn->llcp_phy.ack == conn->llcp_phy.req) {
6914 					conn->llcp_phy.ack--;
6915 
6916 					conn->llcp_phy.cmd = 0U;
6917 
					conn->llcp_phy.tx = conn->phy_pref_tx;
					conn->llcp_phy.rx = conn->phy_pref_rx;
6922 				}
6923 
6924 				conn->llcp_phy.tx &= p->rx_phys;
6925 				conn->llcp_phy.rx &= p->tx_phys;
6926 
6927 				if (!conn->llcp_phy.tx || !conn->llcp_phy.rx) {
6928 					conn->llcp_phy.tx = 0;
6929 					conn->llcp_phy.rx = 0;
6930 				}
6931 
6932 				/* pause data packet tx */
6933 				conn->llcp_phy.pause_tx = 1U;
6934 
				/* Mark buffer for release */
6936 				(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
6937 			}
6938 		} else {
6939 			nack = phy_rsp_send(conn, *rx, pdu_rx);
6940 		}
6941 		break;
6942 
6943 #if defined(CONFIG_BT_CENTRAL)
6944 	case PDU_DATA_LLCTRL_TYPE_PHY_RSP:
6945 		if (conn->lll.role ||
6946 		    PDU_DATA_LLCTRL_LEN(phy_rsp) != pdu_rx->len) {
6947 			goto ull_conn_rx_unknown_rsp_send;
6948 		}
6949 
6950 		if (!conn->lll.role &&
6951 		    (conn->llcp_phy.ack != conn->llcp_phy.req) &&
6952 		    (conn->llcp_phy.state == LLCP_PHY_STATE_RSP_WAIT)) {
6953 			struct pdu_data_llctrl_phy_rsp *p =
6954 				&pdu_rx->llctrl.phy_rsp;
6955 
6956 			conn->llcp_phy.state = LLCP_PHY_STATE_UPD;
6957 
6958 			conn->llcp_phy.tx &= p->rx_phys;
6959 			conn->llcp_phy.rx &= p->tx_phys;
6960 
6961 			if (!conn->llcp_phy.tx || !conn->llcp_phy.rx) {
6962 				conn->llcp_phy.tx = 0;
6963 				conn->llcp_phy.rx = 0;
6964 			}
6965 
6966 			/* pause data packet tx */
6967 			conn->llcp_phy.pause_tx = 1U;
6968 
6969 			/* Procedure timeout is stopped */
6970 			conn->procedure_expire = 0U;
6971 		}
6972 
		/* Mark buffer for release */
6974 		(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;
6975 
6976 		break;
#endif /* CONFIG_BT_CENTRAL */

#if defined(CONFIG_BT_PERIPHERAL)
	case PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND:
	{
		uint8_t err;

		if (!conn->lll.role ||
		    PDU_DATA_LLCTRL_LEN(phy_upd_ind) != pdu_rx->len) {
			goto ull_conn_rx_unknown_rsp_send;
		}

		err = phy_upd_ind_recv(conn, link, rx, pdu_rx);
		if (err) {
			conn->llcp_terminate.reason_final = err;
		}
	}
	break;
#endif /* CONFIG_BT_PERIPHERAL */
#endif /* CONFIG_BT_CTLR_PHY */

#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN)
#if defined(CONFIG_BT_CENTRAL)
	case PDU_DATA_LLCTRL_TYPE_MIN_USED_CHAN_IND:
		if (conn->lll.role ||
		    PDU_DATA_LLCTRL_LEN(min_used_chans_ind) != pdu_rx->len) {
			goto ull_conn_rx_unknown_rsp_send;
		}

		if (!conn->lll.role) {
			struct pdu_data_llctrl_min_used_chans_ind *p =
				&pdu_rx->llctrl.min_used_chans_ind;

#if defined(CONFIG_BT_CTLR_PHY)
			if (!(p->phys & (conn->lll.phy_tx |
					 conn->lll.phy_rx))) {
#else /* !CONFIG_BT_CTLR_PHY */
			if (!(p->phys & 0x01)) {
#endif /* !CONFIG_BT_CTLR_PHY */
				break;
			}

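			/* Skip if another local control procedure is already
			 * pending (req/ack difference of 2 mod 4).
			 */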
			if (((conn->llcp_req - conn->llcp_ack) & 0x03) ==
			    0x02) {
				break;
			}

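			/* Peer needs a minimum number of used channels on
			 * the indicated PHYs; initiate a channel map update
			 * procedure with the current channel map.
			 */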
			ull_chan_map_get(conn->llcp.chan_map.chm);
			/* conn->llcp.chan_map.instant     = 0; */
			conn->llcp.chan_map.initiate = 1U;

			conn->llcp_type = LLCP_CHAN_MAP;
			conn->llcp_ack -= 2U;
		}

		/* Mark buffer for release */
		(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;

		break;
#endif /* CONFIG_BT_CENTRAL */
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN */

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
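	/* CIS establishment as peripheral: on error in receiving either
	 * PDU, the ACL connection is terminated with the error as reason.
	 */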
	case PDU_DATA_LLCTRL_TYPE_CIS_REQ:
	{
		uint8_t err;

		if (!conn->lll.role ||
		    PDU_DATA_LLCTRL_LEN(cis_req) != pdu_rx->len) {
			goto ull_conn_rx_unknown_rsp_send;
		}

		err = cis_req_recv(conn, link, rx, pdu_rx);
		if (err) {
			conn->llcp_terminate.reason_final = err;
		}
		break;
	}

	case PDU_DATA_LLCTRL_TYPE_CIS_IND:
	{
		uint8_t err;

		if (!conn->lll.role ||
		    PDU_DATA_LLCTRL_LEN(cis_ind) != pdu_rx->len) {
			goto ull_conn_rx_unknown_rsp_send;
		}

		err = cis_ind_recv(conn, link, rx, pdu_rx);
		if (err) {
			conn->llcp_terminate.reason_final = err;
		}
		break;
	}
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */

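	/* Unknown, unsupported or length-mismatched control PDUs are
	 * answered with an LL_UNKNOWN_RSP.
	 */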
	default:
ull_conn_rx_unknown_rsp_send:
		nack = unknown_rsp_send(conn, *rx, opcode);
		break;
	}

	return nack;
}

#if defined(CONFIG_BT_CTLR_FORCE_MD_AUTO)
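/* Calculate how many connection events the MD (More Data) bit should be
 * forced for, given an assumed host-to-controller data rate tx_rate (in
 * bits per second), so that the connection event is kept open while new Tx
 * data keeps arriving faster than it can be drained on-air.
 */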
static uint8_t force_md_cnt_calc(struct lll_conn *lll_conn, uint32_t tx_rate)
{
	uint32_t time_incoming, time_outgoing;
	uint8_t force_md_cnt;
	uint8_t phy_flags;
	uint8_t mic_size;
	uint8_t phy;

#if defined(CONFIG_BT_CTLR_PHY)
	phy = lll_conn->phy_tx;
	phy_flags = lll_conn->phy_flags;
#else /* !CONFIG_BT_CTLR_PHY */
	phy = PHY_1M;
	phy_flags = 0U;
#endif /* !CONFIG_BT_CTLR_PHY */

#if defined(CONFIG_BT_CTLR_LE_ENC)
	mic_size = PDU_MIC_SIZE * lll_conn->enc_tx;
#else /* !CONFIG_BT_CTLR_LE_ENC */
	mic_size = 0U;
#endif /* !CONFIG_BT_CTLR_LE_ENC */

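	/* time_incoming: microseconds for a maximum-size payload's worth of
	 * data to arrive at tx_rate bits per second.
	 * time_outgoing: on-air microseconds for one maximum-size data PDU,
	 * one empty PDU, and two inter-frame spaces, i.e. one connection
	 * event round-trip.
	 */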
	time_incoming = (LL_LENGTH_OCTETS_RX_MAX << 3) *
			1000000UL / tx_rate;
	time_outgoing = PDU_DC_US(LL_LENGTH_OCTETS_RX_MAX, mic_size, phy,
				  phy_flags) +
			PDU_DC_US(0U, 0U, phy, PHY_FLAGS_S8) +
			(EVENT_IFS_US << 1);

	force_md_cnt = 0U;
	if (time_incoming > time_outgoing) {
		uint32_t delta;
		uint32_t time_keep_alive;

		delta = (time_incoming << 1) - time_outgoing;
		time_keep_alive = (PDU_DC_US(0U, 0U, phy, PHY_FLAGS_S8) +
				   EVENT_IFS_US) << 1;
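		/* Ceiling division: number of keep-alive (empty PDU)
		 * round-trips needed to cover the accumulated difference.
		 */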
		force_md_cnt = (delta + (time_keep_alive - 1)) /
			       time_keep_alive;
		BT_DBG("Time: incoming= %u, expected outgoing= %u, delta= %u, "
		       "keepalive= %u, force_md_cnt = %u.",
		       time_incoming, time_outgoing, delta, time_keep_alive,
		       force_md_cnt);
	}

	return force_md_cnt;
}
#endif /* CONFIG_BT_CTLR_FORCE_MD_AUTO */