/*
 * Copyright (c) 2020 Demant
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>

#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/slist.h>
#include <zephyr/sys/util.h>

#include <zephyr/bluetooth/hci_types.h>

#include "hal/ccm.h"

#include "util/util.h"
#include "util/mem.h"
#include "util/memq.h"
#include "util/dbuf.h"
#include "util/mayfly.h"

#include "pdu_df.h"
#include "lll/pdu_vendor.h"
#include "pdu.h"

#include "ll.h"
#include "ll_feat.h"
#include "ll_settings.h"

#include "lll.h"
#include "lll_clock.h"
#include "lll/lll_df_types.h"
#include "lll_conn.h"
#include "lll_conn_iso.h"
#include "lll_sync.h"
#include "lll_sync_iso.h"
#include "lll_scan.h"
#include "lll/lll_adv_types.h"
#include "lll_adv.h"
#include "lll/lll_adv_pdu.h"

#include "ull_tx_queue.h"

#include "isoal.h"
#include "ull_iso_types.h"
#include "ull_sync_types.h"
#include "ull_scan_types.h"
#include "ull_adv_types.h"
#include "ull_adv_internal.h"
#include "ull_conn_iso_types.h"
#include "ull_conn_iso_internal.h"
#include "ull_central_iso_internal.h"

#include "ull_internal.h"
#include "ull_conn_types.h"
#include "ull_conn_internal.h"
#include "ull_llcp.h"
#include "ull_llcp_features.h"
#include "ull_llcp_internal.h"
#include "ull_peripheral_internal.h"
#include "ull_sync_internal.h"

#include "ull_filter.h"

#include <soc.h>
#include "hal/debug.h"

#define LLCTRL_PDU_SIZE (offsetof(struct pdu_data, llctrl) + sizeof(struct pdu_data_llctrl))
#define PROC_CTX_BUF_SIZE WB_UP(sizeof(struct proc_ctx))
#define TX_CTRL_BUF_SIZE WB_UP(offsetof(struct node_tx, pdu) + LLCTRL_PDU_SIZE)
#define NTF_BUF_SIZE WB_UP(offsetof(struct node_rx_pdu, pdu) + LLCTRL_PDU_SIZE)
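/* Maximum value of the 13-bit offset base field in the SyncInfo structure
 * (used below when calculating the PAST sync offset).
 */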
#define OFFSET_BASE_MAX_VALUE 0x1FFF

/* LLCP Allocations */
#if defined(LLCP_TX_CTRL_BUF_QUEUE_ENABLE)
sys_slist_t tx_buffer_wait_list;
static uint8_t common_tx_buffer_alloc;
#endif /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */

static uint8_t MALIGN(4) buffer_mem_tx[TX_CTRL_BUF_SIZE * LLCP_TX_CTRL_BUF_COUNT];
static struct llcp_mem_pool mem_tx = { .pool = buffer_mem_tx };

static uint8_t MALIGN(4) buffer_mem_local_ctx[PROC_CTX_BUF_SIZE *
				    CONFIG_BT_CTLR_LLCP_LOCAL_PROC_CTX_BUF_NUM];
static struct llcp_mem_pool mem_local_ctx = { .pool = buffer_mem_local_ctx };

static uint8_t MALIGN(4) buffer_mem_remote_ctx[PROC_CTX_BUF_SIZE *
				     CONFIG_BT_CTLR_LLCP_REMOTE_PROC_CTX_BUF_NUM];
static struct llcp_mem_pool mem_remote_ctx = { .pool = buffer_mem_remote_ctx };

/*
 * LLCP Resource Management
 */
static struct proc_ctx *proc_ctx_acquire(struct llcp_mem_pool *owner)
{
	struct proc_ctx *ctx;

	ctx = (struct proc_ctx *)mem_acquire(&owner->free);

	if (ctx) {
		/* Set the owner */
		ctx->owner = owner;
	}

	return ctx;
}

void llcp_proc_ctx_release(struct proc_ctx *ctx)
{
	/* We need to have an owner otherwise the memory allocated would leak */
	LL_ASSERT(ctx->owner);

	/* Release the memory back to the owner */
	mem_release(ctx, &ctx->owner->free);
}

#if defined(LLCP_TX_CTRL_BUF_QUEUE_ENABLE)
/*
 * @brief Update 'global' tx buffer allowance
 */
void ull_cp_update_tx_buffer_queue(struct ll_conn *conn)
{
	if (conn->llcp.tx_buffer_alloc > CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM) {
		common_tx_buffer_alloc -= (conn->llcp.tx_buffer_alloc -
					   CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM);
	}
}


/*
 * @brief Check for per conn pre-allocated tx buffer allowance
 * @return true if buffer is available
 */
static inline bool static_tx_buffer_available(struct ll_conn *conn, struct proc_ctx *ctx)
{
#if (CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM > 0)
	/* Check if a per connection pre-allotted tx buffer is available */
	if (conn->llcp.tx_buffer_alloc < CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM) {
		/* This connection has not yet used up all the pre-allotted tx buffers */
		return true;
	}
#endif /* CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM > 0 */
	return false;
}

/*
 * @brief pre-alloc/peek of a tx buffer, leave requester on the wait list (@head if first up)
 *
 * @return true if alloc is allowed, false if not
 *
 */
bool llcp_tx_alloc_peek(struct ll_conn *conn, struct proc_ctx *ctx)
{
	if (!static_tx_buffer_available(conn, ctx)) {
		/* The conn has already spent its pre-allotted tx buffer(s),
		 * so we should consider the common tx buffer pool
		 */
		if (ctx->wait_reason == WAITING_FOR_NOTHING) {
			/* The current procedure is not in line for a tx buffer
			 * so sign up on the wait list
			 */
			sys_slist_append(&tx_buffer_wait_list, &ctx->wait_node);
			ctx->wait_reason = WAITING_FOR_TX_BUFFER;
		}

		/* Now check to see if this procedure context is @ head of the wait list */
		if (ctx->wait_reason == WAITING_FOR_TX_BUFFER &&
		    sys_slist_peek_head(&tx_buffer_wait_list) == &ctx->wait_node) {
			return (common_tx_buffer_alloc <
				CONFIG_BT_CTLR_LLCP_COMMON_TX_CTRL_BUF_NUM);
		}

		return false;
	}
	return true;
}

/*
 * @brief un-peek of a tx buffer, in case ongoing alloc is aborted
 *
 */
void llcp_tx_alloc_unpeek(struct proc_ctx *ctx)
{
	sys_slist_find_and_remove(&tx_buffer_wait_list, &ctx->wait_node);
	ctx->wait_reason = WAITING_FOR_NOTHING;
}

/*
 * @brief complete alloc of a tx buffer, must be preceded by a successful call to
 * llcp_tx_alloc_peek()
 *
 * @return node_tx* that was peek'ed by llcp_tx_alloc_peek()
 *
 */
struct node_tx *llcp_tx_alloc(struct ll_conn *conn, struct proc_ctx *ctx)
{
	conn->llcp.tx_buffer_alloc++;
#if (CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM > 0)
	if (conn->llcp.tx_buffer_alloc > CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM) {
		common_tx_buffer_alloc++;
		/* global buffer allocated, so we're at the head and should just pop head */
		sys_slist_get(&tx_buffer_wait_list);
	} else {
		/* we're allocating conn_tx_buffer, so remove from wait list if waiting */
		if (ctx->wait_reason == WAITING_FOR_TX_BUFFER) {
			sys_slist_find_and_remove(&tx_buffer_wait_list, &ctx->wait_node);
		}
	}
#else /* CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM > 0 */
	/* global buffer allocated, so remove head of wait list */
	common_tx_buffer_alloc++;
	sys_slist_get(&tx_buffer_wait_list);
#endif /* CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM > 0 */
	ctx->wait_reason = WAITING_FOR_NOTHING;

	return (struct node_tx *)mem_acquire(&mem_tx.free);
}
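
/*
 * Illustrative call sequence for the queue-enabled allocator above (a sketch
 * of how the peek/alloc pair is intended to be used, not code from this file):
 *
 *	if (llcp_tx_alloc_peek(conn, ctx)) {
 *		struct node_tx *tx = llcp_tx_alloc(conn, ctx);
 *		// ... fill in the control PDU and enqueue it ...
 *	}
 *	// else: ctx stays on tx_buffer_wait_list and the procedure retries
 *	// on a later run of the state machine.
 */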
#else /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */
bool llcp_tx_alloc_peek(struct ll_conn *conn, struct proc_ctx *ctx)
{
	ARG_UNUSED(conn);
	return mem_tx.free != NULL;
}

void llcp_tx_alloc_unpeek(struct proc_ctx *ctx)
{
	/* Empty on purpose, as unpeek is not needed when no buffer queueing is used */
	ARG_UNUSED(ctx);
}

struct node_tx *llcp_tx_alloc(struct ll_conn *conn, struct proc_ctx *ctx)
{
	struct pdu_data *pdu;
	struct node_tx *tx;

	ARG_UNUSED(conn);
	tx = (struct node_tx *)mem_acquire(&mem_tx.free);

	pdu = (struct pdu_data *)tx->pdu;
	ull_pdu_data_init(pdu);

	return tx;
}
#endif /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */

static void tx_release(struct node_tx *tx)
{
	mem_release(tx, &mem_tx.free);
}

bool llcp_ntf_alloc_is_available(void)
{
	return ll_pdu_rx_alloc_peek(1) != NULL;
}

bool llcp_ntf_alloc_num_available(uint8_t count)
{
	return ll_pdu_rx_alloc_peek(count) != NULL;
}

struct node_rx_pdu *llcp_ntf_alloc(void)
{
	return ll_pdu_rx_alloc();
}

/*
 * ULL -> LLL Interface
 */

void llcp_tx_enqueue(struct ll_conn *conn, struct node_tx *tx)
{
	ull_tx_q_enqueue_ctrl(&conn->tx_q, tx);
}

void llcp_tx_pause_data(struct ll_conn *conn, enum llcp_tx_q_pause_data_mask pause_mask)
{
	/* Only pause the TX Q if we have not already paused it (by any procedure) */
	if (conn->llcp.tx_q_pause_data_mask == 0) {
		ull_tx_q_pause_data(&conn->tx_q);
	}

	/* Add the procedure that paused data */
	conn->llcp.tx_q_pause_data_mask |= pause_mask;
}

void llcp_tx_resume_data(struct ll_conn *conn, enum llcp_tx_q_pause_data_mask resume_mask)
{
	/* Remove the procedure that paused data */
	conn->llcp.tx_q_pause_data_mask &= ~resume_mask;

	/* Only resume the TX Q if we have removed all procedures that paused data */
	if (conn->llcp.tx_q_pause_data_mask == 0) {
		ull_tx_q_resume_data(&conn->tx_q);
	}
}
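
/* Note: tx_q_pause_data_mask effectively reference-counts pause requests per
 * procedure, so the TX queue is resumed only once every procedure that paused
 * data has cleared its own mask bit via llcp_tx_resume_data().
 */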

void llcp_rx_node_retain(struct proc_ctx *ctx)
{
	LL_ASSERT(ctx->node_ref.rx);

	/* Only retain if not already retained */
	if (ctx->node_ref.rx->hdr.type != NODE_RX_TYPE_RETAIN) {
		/* Mark RX node to NOT release */
		ctx->node_ref.rx->hdr.type = NODE_RX_TYPE_RETAIN;

		/* store link element reference to use once this node is moved up */
		ctx->node_ref.rx->hdr.link = ctx->node_ref.link;
	}
}

void llcp_rx_node_release(struct proc_ctx *ctx)
{
	LL_ASSERT(ctx->node_ref.rx);

	/* Only release if retained */
	if (ctx->node_ref.rx->hdr.type == NODE_RX_TYPE_RETAIN) {
		/* Mark RX node to release and release */
		ctx->node_ref.rx->hdr.type = NODE_RX_TYPE_RELEASE;
		ll_rx_put_sched(ctx->node_ref.rx->hdr.link, ctx->node_ref.rx);
	}
}
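
/* A node marked NODE_RX_TYPE_RETAIN above is kept out of the normal RX release
 * path until one of the release helpers flips it back to NODE_RX_TYPE_RELEASE
 * and hands it to the host RX queue.
 */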

void llcp_nodes_release(struct ll_conn *conn, struct proc_ctx *ctx)
{
	if (ctx->node_ref.rx && ctx->node_ref.rx->hdr.type == NODE_RX_TYPE_RETAIN) {
		/* RX node retained, so release */
		ctx->node_ref.rx->hdr.link->mem = conn->llcp.rx_node_release;
		ctx->node_ref.rx->hdr.type = NODE_RX_TYPE_RELEASE;
		conn->llcp.rx_node_release = ctx->node_ref.rx;
	}
#if defined(CONFIG_BT_CTLR_PHY) && defined(CONFIG_BT_CTLR_DATA_LENGTH)
	if (ctx->proc == PROC_PHY_UPDATE && ctx->data.pu.ntf_dle_node) {
		/* RX node retained, so release */
		ctx->data.pu.ntf_dle_node->hdr.link->mem = conn->llcp.rx_node_release;
		ctx->data.pu.ntf_dle_node->hdr.type = NODE_RX_TYPE_RELEASE;
		conn->llcp.rx_node_release = ctx->data.pu.ntf_dle_node;
	}
#endif

	if (ctx->node_ref.tx) {
		ctx->node_ref.tx->next = conn->llcp.tx_node_release;
		conn->llcp.tx_node_release = ctx->node_ref.tx;
	}
}

/*
 * LLCP Procedure Creation
 */

static struct proc_ctx *create_procedure(enum llcp_proc proc, struct llcp_mem_pool *ctx_pool)
{
	struct proc_ctx *ctx;

	ctx = proc_ctx_acquire(ctx_pool);
	if (!ctx) {
		return NULL;
	}

	ctx->proc = proc;
	ctx->done = 0U;
	ctx->rx_greedy = 0U;
	ctx->node_ref.rx = NULL;
	ctx->node_ref.tx_ack = NULL;
	ctx->state = LLCP_STATE_IDLE;

	/* Clear procedure context data */
	memset((void *)&ctx->data, 0, sizeof(ctx->data));

	/* Initialize opcodes fields to known values */
	ctx->rx_opcode = ULL_LLCP_INVALID_OPCODE;
	ctx->tx_opcode = ULL_LLCP_INVALID_OPCODE;
	ctx->response_opcode = ULL_LLCP_INVALID_OPCODE;

	return ctx;
}

struct proc_ctx *llcp_create_local_procedure(enum llcp_proc proc)
{
	return create_procedure(proc, &mem_local_ctx);
}

struct proc_ctx *llcp_create_remote_procedure(enum llcp_proc proc)
{
	return create_procedure(proc, &mem_remote_ctx);
}

/*
 * LLCP Public API
 */

void ull_cp_init(void)
{
	mem_init(mem_local_ctx.pool, PROC_CTX_BUF_SIZE,
		 CONFIG_BT_CTLR_LLCP_LOCAL_PROC_CTX_BUF_NUM,
		 &mem_local_ctx.free);
	mem_init(mem_remote_ctx.pool, PROC_CTX_BUF_SIZE,
		 CONFIG_BT_CTLR_LLCP_REMOTE_PROC_CTX_BUF_NUM,
		 &mem_remote_ctx.free);
	mem_init(mem_tx.pool, TX_CTRL_BUF_SIZE, LLCP_TX_CTRL_BUF_COUNT, &mem_tx.free);

#if defined(LLCP_TX_CTRL_BUF_QUEUE_ENABLE)
	/* Reset buffer alloc management */
	sys_slist_init(&tx_buffer_wait_list);
	common_tx_buffer_alloc = 0;
#endif /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */
}

void ull_llcp_init(struct ll_conn *conn)
{
	/* Reset local request fsm */
	llcp_lr_init(conn);
	sys_slist_init(&conn->llcp.local.pend_proc_list);
	conn->llcp.local.pause = 0U;

	/* Reset remote request fsm */
	llcp_rr_init(conn);
	sys_slist_init(&conn->llcp.remote.pend_proc_list);
	conn->llcp.remote.pause = 0U;
	conn->llcp.remote.incompat = INCOMPAT_NO_COLLISION;
	conn->llcp.remote.collision = 0U;
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
	conn->llcp.remote.paused_cmd = PROC_NONE;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */

	/* Reset the Procedure Response Timeout to be disabled;
	 * 'ull_cp_prt_reload_set' must be called to set up this value.
	 */
	conn->llcp.prt_reload = 0U;

	/* Reset the cached version Information (PROC_VERSION_EXCHANGE) */
	memset(&conn->llcp.vex, 0, sizeof(conn->llcp.vex));

#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN)
	/* Reset the cached min used channels information (PROC_MIN_USED_CHANS) */
	memset(&conn->llcp.muc, 0, sizeof(conn->llcp.muc));
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN */

	/* Reset the feature exchange fields */
	memset(&conn->llcp.fex, 0, sizeof(conn->llcp.fex));
	conn->llcp.fex.features_used = ll_feat_get();

#if defined(CONFIG_BT_CTLR_LE_ENC)
	/* Reset encryption related state */
	conn->lll.enc_tx = 0U;
	conn->lll.enc_rx = 0U;
#endif /* CONFIG_BT_CTLR_LE_ENC */

#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	conn->llcp.cte_req.is_enabled = 0U;
	conn->llcp.cte_req.req_expire = 0U;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
	conn->llcp.cte_rsp.is_enabled = 0U;
	conn->llcp.cte_rsp.is_active = 0U;
	conn->llcp.cte_rsp.disable_param = NULL;
	conn->llcp.cte_rsp.disable_cb = NULL;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */

#if defined(LLCP_TX_CTRL_BUF_QUEUE_ENABLE)
	conn->llcp.tx_buffer_alloc = 0;
#endif /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */

	conn->llcp.tx_q_pause_data_mask = 0;
	conn->lll.event_counter = 0;

	conn->llcp.tx_node_release = NULL;
	conn->llcp.rx_node_release = NULL;
}

void ull_cp_release_tx(struct ll_conn *conn, struct node_tx *tx)
{
#if defined(LLCP_TX_CTRL_BUF_QUEUE_ENABLE)
	if (conn) {
		LL_ASSERT(conn->llcp.tx_buffer_alloc > 0);
		if (conn->llcp.tx_buffer_alloc > CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM) {
			common_tx_buffer_alloc--;
		}
		conn->llcp.tx_buffer_alloc--;
	}
#else /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */
	ARG_UNUSED(conn);
#endif /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */
	tx_release(tx);
}

static int prt_elapse(uint16_t *expire, uint16_t elapsed_event)
{
	if (*expire != 0U) {
		if (*expire > elapsed_event) {
			*expire -= elapsed_event;
		} else {
			/* Timer expired */
			return -ETIMEDOUT;
		}
	}

	/* Timer still running */
	return 0;
}

int ull_cp_prt_elapse(struct ll_conn *conn, uint16_t elapsed_event, uint8_t *error_code)
{
	int loc_ret;
	int rem_ret;

	loc_ret = prt_elapse(&conn->llcp.local.prt_expire, elapsed_event);
	if (loc_ret == -ETIMEDOUT) {
		/* Local Request Machine timed out */

		struct proc_ctx *ctx;

		ctx = llcp_lr_peek(conn);
		LL_ASSERT(ctx);

		if (ctx->proc == PROC_TERMINATE) {
			/* Active procedure is ACL Termination */
			*error_code = ctx->data.term.error_code;
		} else {
			*error_code = BT_HCI_ERR_LL_RESP_TIMEOUT;
		}

		return -ETIMEDOUT;
	}

	rem_ret = prt_elapse(&conn->llcp.remote.prt_expire, elapsed_event);
	if (rem_ret == -ETIMEDOUT) {
		/* Remote Request Machine timed out */

		*error_code = BT_HCI_ERR_LL_RESP_TIMEOUT;
		return -ETIMEDOUT;
	}

	/* Both timers are still running */
	*error_code = BT_HCI_ERR_SUCCESS;
	return 0;
}

void ull_cp_prt_reload_set(struct ll_conn *conn, uint32_t conn_intv_us)
{
	/* Convert 40s Procedure Response Timeout into events */
	conn->llcp.prt_reload = RADIO_CONN_EVENTS((40U * 1000U * 1000U), conn_intv_us);
}
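
/* Example (for illustration): with a 7.5 ms connection interval the 40 s
 * Procedure Response Timeout above reloads to 40e6 / 7500, i.e. roughly
 * 5334 connection events, assuming RADIO_CONN_EVENTS() rounds up.
 */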

void ull_cp_run(struct ll_conn *conn)
{
	llcp_rr_run(conn);
	llcp_lr_run(conn);
}

void ull_cp_state_set(struct ll_conn *conn, uint8_t state)
{
	switch (state) {
	case ULL_CP_CONNECTED:
		llcp_rr_connect(conn);
		llcp_lr_connect(conn);
		break;
	case ULL_CP_DISCONNECTED:
		llcp_rr_disconnect(conn);
		llcp_lr_disconnect(conn);
		break;
	default:
		break;
	}
}

void ull_cp_release_nodes(struct ll_conn *conn)
{
	struct node_rx_pdu *rx;
	struct node_tx *tx;

	/* release any llcp retained rx nodes */
	rx = conn->llcp.rx_node_release;
	while (rx) {
		struct node_rx_hdr *hdr;

		/* traverse to next rx node */
		hdr = &rx->hdr;
		rx = hdr->link->mem;

		/* enqueue rx node towards Thread */
		ll_rx_put(hdr->link, hdr);
	}
	conn->llcp.rx_node_release = NULL;

	/* release any llcp pre-allocated tx nodes */
	tx = conn->llcp.tx_node_release;
	while (tx) {
		struct node_tx *tx_release;

		tx_release = tx;
		tx = tx->next;

		ull_cp_release_tx(conn, tx_release);
	}
	conn->llcp.tx_node_release = NULL;
}

#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN)
uint8_t ull_cp_min_used_chans(struct ll_conn *conn, uint8_t phys, uint8_t min_used_chans)
{
	struct proc_ctx *ctx;

	if (conn->lll.role != BT_HCI_ROLE_PERIPHERAL) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	ctx = llcp_create_local_procedure(PROC_MIN_USED_CHANS);
	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	ctx->data.muc.phys = phys;
	ctx->data.muc.min_used_chans = min_used_chans;

	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN */

#if defined(CONFIG_BT_CTLR_LE_PING)
uint8_t ull_cp_le_ping(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_create_local_procedure(PROC_LE_PING);
	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}
#endif /* CONFIG_BT_CTLR_LE_PING */

#if defined(CONFIG_BT_CENTRAL) || defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG)
uint8_t ull_cp_feature_exchange(struct ll_conn *conn, uint8_t host_initiated)
{
	struct proc_ctx *ctx;

	ctx = llcp_create_local_procedure(PROC_FEATURE_EXCHANGE);
	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	ctx->data.fex.host_initiated = host_initiated;

	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}
#endif /* CONFIG_BT_CENTRAL || CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG */

uint8_t ull_cp_version_exchange(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_create_local_procedure(PROC_VERSION_EXCHANGE);
	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}

#if defined(CONFIG_BT_CTLR_LE_ENC)
#if defined(CONFIG_BT_CENTRAL)
uint8_t ull_cp_encryption_start(struct ll_conn *conn, const uint8_t rand[8], const uint8_t ediv[2],
				const uint8_t ltk[16])
{
	struct proc_ctx *ctx;

	if (conn->lll.role != BT_HCI_ROLE_CENTRAL) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	ctx = llcp_create_local_procedure(PROC_ENCRYPTION_START);
	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Copy input parameters */
	memcpy(ctx->data.enc.rand, rand, sizeof(ctx->data.enc.rand));
	ctx->data.enc.ediv[0] = ediv[0];
	ctx->data.enc.ediv[1] = ediv[1];
	memcpy(ctx->data.enc.ltk, ltk, sizeof(ctx->data.enc.ltk));

	/* Enqueue request */
	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}

uint8_t ull_cp_encryption_pause(struct ll_conn *conn, const uint8_t rand[8], const uint8_t ediv[2],
				const uint8_t ltk[16])
{
	struct proc_ctx *ctx;

	if (conn->lll.role != BT_HCI_ROLE_CENTRAL) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	ctx = llcp_create_local_procedure(PROC_ENCRYPTION_PAUSE);
	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Copy input parameters */
	memcpy(ctx->data.enc.rand, rand, sizeof(ctx->data.enc.rand));
	ctx->data.enc.ediv[0] = ediv[0];
	ctx->data.enc.ediv[1] = ediv[1];
	memcpy(ctx->data.enc.ltk, ltk, sizeof(ctx->data.enc.ltk));

	/* Enqueue request */
	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}
#endif /* CONFIG_BT_CENTRAL */

uint8_t ull_cp_encryption_paused(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);
	if (ctx && ctx->proc == PROC_ENCRYPTION_PAUSE) {
		return 1;
	}

	ctx = llcp_lr_peek(conn);
	if (ctx && ctx->proc == PROC_ENCRYPTION_PAUSE) {
		return 1;
	}

	return 0;
}
#endif /* CONFIG_BT_CTLR_LE_ENC */

#if defined(CONFIG_BT_CTLR_PHY)
uint8_t ull_cp_phy_update(struct ll_conn *conn, uint8_t tx, uint8_t flags, uint8_t rx,
			  uint8_t host_initiated)
{
	struct proc_ctx *ctx;

	ctx = llcp_create_local_procedure(PROC_PHY_UPDATE);
	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	ctx->data.pu.tx = tx;
	ctx->data.pu.flags = flags;
	ctx->data.pu.rx = rx;
	ctx->data.pu.host_initiated = host_initiated;

	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}
#endif /* CONFIG_BT_CTLR_PHY */

uint8_t ull_cp_terminate(struct ll_conn *conn, uint8_t error_code)
{
	struct proc_ctx *ctx;

	llcp_lr_terminate(conn);
	llcp_rr_terminate(conn);

	ctx = llcp_create_local_procedure(PROC_TERMINATE);
	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	ctx->data.term.error_code = error_code;

	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
uint8_t ull_cp_cis_terminate(struct ll_conn *conn,
			     struct ll_conn_iso_stream *cis,
			     uint8_t error_code)
{
	struct proc_ctx *ctx;

	if (conn->lll.handle != cis->lll.acl_handle) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	ctx = llcp_create_local_procedure(PROC_CIS_TERMINATE);
	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	ctx->data.cis_term.cig_id = cis->group->cig_id;
	ctx->data.cis_term.cis_id = cis->cis_id;
	ctx->data.cis_term.error_code = error_code;

	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}
#endif /* defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) */

#if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
uint8_t ull_cp_cis_create(struct ll_conn *conn, struct ll_conn_iso_stream *cis)
{
	struct ll_conn_iso_group *cig;
	struct proc_ctx *ctx;

	if (!conn->llcp.fex.valid) {
		/* No feature exchange was performed so initiate before CIS Create */
		if (ull_cp_feature_exchange(conn, 0U) != BT_HCI_ERR_SUCCESS) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}

	ctx = llcp_create_local_procedure(PROC_CIS_CREATE);
	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	cig = cis->group;
	ctx->data.cis_create.cis_handle = cis->lll.handle;

	ctx->data.cis_create.cig_id = cis->group->cig_id;
	ctx->data.cis_create.cis_id = cis->cis_id;
	ctx->data.cis_create.c_phy = cis->lll.tx.phy;
	ctx->data.cis_create.p_phy = cis->lll.rx.phy;
	ctx->data.cis_create.c_sdu_interval = cig->c_sdu_interval;
	ctx->data.cis_create.p_sdu_interval = cig->p_sdu_interval;
	ctx->data.cis_create.c_max_pdu = cis->lll.tx.max_pdu;
	ctx->data.cis_create.p_max_pdu = cis->lll.rx.max_pdu;
	ctx->data.cis_create.c_max_sdu = cis->c_max_sdu;
	ctx->data.cis_create.p_max_sdu = cis->p_max_sdu;
	ctx->data.cis_create.iso_interval = cig->iso_interval;
	ctx->data.cis_create.framed = cis->framed;
	ctx->data.cis_create.nse = cis->lll.nse;
	ctx->data.cis_create.sub_interval = cis->lll.sub_interval;
	ctx->data.cis_create.c_bn = cis->lll.tx.bn;
	ctx->data.cis_create.p_bn = cis->lll.rx.bn;
	ctx->data.cis_create.c_ft = cis->lll.tx.ft;
	ctx->data.cis_create.p_ft = cis->lll.rx.ft;

	/* ctx->data.cis_create.conn_event_count will be filled when Tx PDU is
	 * enqueued.
	 */

	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}
#endif /* defined(CONFIG_BT_CTLR_CENTRAL_ISO) */

#if defined(CONFIG_BT_CENTRAL)
uint8_t ull_cp_chan_map_update(struct ll_conn *conn, const uint8_t chm[5])
{
	struct proc_ctx *ctx;

	if (conn->lll.role != BT_HCI_ROLE_CENTRAL) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	ctx = llcp_create_local_procedure(PROC_CHAN_MAP_UPDATE);
	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	memcpy(ctx->data.chmu.chm, chm, sizeof(ctx->data.chmu.chm));

	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}
#endif /* CONFIG_BT_CENTRAL */

const uint8_t *ull_cp_chan_map_update_pending(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	if (conn->lll.role == BT_HCI_ROLE_CENTRAL) {
		ctx = llcp_lr_peek(conn);
	} else {
		ctx = llcp_rr_peek(conn);
	}

	if (ctx && ctx->proc == PROC_CHAN_MAP_UPDATE) {
		return ctx->data.chmu.chm;
	}

	return NULL;
}

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
uint8_t ull_cp_data_length_update(struct ll_conn *conn, uint16_t max_tx_octets,
				  uint16_t max_tx_time)
{
	struct proc_ctx *ctx;

	if (!feature_dle(conn)) {
		/* Data Length Update procedure not supported */

		/* Returning BT_HCI_ERR_SUCCESS here might seem counter-intuitive,
		 * but nothing in the specification seems to suggest
		 * BT_HCI_ERR_UNSUPP_REMOTE_FEATURE.
		 */
		return BT_HCI_ERR_SUCCESS;
	}

	ctx = llcp_create_local_procedure(PROC_DATA_LENGTH_UPDATE);

	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Apply update to local */
	ull_dle_local_tx_update(conn, max_tx_octets, max_tx_time);

	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
uint8_t ull_cp_req_peer_sca(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	if (!feature_sca(conn)) {
		return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
	}

	ctx = llcp_create_local_procedure(PROC_SCA_UPDATE);

	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */

#if defined(CONFIG_BT_CTLR_LE_ENC)
uint8_t ull_cp_ltk_req_reply(struct ll_conn *conn, const uint8_t ltk[16])
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);
	if (ctx && (ctx->proc == PROC_ENCRYPTION_START || ctx->proc == PROC_ENCRYPTION_PAUSE) &&
	    llcp_rp_enc_ltk_req_reply_allowed(conn, ctx)) {
		memcpy(ctx->data.enc.ltk, ltk, sizeof(ctx->data.enc.ltk));
		llcp_rp_enc_ltk_req_reply(conn, ctx);
		return BT_HCI_ERR_SUCCESS;
	}
	return BT_HCI_ERR_CMD_DISALLOWED;
}

uint8_t ull_cp_ltk_req_neq_reply(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);
	if (ctx && (ctx->proc == PROC_ENCRYPTION_START || ctx->proc == PROC_ENCRYPTION_PAUSE) &&
	    llcp_rp_enc_ltk_req_reply_allowed(conn, ctx)) {
		llcp_rp_enc_ltk_req_neg_reply(conn, ctx);
		return BT_HCI_ERR_SUCCESS;
	}
	return BT_HCI_ERR_CMD_DISALLOWED;
}
#endif /* CONFIG_BT_CTLR_LE_ENC */

uint8_t ull_cp_conn_update(struct ll_conn *conn, uint16_t interval_min, uint16_t interval_max,
			   uint16_t latency, uint16_t timeout, uint16_t *offsets)
{
	struct proc_ctx *ctx;

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
	if (feature_conn_param_req(conn)) {
		ctx = llcp_create_local_procedure(PROC_CONN_PARAM_REQ);
	} else if (conn->lll.role == BT_HCI_ROLE_CENTRAL) {
		ctx = llcp_create_local_procedure(PROC_CONN_UPDATE);
	} else {
		return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
	}
#else /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
	if (conn->lll.role == BT_HCI_ROLE_PERIPHERAL) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
	ctx = llcp_create_local_procedure(PROC_CONN_UPDATE);
#endif /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */

	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Store arguments in corresponding procedure context */
	if (ctx->proc == PROC_CONN_UPDATE) {
		ctx->data.cu.interval_max = interval_max;
		ctx->data.cu.latency = latency;
		ctx->data.cu.timeout = timeout;
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
	} else if (ctx->proc == PROC_CONN_PARAM_REQ) {
		ctx->data.cu.interval_min = interval_min;
		ctx->data.cu.interval_max = interval_max;
		ctx->data.cu.latency = latency;
		ctx->data.cu.timeout = timeout;
		ctx->data.cu.offsets[0] = offsets ? offsets[0] : 0x0000;
		ctx->data.cu.offsets[1] = offsets ? offsets[1] : 0xffff;
		ctx->data.cu.offsets[2] = offsets ? offsets[2] : 0xffff;
		ctx->data.cu.offsets[3] = offsets ? offsets[3] : 0xffff;
		ctx->data.cu.offsets[4] = offsets ? offsets[4] : 0xffff;
		ctx->data.cu.offsets[5] = offsets ? offsets[5] : 0xffff;

		if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
		    (conn->lll.role == BT_HCI_ROLE_PERIPHERAL)) {
			uint16_t handle = ll_conn_handle_get(conn);

			ull_periph_latency_cancel(conn, handle);
		}
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
	} else {
		LL_ASSERT(0); /* Unknown procedure */
	}

	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}

#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER)
uint8_t ull_cp_periodic_sync(struct ll_conn *conn, struct ll_sync_set *sync,
			     struct ll_adv_sync_set *adv_sync, uint16_t service_data)
{
	struct pdu_adv_sync_info *si;
	struct proc_ctx *ctx;
	uint8_t *access_addr;
	const uint8_t *adva;
	uint16_t interval;
	uint8_t *chan_map;
	uint8_t *crc_init;
	uint8_t addr_type;
	uint8_t si_sca;
	uint8_t sid;
	uint8_t phy;

	/* Exactly one of the sync and adv_sync pointers should be non-null */
	LL_ASSERT((!adv_sync && sync) || (adv_sync && !sync));

	if (!feature_peer_periodic_sync_recv(conn)) {
		return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
	}

	ctx = llcp_create_local_procedure(PROC_PERIODIC_SYNC);
	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	if (sync) {
		chan_map = sync->lll.chm[sync->lll.chm_first].data_chan_map;
		si_sca = sync->lll.sca;
		access_addr = sync->lll.access_addr;
		crc_init = sync->lll.crc_init;
		sid = sync->sid;
		phy = sync->lll.phy;
		interval = sync->interval;

		addr_type = sync->peer_id_addr_type;
		if (IS_ENABLED(CONFIG_BT_CTLR_PRIVACY) && sync->peer_addr_resolved) {
			uint8_t rl_idx;

			/* peer_id_addr contains the identity address; get the peer's RPA */

			rl_idx = ull_filter_rl_find(addr_type, sync->peer_id_addr, NULL);

			/* A resolved address must be present in the resolve list */
			LL_ASSERT(rl_idx < ll_rl_size_get());

			/* Generate RPAs if required */
			ull_filter_rpa_update(false);

			/* Note: Since we need the peer's RPA, use tgta_get */
			adva = ull_filter_tgta_get(rl_idx);
		} else {
			adva = sync->peer_id_addr;
		}
	} else {
		struct ll_adv_set *adv;
		struct pdu_adv *adv_pdu;

		chan_map = adv_sync->lll.chm[adv_sync->lll.chm_first].data_chan_map;
		si_sca = lll_clock_sca_local_get();
		access_addr = adv_sync->lll.access_addr;
		crc_init = adv_sync->lll.crc_init;
		phy = adv_sync->lll.adv->phy_s;
		interval = adv_sync->interval;

		adv = HDR_LLL2ULL(adv_sync->lll.adv);
		sid = adv->sid;

		/* Pull AdvA from pdu */
		adv_pdu = lll_adv_sync_data_curr_get(&adv_sync->lll);
		addr_type = adv_pdu->tx_addr;
		/* Note: AdvA is mandatory for AUX_SYNC_IND and at the start of the ext. header */
		adva = adv_pdu->adv_ext_ind.ext_hdr.data;
	}

	/* Store parameters in corresponding procedure context */
	ctx->data.periodic_sync.sync_handle = sync ? ull_sync_handle_get(sync) :
							BT_HCI_SYNC_HANDLE_INVALID;
	ctx->data.periodic_sync.adv_handle = adv_sync ? ull_adv_sync_handle_get(adv_sync) :
							BT_HCI_ADV_HANDLE_INVALID;
	ctx->data.periodic_sync.id = service_data;
	ctx->data.periodic_sync.sca = lll_clock_sca_local_get();

	si = &ctx->data.periodic_sync.sync_info;
	si->interval = sys_cpu_to_le16(interval);
	(void)memcpy(si->sca_chm, chan_map, sizeof(ctx->data.periodic_sync.sync_info.sca_chm));
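	/* The last byte of sca_chm is shared between the channel map and the
	 * SCA: clear the SCA bits copied in from the channel map and insert
	 * the sync train's SCA instead.
	 */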
	si->sca_chm[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] &= ~PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK;
	si->sca_chm[PDU_SYNC_INFO_SCA_CHM_SCA_BYTE_OFFSET] |= ((si_sca <<
							      PDU_SYNC_INFO_SCA_CHM_SCA_BIT_POS) &
							      PDU_SYNC_INFO_SCA_CHM_SCA_BIT_MASK);
	(void)memcpy(si->aa, access_addr, sizeof(si->aa));
	(void)memcpy(si->crc_init, crc_init, sizeof(si->crc_init));

	ctx->data.periodic_sync.addr_type = addr_type;
	(void)memcpy(ctx->data.periodic_sync.adv_addr, adva, BDADDR_SIZE);
	ctx->data.periodic_sync.sid = sid;
	ctx->data.periodic_sync.phy = phy;

	/* All timing sensitive parameters will be determined and filled when Tx PDU is enqueued. */

	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER */

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
uint8_t ull_cp_remote_dle_pending(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);

	return (ctx && ctx->proc == PROC_DATA_LENGTH_UPDATE);
}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
void ull_cp_conn_param_req_reply(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);
	if (ctx && ctx->proc == PROC_CONN_PARAM_REQ) {
		llcp_rp_conn_param_req_reply(conn, ctx);
	}
}

void ull_cp_conn_param_req_neg_reply(struct ll_conn *conn, uint8_t error_code)
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);
	if (ctx && ctx->proc == PROC_CONN_PARAM_REQ) {
		ctx->data.cu.error = error_code;
		llcp_rp_conn_param_req_neg_reply(conn, ctx);
	}
}

uint8_t ull_cp_remote_cpr_pending(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);

	return (ctx && ctx->proc == PROC_CONN_PARAM_REQ);
}

#if defined(CONFIG_BT_CTLR_USER_CPR_ANCHOR_POINT_MOVE)
bool ull_cp_remote_cpr_apm_awaiting_reply(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);

	if (ctx && ctx->proc == PROC_CONN_PARAM_REQ) {
		return llcp_rp_conn_param_req_apm_awaiting_reply(ctx);
	}

	return false;
}

void ull_cp_remote_cpr_apm_reply(struct ll_conn *conn, uint16_t *offsets)
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);

	if (ctx && ctx->proc == PROC_CONN_PARAM_REQ) {
		ctx->data.cu.offsets[0] = offsets[0];
		ctx->data.cu.offsets[1] = offsets[1];
		ctx->data.cu.offsets[2] = offsets[2];
		ctx->data.cu.offsets[3] = offsets[3];
		ctx->data.cu.offsets[4] = offsets[4];
		ctx->data.cu.offsets[5] = offsets[5];
		ctx->data.cu.error = 0U;
		llcp_rp_conn_param_req_apm_reply(conn, ctx);
	}
}

void ull_cp_remote_cpr_apm_neg_reply(struct ll_conn *conn, uint8_t error_code)
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);

	if (ctx && ctx->proc == PROC_CONN_PARAM_REQ) {
		ctx->data.cu.error = error_code;
		llcp_rp_conn_param_req_apm_reply(conn, ctx);
	}
}
#endif /* CONFIG_BT_CTLR_USER_CPR_ANCHOR_POINT_MOVE */
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */

#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
void ull_cp_cte_rsp_enable(struct ll_conn *conn, bool enable, uint8_t max_cte_len,
			   uint8_t cte_types)
{
	conn->llcp.cte_rsp.is_enabled = enable;

	if (enable) {
		conn->llcp.cte_rsp.max_cte_len = max_cte_len;
		conn->llcp.cte_rsp.cte_types = cte_types;
	}
}
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */

#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
uint8_t ull_cp_cte_req(struct ll_conn *conn, uint8_t min_cte_len, uint8_t cte_type)
{
	struct proc_ctx *ctx;

	/* If the Controller has gained awareness, either:
	 * - via the Feature Exchange control procedure, that the peer device does not support
	 *   CTE response, or
	 * - via reception of an LL_UNKNOWN_RSP with unknown type LL_CTE_REQ, that the peer
	 *   device does not recognize the CTE request,
	 * then respond to the Host that the CTE request enable command is not possible due to
	 * an unsupported remote feature.
	 */
	if ((conn->llcp.fex.valid &&
	     (!(conn->llcp.fex.features_peer & BIT64(BT_LE_FEAT_BIT_CONN_CTE_RESP)))) ||
	    (!conn->llcp.fex.valid && !feature_cte_req(conn))) {
		return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
	}

	/* The request may be started by the periodic CTE request procedure, which skips the
	 * earlier PHY verification. In case the PHY has changed to CODED, the request should
	 * be stopped.
	 */
#if defined(CONFIG_BT_CTLR_PHY)
	if (conn->lll.phy_rx != PHY_CODED) {
#else
	if (1) {
#endif /* CONFIG_BT_CTLR_PHY */
		ctx = llcp_create_local_procedure(PROC_CTE_REQ);
		if (!ctx) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}

		ctx->data.cte_req.min_len = min_cte_len;
		ctx->data.cte_req.type = cte_type;

		llcp_lr_enqueue(conn, ctx);

		return BT_HCI_ERR_SUCCESS;
	}

	return BT_HCI_ERR_CMD_DISALLOWED;
}

void ull_cp_cte_req_set_disable(struct ll_conn *conn)
{
	conn->llcp.cte_req.is_enabled = 0U;
	conn->llcp.cte_req.req_interval = 0U;
}
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */

#if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
void ull_cp_cc_offset_calc_reply(struct ll_conn *conn, uint32_t cis_offset_min,
				 uint32_t cis_offset_max)
{
	struct proc_ctx *ctx;

	ctx = llcp_lr_peek(conn);
	if (ctx && ctx->proc == PROC_CIS_CREATE) {
		ctx->data.cis_create.cis_offset_min = cis_offset_min;
		ctx->data.cis_create.cis_offset_max = cis_offset_max;

		llcp_lp_cc_offset_calc_reply(conn, ctx);
	}
}
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO */

#if defined(CONFIG_BT_PERIPHERAL) && defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
bool ull_cp_cc_awaiting_reply(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);
	if (ctx && ctx->proc == PROC_CIS_CREATE) {
		return llcp_rp_cc_awaiting_reply(ctx);
	}

	return false;
}

uint16_t ull_cp_cc_ongoing_handle(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);
	if (ctx && ctx->proc == PROC_CIS_CREATE) {
		return ctx->data.cis_create.cis_handle;
	}

	return 0xFFFF;
}

void ull_cp_cc_accept(struct ll_conn *conn, uint32_t cis_offset_min)
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);
	if (ctx && ctx->proc == PROC_CIS_CREATE) {
		if (cis_offset_min > ctx->data.cis_create.cis_offset_min) {
			if (cis_offset_min > ctx->data.cis_create.cis_offset_max) {
				ctx->data.cis_create.error = BT_HCI_ERR_UNSUPP_LL_PARAM_VAL;
				llcp_rp_cc_reject(conn, ctx);

				return;
			}

			ctx->data.cis_create.cis_offset_min = cis_offset_min;
		}

		llcp_rp_cc_accept(conn, ctx);
	}
}

void ull_cp_cc_reject(struct ll_conn *conn, uint8_t error_code)
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);
	if (ctx && ctx->proc == PROC_CIS_CREATE) {
		ctx->data.cis_create.error = error_code;
		llcp_rp_cc_reject(conn, ctx);
	}
}
#endif /* CONFIG_BT_PERIPHERAL && CONFIG_BT_CTLR_PERIPHERAL_ISO */

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
bool ull_cp_cc_awaiting_established(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	ctx = llcp_rr_peek(conn);
	if (ctx && ctx->proc == PROC_CIS_CREATE) {
		return llcp_rp_cc_awaiting_established(ctx);
	}
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */

#if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
	ctx = llcp_lr_peek(conn);
	if (ctx && ctx->proc == PROC_CIS_CREATE) {
		return llcp_lp_cc_awaiting_established(ctx);
	}
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
	return false;
}

#if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
bool ull_cp_cc_cancel(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_lr_peek(conn);
	if (ctx && ctx->proc == PROC_CIS_CREATE) {
		return llcp_lp_cc_cancel(conn, ctx);
	}

	return false;
}
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO */

void ull_cp_cc_established(struct ll_conn *conn, uint8_t error_code)
{
	struct proc_ctx *ctx;

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	ctx = llcp_rr_peek(conn);
	if (ctx && ctx->proc == PROC_CIS_CREATE) {
		ctx->data.cis_create.error = error_code;
		llcp_rp_cc_established(conn, ctx);
		llcp_rr_check_done(conn, ctx);
	}
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */

#if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
	ctx = llcp_lr_peek(conn);
	if (ctx && ctx->proc == PROC_CIS_CREATE) {
		ctx->data.cis_create.error = error_code;
		llcp_lp_cc_established(conn, ctx);
		llcp_lr_check_done(conn, ctx);
	}
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
}
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO || CONFIG_BT_CTLR_CENTRAL_ISO */

#if defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_CTLR_CENTRAL_ISO)
bool ull_lp_cc_is_active(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_lr_peek(conn);
	if (ctx && ctx->proc == PROC_CIS_CREATE) {
		return llcp_lp_cc_is_active(ctx);
	}
	return false;
}

bool ull_lp_cc_is_enqueued(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_lr_peek_proc(conn, PROC_CIS_CREATE);

	return (ctx != NULL);
}
#endif /* defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_CTLR_CENTRAL_ISO) */

#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER)
void ull_lp_past_offset_get_calc_params(struct ll_conn *conn,
					uint8_t *adv_sync_handle, uint16_t *sync_handle)
{
	const struct proc_ctx *ctx;

	ctx = llcp_lr_peek_proc(conn, PROC_PERIODIC_SYNC);

	if (ctx) {
		*adv_sync_handle = ctx->data.periodic_sync.adv_handle;
		*sync_handle = ctx->data.periodic_sync.sync_handle;
	} else {
		*adv_sync_handle = BT_HCI_ADV_HANDLE_INVALID;
		*sync_handle = BT_HCI_SYNC_HANDLE_INVALID;
	}
}

void ull_lp_past_offset_calc_reply(struct ll_conn *conn, uint32_t offset_us,
				   uint16_t pa_event_counter, uint16_t last_pa_event_counter)
{
	struct proc_ctx *ctx;
	uint16_t conn_event_offset = 0;

	ctx = llcp_lr_peek_proc(conn, PROC_PERIODIC_SYNC);

	if (ctx) {
		/* Check if the offset_us will fit within the sync_info offset fields */
		uint32_t max_offset = OFFS_ADJUST_US + OFFSET_BASE_MAX_VALUE * OFFS_UNIT_300_US;

		if (offset_us > max_offset) {
			/* The offset_us is larger than what the sync_info offset fields can hold,
			 * therefore it needs to be compensated with a change in the
			 * connection event count - conn_event_count
			 */
			uint32_t conn_interval_us = conn->lll.interval * CONN_INT_UNIT_US;

			conn_event_offset = DIV_ROUND_UP(offset_us - max_offset, conn_interval_us);
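
			/* Illustrative numbers (assuming the spec values
			 * OFFS_ADJUST_US = 2.4576 s and OFFS_UNIT_300_US = 300 us):
			 * max_offset is then about 4.9 s, so an offset_us of 6 s
			 * with a 50 ms connection interval would give a
			 * conn_event_offset of ceil(1.1 s / 50 ms) = 22 events.
			 */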

			/* Update offset_us */
			offset_us = offset_us - (conn_event_offset * conn_interval_us);

			ctx->data.periodic_sync.conn_event_count = ull_conn_event_counter(conn) +
								   conn_event_offset;
		}

		llcp_pdu_fill_sync_info_offset(&ctx->data.periodic_sync.sync_info, offset_us);
#if defined(CONFIG_BT_PERIPHERAL)
		/* Save the result for later use */
		ctx->data.periodic_sync.offset_us = offset_us;
#endif /* CONFIG_BT_PERIPHERAL */

		ctx->data.periodic_sync.sync_conn_event_count = ull_conn_event_counter(conn);
		ctx->data.periodic_sync.conn_event_count = ull_conn_event_counter(conn) +
							   conn_event_offset;

		ctx->data.periodic_sync.sync_info.evt_cntr = pa_event_counter;

		ctx->data.periodic_sync.last_pa_event_counter = last_pa_event_counter;

		llcp_lp_past_offset_calc_reply(conn, ctx);
	}
}

void ull_lp_past_conn_evt_done(struct ll_conn *conn, struct node_rx_event_done *done)
{
	struct proc_ctx *ctx;

	ctx = llcp_lr_peek_proc(conn, PROC_PERIODIC_SYNC);
	if (ctx) {
#if defined(CONFIG_BT_PERIPHERAL)
		if (conn->lll.role == BT_HCI_ROLE_PERIPHERAL && done->extra.trx_cnt) {
			uint32_t start_to_actual_us;

			start_to_actual_us = ull_get_wrapped_time_us(
						done->extra.drift.start_to_address_actual_us,
						(-done->extra.drift.preamble_to_addr_us));

			ctx->data.periodic_sync.conn_start_to_actual_us = start_to_actual_us;
		}
#endif /* CONFIG_BT_PERIPHERAL */

		ctx->data.periodic_sync.conn_evt_trx = done->extra.trx_cnt;
		llcp_lp_past_conn_evt_done(conn, ctx);
	}
}
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER */

static bool pdu_is_expected(struct pdu_data *pdu, struct proc_ctx *ctx)
{
	return (ctx->rx_opcode == pdu->llctrl.opcode || ctx->rx_greedy);
}

static bool pdu_is_unknown(struct pdu_data *pdu, struct proc_ctx *ctx)
{
	return ((pdu->llctrl.opcode == PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP) &&
		(ctx->tx_opcode == pdu->llctrl.unknown_rsp.type));
}

static bool pdu_is_reject(struct pdu_data *pdu, struct proc_ctx *ctx)
{
	/* For LL_REJECT_IND there is no simple way of confirming protocol validity of the PDU
	 * for the given procedure, so simply pass it on and let procedure engine deal with it
	 */
	return (pdu->llctrl.opcode == PDU_DATA_LLCTRL_TYPE_REJECT_IND);
}

static bool pdu_is_reject_ext(struct pdu_data *pdu, struct proc_ctx *ctx)
{
	return ((pdu->llctrl.opcode == PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND) &&
		(ctx->tx_opcode == pdu->llctrl.reject_ext_ind.reject_opcode));
}

static bool pdu_is_any_reject(struct pdu_data *pdu, struct proc_ctx *ctx)
{
	return (pdu_is_reject_ext(pdu, ctx) || pdu_is_reject(pdu, ctx));
}

static bool pdu_is_terminate(struct pdu_data *pdu)
{
	return pdu->llctrl.opcode == PDU_DATA_LLCTRL_TYPE_TERMINATE_IND;
}

#define VALIDATE_PDU_LEN(pdu, type) (pdu->len == PDU_DATA_LLCTRL_LEN(type))
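
/* For example, VALIDATE_PDU_LEN(pdu, ping_req) checks that the received
 * control PDU's length equals PDU_DATA_LLCTRL_LEN(ping_req), i.e. the exact
 * length defined for the LL_PING_REQ payload.
 */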

#if defined(CONFIG_BT_PERIPHERAL)
static bool pdu_validate_conn_update_ind(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, conn_update_ind);
}

static bool pdu_validate_chan_map_ind(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, chan_map_ind);
}
#endif /* CONFIG_BT_PERIPHERAL */

static bool pdu_validate_terminate_ind(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, terminate_ind);
}

#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_PERIPHERAL)
static bool pdu_validate_enc_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, enc_req);
}
#endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_PERIPHERAL */

#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_CENTRAL)
static bool pdu_validate_enc_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, enc_rsp);
}

static bool pdu_validate_start_enc_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, start_enc_req);
}
#endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_CENTRAL */

#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_PERIPHERAL)
static bool pdu_validate_start_enc_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, start_enc_rsp);
}
#endif

static bool pdu_validate_unknown_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, unknown_rsp);
}

#if defined(CONFIG_BT_PERIPHERAL)
static bool pdu_validate_feature_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, feature_req);
}
#endif

#if defined(CONFIG_BT_CENTRAL)
static bool pdu_validate_feature_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, feature_rsp);
}
#endif

#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_PERIPHERAL)
static bool pdu_validate_pause_enc_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, pause_enc_req);
}
#endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_PERIPHERAL */

#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_CENTRAL)
static bool pdu_validate_pause_enc_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, pause_enc_rsp);
}
#endif

static bool pdu_validate_version_ind(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, version_ind);
}

static bool pdu_validate_reject_ind(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, reject_ind);
}

#if defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG) && defined(CONFIG_BT_CENTRAL)
static bool pdu_validate_per_init_feat_xchg(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, per_init_feat_xchg);
}
#endif /* CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG && CONFIG_BT_CENTRAL */

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
static bool pdu_validate_conn_param_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, conn_param_req);
}
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */

static bool pdu_validate_conn_param_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, conn_param_rsp);
}

static bool pdu_validate_reject_ext_ind(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, reject_ext_ind);
}

#if defined(CONFIG_BT_CTLR_LE_PING)
static bool pdu_validate_ping_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, ping_req);
}
#endif /* CONFIG_BT_CTLR_LE_PING */

static bool pdu_validate_ping_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, ping_rsp);
}

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
static bool pdu_validate_length_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, length_req);
}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

static bool pdu_validate_length_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, length_rsp);
}

#if defined(CONFIG_BT_CTLR_PHY)
static bool pdu_validate_phy_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, phy_req);
}
#endif /* CONFIG_BT_CTLR_PHY */

static bool pdu_validate_phy_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, phy_rsp);
}

static bool pdu_validate_phy_upd_ind(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, phy_upd_ind);
}

#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_CENTRAL)
static bool pdu_validate_min_used_chan_ind(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, min_used_chans_ind);
}
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_CENTRAL */

#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
static bool pdu_validate_cte_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, cte_req);
}
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */

#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
static bool pdu_validate_cte_resp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, cte_rsp);
}
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */

#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
static bool pdu_validate_clock_accuracy_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, clock_accuracy_req);
}
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */
1752 
1753 static bool pdu_validate_clock_accuracy_rsp(struct pdu_data *pdu)
1754 {
1755 	return VALIDATE_PDU_LEN(pdu, clock_accuracy_rsp);
1756 }
1757 
1758 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
1759 static bool pdu_validate_periodic_sync_ind(struct pdu_data *pdu)
1760 {
1761 	return VALIDATE_PDU_LEN(pdu, periodic_sync_ind);
1762 }
1763 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
1764 
1765 typedef bool (*pdu_param_validate_t)(struct pdu_data *pdu);
1766 
1767 struct pdu_validate {
	/* TODO: can be just a size if no other sanity checks are needed here */
1769 	pdu_param_validate_t validate_cb;
1770 };
1771 
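/*
 * Validator lookup table, indexed by LL Control PDU opcode. Entries are
 * conditionally compiled to match the enabled roles and features; a missing
 * (NULL) callback makes pdu_is_valid() treat the opcode as valid.
 */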
1772 static const struct pdu_validate pdu_validate[] = {
1773 #if defined(CONFIG_BT_PERIPHERAL)
1774 	[PDU_DATA_LLCTRL_TYPE_CONN_UPDATE_IND] = { pdu_validate_conn_update_ind },
1775 	[PDU_DATA_LLCTRL_TYPE_CHAN_MAP_IND] = { pdu_validate_chan_map_ind },
1776 #endif /* CONFIG_BT_PERIPHERAL */
1777 	[PDU_DATA_LLCTRL_TYPE_TERMINATE_IND] = { pdu_validate_terminate_ind },
1778 #if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_PERIPHERAL)
1779 	[PDU_DATA_LLCTRL_TYPE_ENC_REQ] = { pdu_validate_enc_req },
1780 #endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_PERIPHERAL */
1781 #if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_CENTRAL)
1782 	[PDU_DATA_LLCTRL_TYPE_ENC_RSP] = { pdu_validate_enc_rsp },
1783 	[PDU_DATA_LLCTRL_TYPE_START_ENC_REQ] = { pdu_validate_start_enc_req },
#endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_CENTRAL */
1785 #if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_PERIPHERAL)
1786 	[PDU_DATA_LLCTRL_TYPE_START_ENC_RSP] = { pdu_validate_start_enc_rsp },
#endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_PERIPHERAL */
1788 	[PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP] = { pdu_validate_unknown_rsp },
1789 #if defined(CONFIG_BT_PERIPHERAL)
1790 	[PDU_DATA_LLCTRL_TYPE_FEATURE_REQ] = { pdu_validate_feature_req },
#endif /* CONFIG_BT_PERIPHERAL */
1792 #if defined(CONFIG_BT_CENTRAL)
1793 	[PDU_DATA_LLCTRL_TYPE_FEATURE_RSP] = { pdu_validate_feature_rsp },
#endif /* CONFIG_BT_CENTRAL */
1795 #if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_PERIPHERAL)
1796 	[PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_REQ] = { pdu_validate_pause_enc_req },
1797 #endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_PERIPHERAL */
1798 #if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_CENTRAL)
1799 	[PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_RSP] = { pdu_validate_pause_enc_rsp },
#endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_CENTRAL */
1801 	[PDU_DATA_LLCTRL_TYPE_VERSION_IND] = { pdu_validate_version_ind },
1802 	[PDU_DATA_LLCTRL_TYPE_REJECT_IND] = { pdu_validate_reject_ind },
1803 #if defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG) && defined(CONFIG_BT_CENTRAL)
1804 	[PDU_DATA_LLCTRL_TYPE_PER_INIT_FEAT_XCHG] = { pdu_validate_per_init_feat_xchg },
1805 #endif /* CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG && CONFIG_BT_CENTRAL */
1806 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
1807 	[PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ] = { pdu_validate_conn_param_req },
1808 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
1809 	[PDU_DATA_LLCTRL_TYPE_CONN_PARAM_RSP] = { pdu_validate_conn_param_rsp },
1810 	[PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND] = { pdu_validate_reject_ext_ind },
1811 #if defined(CONFIG_BT_CTLR_LE_PING)
1812 	[PDU_DATA_LLCTRL_TYPE_PING_REQ] = { pdu_validate_ping_req },
1813 #endif /* CONFIG_BT_CTLR_LE_PING */
1814 	[PDU_DATA_LLCTRL_TYPE_PING_RSP] = { pdu_validate_ping_rsp },
1815 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
1816 	[PDU_DATA_LLCTRL_TYPE_LENGTH_REQ] = { pdu_validate_length_req },
1817 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1818 	[PDU_DATA_LLCTRL_TYPE_LENGTH_RSP] = { pdu_validate_length_rsp },
1819 #if defined(CONFIG_BT_CTLR_PHY)
1820 	[PDU_DATA_LLCTRL_TYPE_PHY_REQ] = { pdu_validate_phy_req },
1821 #endif /* CONFIG_BT_CTLR_PHY */
1822 	[PDU_DATA_LLCTRL_TYPE_PHY_RSP] = { pdu_validate_phy_rsp },
1823 	[PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND] = { pdu_validate_phy_upd_ind },
1824 #if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_CENTRAL)
1825 	[PDU_DATA_LLCTRL_TYPE_MIN_USED_CHAN_IND] = { pdu_validate_min_used_chan_ind },
1826 #endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_CENTRAL */
1827 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
1828 	[PDU_DATA_LLCTRL_TYPE_CTE_REQ] = { pdu_validate_cte_req },
1829 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
1830 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
1831 	[PDU_DATA_LLCTRL_TYPE_CTE_RSP] = { pdu_validate_cte_resp },
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
1833 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
1834 	[PDU_DATA_LLCTRL_TYPE_CLOCK_ACCURACY_REQ] = { pdu_validate_clock_accuracy_req },
1835 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
1836 	[PDU_DATA_LLCTRL_TYPE_CLOCK_ACCURACY_RSP] = { pdu_validate_clock_accuracy_rsp },
1837 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
1838 	[PDU_DATA_LLCTRL_TYPE_PERIODIC_SYNC_IND] = { pdu_validate_periodic_sync_ind },
1839 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
1840 };
1841 
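/*
 * @brief Validate an incoming LL Control PDU against its expected length.
 *
 * A PDU too short to carry an opcode is given the fake opcode
 * PDU_DATA_LLCTRL_TYPE_UNUSED so that downstream code sees a defined value.
 * Opcodes outside the validator table, or without a validator, are
 * considered valid.
 */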
1842 static bool pdu_is_valid(struct pdu_data *pdu)
1843 {
	/* There should be at least one byte of data containing the opcode */
1845 	if (pdu->len < 1) {
		/* Substitute a fake opcode so later stages see a defined value */
1847 		pdu->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
1848 		return false;
1849 	}
1850 
1851 	if (pdu->llctrl.opcode < ARRAY_SIZE(pdu_validate)) {
1852 		pdu_param_validate_t cb;
1853 
1854 		cb = pdu_validate[pdu->llctrl.opcode].validate_cb;
1855 		if (cb) {
1856 			return cb(pdu);
1857 		}
1858 	}
1859 
	/* Consider unsupported and unknown PDUs as valid */
1861 	return true;
1862 }
1863 
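/*
 * @brief Route a TX acknowledgment to whichever active procedure (local
 * and/or remote) is waiting for an ack on this particular tx node.
 */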
1864 void ull_cp_tx_ack(struct ll_conn *conn, struct node_tx *tx)
1865 {
1866 	struct proc_ctx *ctx;
1867 
1868 	ctx = llcp_lr_peek(conn);
1869 	if (ctx && ctx->node_ref.tx_ack == tx) {
1870 		/* TX ack re. local request */
1871 		llcp_lr_tx_ack(conn, ctx, tx);
1872 	}
1873 
1874 	ctx = llcp_rr_peek(conn);
1875 	if (ctx && ctx->node_ref.tx_ack == tx) {
1876 		/* TX ack re. remote response */
1877 		llcp_rr_tx_ack(conn, ctx, tx);
1878 	}
1879 }
1880 
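/*
 * @brief Let the active local and remote procedures, if any, emit their
 * pending notifications towards the Host.
 */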
1881 void ull_cp_tx_ntf(struct ll_conn *conn)
1882 {
1883 	struct proc_ctx *ctx;
1884 
1885 	ctx = llcp_lr_peek(conn);
1886 	if (ctx) {
1887 		/* TX notifications towards Host */
1888 		llcp_lr_tx_ntf(conn, ctx);
1889 	}
1890 
1891 	ctx = llcp_rr_peek(conn);
1892 	if (ctx) {
1893 		/* TX notifications towards Host */
1894 		llcp_rr_tx_ntf(conn, ctx);
1895 	}
1896 }
1897 
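/*
 * @brief Process an incoming LL Control PDU.
 *
 * An invalid PDU that an active procedure expects is silently dropped.
 * Invalid PDUs that no procedure expects, as well as LL_TERMINATE_IND,
 * are handed to the remote request machinery as new procedures. All
 * other PDUs are routed to the active local and/or remote procedure
 * depending on which of them expects the PDU.
 */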
1898 void ull_cp_rx(struct ll_conn *conn, memq_link_t *link, struct node_rx_pdu *rx)
1899 {
1900 	struct proc_ctx *ctx_l;
1901 	struct proc_ctx *ctx_r;
1902 	struct pdu_data *pdu;
1903 	bool unexpected_l;
1904 	bool unexpected_r;
1905 	bool pdu_valid;
1906 
1907 	pdu = (struct pdu_data *)rx->pdu;
1908 
1909 	pdu_valid = pdu_is_valid(pdu);
1910 
1911 	if (!pdu_valid) {
1912 		struct proc_ctx *ctx;
1913 
1914 		ctx = llcp_lr_peek(conn);
1915 		if (ctx && pdu_is_expected(pdu, ctx)) {
1916 			return;
1917 		}
1918 
1919 		ctx = llcp_rr_peek(conn);
1920 		if (ctx && pdu_is_expected(pdu, ctx)) {
1921 			return;
1922 		}
1923 
		/* Process invalid PDUs as a new procedure */
1925 		ctx_l = NULL;
1926 		ctx_r = NULL;
1927 	} else if (pdu_is_terminate(pdu)) {
		/* Process LL_TERMINATE_IND PDUs as a new procedure */
1929 		ctx_l = NULL;
1930 		ctx_r = NULL;
1931 	} else {
1932 		/* Query local and remote activity */
1933 		ctx_l = llcp_lr_peek(conn);
1934 		ctx_r = llcp_rr_peek(conn);
1935 	}
1936 
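	/* Dispatch overview (L = local procedure active, R = remote
	 * procedure active):
	 *
	 *   L+R, (un)expected by both : invalid peer behaviour, terminate
	 *   L+R, unexpected by L only : route to remote procedure
	 *   L+R, unexpected by R only : route to local procedure
	 *   L only, unexpected        : new remote request
	 *   L only, expected          : route to local procedure
	 *   R only                    : route to remote procedure
	 *   neither                   : new remote request
	 */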
1937 	if (ctx_l) {
1938 		/* Local active procedure */
1939 
1940 		if (ctx_r) {
1941 			/* Local active procedure
1942 			 * Remote active procedure
1943 			 */
1944 			unexpected_l = !(pdu_is_expected(pdu, ctx_l) ||
1945 					 pdu_is_unknown(pdu, ctx_l) ||
1946 					 pdu_is_any_reject(pdu, ctx_l));
1947 
1948 			unexpected_r = !(pdu_is_expected(pdu, ctx_r) ||
1949 					 pdu_is_unknown(pdu, ctx_r) ||
1950 					 pdu_is_reject_ext(pdu, ctx_r));
1951 
1952 			if (unexpected_l == unexpected_r) {
				/* Both local and remote procedures are active
				 * and the PDU is either unexpected by both or
				 * expected by both.
				 *
				 * Either situation is the result of invalid
				 * peer behaviour, so terminate.
				 */
1961 				conn->llcp_terminate.reason_final =
1962 					unexpected_r ? BT_HCI_ERR_LMP_PDU_NOT_ALLOWED :
1963 						       BT_HCI_ERR_UNSPECIFIED;
1964 			} else if (unexpected_l) {
1965 				/* Local active procedure
1966 				 * Unexpected local procedure PDU
1967 				 * Remote active procedure
1968 				 * Expected remote procedure PDU
1969 				 */
1970 
1971 				/* Process PDU in remote procedure */
1972 				llcp_rr_rx(conn, ctx_r, link, rx);
1973 			} else if (unexpected_r) {
1974 				/* Local active procedure
1975 				 * Expected local procedure PDU
1976 				 * Remote active procedure
1977 				 * Unexpected remote procedure PDU
1978 				 */
1979 
1980 				/* Process PDU in local procedure */
1981 				llcp_lr_rx(conn, ctx_l, link, rx);
1982 			}
			/* No else clause is needed: if the flags differ then
			 * exactly one of them is true, and both cases are
			 * handled above.
			 */
1986 		} else {
1987 			/* Local active procedure
1988 			 * No remote active procedure
1989 			 */
1990 
1991 			unexpected_l = !(pdu_is_expected(pdu, ctx_l) ||
1992 					 pdu_is_unknown(pdu, ctx_l) ||
1993 					 pdu_is_any_reject(pdu, ctx_l));
1994 
1995 			if (unexpected_l) {
1996 				/* Local active procedure
1997 				 * Unexpected local procedure PDU
1998 				 * No remote active procedure
1999 				 */
2000 
2001 				/* Process PDU as a new remote request */
2002 				LL_ASSERT(pdu_valid);
2003 				llcp_rr_new(conn, link, rx, true);
2004 			} else {
2005 				/* Local active procedure
2006 				 * Expected local procedure PDU
2007 				 * No remote active procedure
2008 				 */
2009 
2010 				/* Process PDU in local procedure */
2011 				llcp_lr_rx(conn, ctx_l, link, rx);
2012 			}
2013 		}
2014 	} else if (ctx_r) {
2015 		/* No local active procedure
2016 		 * Remote active procedure
2017 		 */
2018 
2019 		/* Process PDU in remote procedure */
2020 		llcp_rr_rx(conn, ctx_r, link, rx);
2021 	} else {
2022 		/* No local active procedure
2023 		 * No remote active procedure
2024 		 */
2025 
2026 		/* Process PDU as a new remote request */
2027 		llcp_rr_new(conn, link, rx, pdu_valid);
2028 	}
2029 }
2030 
2031 #ifdef ZTEST_UNITTEST
2032 
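/*
 * Test-only accessors exposing internal buffer accounting and context
 * allocation to the ZTEST unit tests.
 */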
2033 uint16_t llcp_local_ctx_buffers_free(void)
2034 {
2035 	return mem_free_count_get(mem_local_ctx.free);
2036 }
2037 
2038 uint16_t llcp_remote_ctx_buffers_free(void)
2039 {
2040 	return mem_free_count_get(mem_remote_ctx.free);
2041 }
2042 
2043 uint16_t llcp_ctx_buffers_free(void)
2044 {
2045 	return llcp_local_ctx_buffers_free() + llcp_remote_ctx_buffers_free();
2046 }
2047 
2048 #if defined(LLCP_TX_CTRL_BUF_QUEUE_ENABLE)
2049 uint8_t llcp_common_tx_buffer_alloc_count(void)
2050 {
2051 	return common_tx_buffer_alloc;
2052 }
2053 #endif /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */
2054 
2055 struct proc_ctx *llcp_proc_ctx_acquire(void)
2056 {
2057 	return proc_ctx_acquire(&mem_local_ctx);
2058 }
2059 
2060 struct proc_ctx *llcp_create_procedure(enum llcp_proc proc)
2061 {
2062 	return create_procedure(proc, &mem_local_ctx);
2063 }
#endif /* ZTEST_UNITTEST */
2065 
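/*
 * @brief Check that a PHY value names exactly one of the defined PHYs.
 */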
2066 bool phy_valid(uint8_t phy)
2067 {
	/* Equivalent to: exactly one bit set, and the set bit is not an
	 * RFU bit. Comparing against the three defined PHY values directly
	 * is the simplest way to express this.
	 */
2071 	return (phy == PHY_1M || phy == PHY_2M || phy == PHY_CODED);
2072 }
2073