1 /*
2  * Copyright (c) 2020 Demant
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/kernel.h>
8 
9 #include <zephyr/sys/byteorder.h>
10 #include <zephyr/sys/slist.h>
11 #include <zephyr/sys/util.h>
12 
13 #include <zephyr/bluetooth/hci_types.h>
14 
15 #include "hal/ccm.h"
16 
17 #include "util/util.h"
18 #include "util/mem.h"
19 #include "util/memq.h"
20 #include "util/dbuf.h"
21 
22 #include "pdu_df.h"
23 #include "lll/pdu_vendor.h"
24 #include "pdu.h"
25 
26 #include "ll.h"
27 #include "ll_feat.h"
28 #include "ll_settings.h"
29 
30 #include "lll.h"
31 #include "lll_clock.h"
32 #include "lll/lll_df_types.h"
33 #include "lll_conn.h"
34 #include "lll_conn_iso.h"
35 
36 #include "ull_tx_queue.h"
37 
38 #include "isoal.h"
39 #include "ull_iso_types.h"
40 #include "ull_conn_iso_types.h"
41 #include "ull_conn_iso_internal.h"
42 #include "ull_central_iso_internal.h"
43 
44 #include "ull_internal.h"
45 #include "ull_conn_types.h"
46 #include "ull_conn_internal.h"
47 #include "ull_llcp.h"
48 #include "ull_llcp_features.h"
49 #include "ull_llcp_internal.h"
50 #include "ull_peripheral_internal.h"
51 
52 #include <soc.h>
53 #include "hal/debug.h"
54 
55 #define LLCTRL_PDU_SIZE (offsetof(struct pdu_data, llctrl) + sizeof(struct pdu_data_llctrl))
56 #define PROC_CTX_BUF_SIZE WB_UP(sizeof(struct proc_ctx))
57 #define TX_CTRL_BUF_SIZE WB_UP(offsetof(struct node_tx, pdu) + LLCTRL_PDU_SIZE)
58 #define NTF_BUF_SIZE WB_UP(offsetof(struct node_rx_pdu, pdu) + LLCTRL_PDU_SIZE)
59 
60 /* LLCP Allocations */
61 #if defined(LLCP_TX_CTRL_BUF_QUEUE_ENABLE)
62 sys_slist_t tx_buffer_wait_list;
63 static uint8_t common_tx_buffer_alloc;
64 #endif /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */
65 
66 static uint8_t MALIGN(4) buffer_mem_tx[TX_CTRL_BUF_SIZE * LLCP_TX_CTRL_BUF_COUNT];
67 static struct llcp_mem_pool mem_tx = { .pool = buffer_mem_tx };
68 
69 static uint8_t MALIGN(4) buffer_mem_local_ctx[PROC_CTX_BUF_SIZE *
70 				    CONFIG_BT_CTLR_LLCP_LOCAL_PROC_CTX_BUF_NUM];
71 static struct llcp_mem_pool mem_local_ctx = { .pool = buffer_mem_local_ctx };
72 
73 static uint8_t MALIGN(4) buffer_mem_remote_ctx[PROC_CTX_BUF_SIZE *
74 				     CONFIG_BT_CTLR_LLCP_REMOTE_PROC_CTX_BUF_NUM];
75 static struct llcp_mem_pool mem_remote_ctx = { .pool = buffer_mem_remote_ctx };
76 
77 /*
78  * LLCP Resource Management
79  */
proc_ctx_acquire(struct llcp_mem_pool * owner)80 static struct proc_ctx *proc_ctx_acquire(struct llcp_mem_pool *owner)
81 {
82 	struct proc_ctx *ctx;
83 
84 	ctx = (struct proc_ctx *)mem_acquire(&owner->free);
85 
86 	if (ctx) {
87 		/* Set the owner */
88 		ctx->owner = owner;
89 	}
90 
91 	return ctx;
92 }
93 
llcp_proc_ctx_release(struct proc_ctx * ctx)94 void llcp_proc_ctx_release(struct proc_ctx *ctx)
95 {
96 	/* We need to have an owner otherwise the memory allocated would leak */
97 	LL_ASSERT(ctx->owner);
98 
99 	/* Release the memory back to the owner */
100 	mem_release(ctx, &ctx->owner->free);
101 }
102 
103 #if defined(LLCP_TX_CTRL_BUF_QUEUE_ENABLE)
104 /*
105  * @brief Update 'global' tx buffer allowance
106  */
ull_cp_update_tx_buffer_queue(struct ll_conn * conn)107 void ull_cp_update_tx_buffer_queue(struct ll_conn *conn)
108 {
109 	if (conn->llcp.tx_buffer_alloc > CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM) {
110 		common_tx_buffer_alloc -= (conn->llcp.tx_buffer_alloc -
111 					   CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM);
112 	}
113 }
114 
115 
116 /*
117  * @brief Check for per conn pre-allocated tx buffer allowance
118  * @return true if buffer is available
119  */
/*
 * @brief Check for per conn pre-allocated tx buffer allowance
 * @return true if buffer is available
 */
static inline bool static_tx_buffer_available(struct ll_conn *conn, struct proc_ctx *ctx)
{
#if (CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM > 0)
	/* Available while this connection has not yet consumed all of its
	 * pre-allotted tx buffers.
	 */
	return conn->llcp.tx_buffer_alloc < CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM;
#else /* CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM > 0 */
	return false;
#endif /* CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM > 0 */
}
131 
132 /*
133  * @brief pre-alloc/peek of a tx buffer, leave requester on the wait list (@head if first up)
134  *
135  * @return true if alloc is allowed, false if not
136  *
137  */
llcp_tx_alloc_peek(struct ll_conn * conn,struct proc_ctx * ctx)138 bool llcp_tx_alloc_peek(struct ll_conn *conn, struct proc_ctx *ctx)
139 {
140 	if (!static_tx_buffer_available(conn, ctx)) {
141 		/* The conn already has spent its pre-aloted tx buffer(s),
142 		 * so we should consider the common tx buffer pool
143 		 */
144 		if (ctx->wait_reason == WAITING_FOR_NOTHING) {
145 			/* The current procedure is not in line for a tx buffer
146 			 * so sign up on the wait list
147 			 */
148 			sys_slist_append(&tx_buffer_wait_list, &ctx->wait_node);
149 			ctx->wait_reason = WAITING_FOR_TX_BUFFER;
150 		}
151 
152 		/* Now check to see if this procedure context is @ head of the wait list */
153 		if (ctx->wait_reason == WAITING_FOR_TX_BUFFER &&
154 		    sys_slist_peek_head(&tx_buffer_wait_list) == &ctx->wait_node) {
155 			return (common_tx_buffer_alloc <
156 				CONFIG_BT_CTLR_LLCP_COMMON_TX_CTRL_BUF_NUM);
157 		}
158 
159 		return false;
160 	}
161 	return true;
162 }
163 
164 /*
165  * @brief un-peek of a tx buffer, in case ongoing alloc is aborted
166  *
167  */
llcp_tx_alloc_unpeek(struct proc_ctx * ctx)168 void llcp_tx_alloc_unpeek(struct proc_ctx *ctx)
169 {
170 	sys_slist_find_and_remove(&tx_buffer_wait_list, &ctx->wait_node);
171 	ctx->wait_reason = WAITING_FOR_NOTHING;
172 }
173 
174 /*
175  * @brief complete alloc of a tx buffer, must preceded by successful call to
176  * llcp_tx_alloc_peek()
177  *
178  * @return node_tx* that was peek'ed by llcp_tx_alloc_peek()
179  *
180  */
/*
 * @brief Complete the allocation of a tx buffer; must be preceded by a
 * successful llcp_tx_alloc_peek() for the same context.
 *
 * Updates the per-connection and common pool accounting and removes the
 * context from the tx buffer wait list before acquiring the buffer.
 *
 * @return node_tx* that was peek'ed by llcp_tx_alloc_peek()
 */
struct node_tx *llcp_tx_alloc(struct ll_conn *conn, struct proc_ctx *ctx)
{
	conn->llcp.tx_buffer_alloc++;
#if (CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM > 0)
	if (conn->llcp.tx_buffer_alloc > CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM) {
		common_tx_buffer_alloc++;
		/* global buffer allocated, so we're at the head and should just pop head */
		sys_slist_get(&tx_buffer_wait_list);
	} else {
		/* we're allocating conn_tx_buffer, so remove from wait list if waiting */
		if (ctx->wait_reason == WAITING_FOR_TX_BUFFER) {
			sys_slist_find_and_remove(&tx_buffer_wait_list, &ctx->wait_node);
		}
	}
#else /* CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM > 0 */
	/* global buffer allocated, so remove head of wait list */
	common_tx_buffer_alloc++;
	sys_slist_get(&tx_buffer_wait_list);
#endif /* CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM > 0 */
	/* No longer waiting; the buffer request has been satisfied */
	ctx->wait_reason = WAITING_FOR_NOTHING;

	return (struct node_tx *)mem_acquire(&mem_tx.free);
}
204 #else /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */
llcp_tx_alloc_peek(struct ll_conn * conn,struct proc_ctx * ctx)205 bool llcp_tx_alloc_peek(struct ll_conn *conn, struct proc_ctx *ctx)
206 {
207 	ARG_UNUSED(conn);
208 	return mem_tx.free != NULL;
209 }
210 
/*
 * @brief Abort an ongoing tx buffer peek.
 *
 * Intentionally a no-op: without buffer queueing there is no wait list to
 * remove the context from.
 */
void llcp_tx_alloc_unpeek(struct proc_ctx *ctx)
{
	ARG_UNUSED(ctx);
}
216 
/*
 * @brief Allocate a tx control buffer and initialize its PDU; must be
 * preceded by a successful llcp_tx_alloc_peek().
 *
 * NOTE(review): mem_acquire() may return NULL on pool exhaustion and the
 * tx->pdu access below is unchecked — this relies on every caller having
 * peeked first; confirm that invariant holds.
 *
 * @return Allocated node_tx with an initialized pdu_data
 */
struct node_tx *llcp_tx_alloc(struct ll_conn *conn, struct proc_ctx *ctx)
{
	struct pdu_data *pdu;
	struct node_tx *tx;

	ARG_UNUSED(conn);
	tx = (struct node_tx *)mem_acquire(&mem_tx.free);

	/* Initialize the control PDU carried by the node */
	pdu = (struct pdu_data *)tx->pdu;
	ull_pdu_data_init(pdu);

	return tx;
}
230 #endif /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */
231 
tx_release(struct node_tx * tx)232 static void tx_release(struct node_tx *tx)
233 {
234 	mem_release(tx, &mem_tx.free);
235 }
236 
/*
 * @brief Check whether a single notification rx buffer can be allocated.
 */
bool llcp_ntf_alloc_is_available(void)
{
	/* Exactly one rx PDU buffer is needed for a notification */
	return llcp_ntf_alloc_num_available(1U);
}
241 
llcp_ntf_alloc_num_available(uint8_t count)242 bool llcp_ntf_alloc_num_available(uint8_t count)
243 {
244 	return ll_pdu_rx_alloc_peek(count) != NULL;
245 }
246 
/*
 * @brief Allocate an rx PDU buffer to carry a notification towards Host.
 */
struct node_rx_pdu *llcp_ntf_alloc(void)
{
	return ll_pdu_rx_alloc();
}
251 
252 /*
253  * ULL -> LLL Interface
254  */
255 
llcp_tx_enqueue(struct ll_conn * conn,struct node_tx * tx)256 void llcp_tx_enqueue(struct ll_conn *conn, struct node_tx *tx)
257 {
258 	ull_tx_q_enqueue_ctrl(&conn->tx_q, tx);
259 }
260 
llcp_tx_pause_data(struct ll_conn * conn,enum llcp_tx_q_pause_data_mask pause_mask)261 void llcp_tx_pause_data(struct ll_conn *conn, enum llcp_tx_q_pause_data_mask pause_mask)
262 {
263 	/* Only pause the TX Q if we have not already paused it (by any procedure) */
264 	if (conn->llcp.tx_q_pause_data_mask == 0) {
265 		ull_tx_q_pause_data(&conn->tx_q);
266 	}
267 
268 	/* Add the procedure that paused data */
269 	conn->llcp.tx_q_pause_data_mask |= pause_mask;
270 }
271 
llcp_tx_resume_data(struct ll_conn * conn,enum llcp_tx_q_pause_data_mask resume_mask)272 void llcp_tx_resume_data(struct ll_conn *conn, enum llcp_tx_q_pause_data_mask resume_mask)
273 {
274 	/* Remove the procedure that paused data */
275 	conn->llcp.tx_q_pause_data_mask &= ~resume_mask;
276 
277 	/* Only resume the TX Q if we have removed all procedures that paused data */
278 	if (conn->llcp.tx_q_pause_data_mask == 0) {
279 		ull_tx_q_resume_data(&conn->tx_q);
280 	}
281 }
282 
llcp_rx_node_retain(struct proc_ctx * ctx)283 void llcp_rx_node_retain(struct proc_ctx *ctx)
284 {
285 	LL_ASSERT(ctx->node_ref.rx);
286 
287 	/* Mark RX node to NOT release */
288 	ctx->node_ref.rx->hdr.type = NODE_RX_TYPE_RETAIN;
289 
290 	/* store link element reference to use once this node is moved up */
291 	ctx->node_ref.rx->hdr.link = ctx->node_ref.link;
292 }
293 
/*
 * @brief Move any retained rx node(s) and pre-allocated tx node owned by a
 * procedure context onto the connection's release lists, to be freed later
 * via ull_cp_release_nodes().
 */
void llcp_nodes_release(struct ll_conn *conn, struct proc_ctx *ctx)
{
	if (ctx->node_ref.rx && ctx->node_ref.rx->hdr.type == NODE_RX_TYPE_RETAIN) {
		/* RX node retained, so release; chain onto the connection's
		 * rx release list through the link element's mem pointer
		 */
		ctx->node_ref.rx->hdr.link->mem = conn->llcp.rx_node_release;
		conn->llcp.rx_node_release = ctx->node_ref.rx;
	}
#if defined(CONFIG_BT_CTLR_PHY) && defined(CONFIG_BT_CTLR_DATA_LENGTH)
	if (ctx->proc == PROC_PHY_UPDATE && ctx->data.pu.ntf_dle_node) {
		/* RX node retained, so release */
		ctx->data.pu.ntf_dle_node->hdr.link->mem = conn->llcp.rx_node_release;
		conn->llcp.rx_node_release = ctx->data.pu.ntf_dle_node;
	}
#endif /* CONFIG_BT_CTLR_PHY && CONFIG_BT_CTLR_DATA_LENGTH */

	if (ctx->node_ref.tx) {
		/* Chain the tx node onto the connection's tx release list */
		ctx->node_ref.tx->next = conn->llcp.tx_node_release;
		conn->llcp.tx_node_release = ctx->node_ref.tx;
	}
}
314 
315 /*
316  * LLCP Procedure Creation
317  */
318 
create_procedure(enum llcp_proc proc,struct llcp_mem_pool * ctx_pool)319 static struct proc_ctx *create_procedure(enum llcp_proc proc, struct llcp_mem_pool *ctx_pool)
320 {
321 	struct proc_ctx *ctx;
322 
323 	ctx = proc_ctx_acquire(ctx_pool);
324 	if (!ctx) {
325 		return NULL;
326 	}
327 
328 	ctx->proc = proc;
329 	ctx->collision = 0U;
330 	ctx->done = 0U;
331 	ctx->rx_greedy = 0U;
332 	ctx->node_ref.rx = NULL;
333 	ctx->node_ref.tx_ack = NULL;
334 
335 	/* Clear procedure data */
336 	memset((void *)&ctx->data, 0, sizeof(ctx->data));
337 
338 	/* Initialize opcodes fields to known values */
339 	ctx->rx_opcode = ULL_LLCP_INVALID_OPCODE;
340 	ctx->tx_opcode = ULL_LLCP_INVALID_OPCODE;
341 	ctx->response_opcode = ULL_LLCP_INVALID_OPCODE;
342 
343 	return ctx;
344 }
345 
/*
 * @brief Create and initialize a local (initiated by us) procedure context.
 *
 * Allocates a context from the local context pool and dispatches to the
 * procedure-specific init function. Asserts on unknown procedure types.
 *
 * @return Initialized context, or NULL if the local pool is exhausted
 */
struct proc_ctx *llcp_create_local_procedure(enum llcp_proc proc)
{
	struct proc_ctx *ctx;

	ctx = create_procedure(proc, &mem_local_ctx);
	if (!ctx) {
		return NULL;
	}

	switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_LE_PING)
	case PROC_LE_PING:
		llcp_lp_comm_init_proc(ctx);
		break;
#endif /* CONFIG_BT_CTLR_LE_PING */
	case PROC_FEATURE_EXCHANGE:
		llcp_lp_comm_init_proc(ctx);
		break;
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN)
	case PROC_MIN_USED_CHANS:
		llcp_lp_comm_init_proc(ctx);
		break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN */
	case PROC_VERSION_EXCHANGE:
		llcp_lp_comm_init_proc(ctx);
		break;
#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_CENTRAL)
	case PROC_ENCRYPTION_START:
	case PROC_ENCRYPTION_PAUSE:
		llcp_lp_enc_init_proc(ctx);
		break;
#endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_CENTRAL */
#ifdef CONFIG_BT_CTLR_PHY
	case PROC_PHY_UPDATE:
		llcp_lp_pu_init_proc(ctx);
		break;
#endif /* CONFIG_BT_CTLR_PHY */
	case PROC_CONN_UPDATE:
	case PROC_CONN_PARAM_REQ:
		llcp_lp_cu_init_proc(ctx);
		break;
	case PROC_TERMINATE:
		llcp_lp_comm_init_proc(ctx);
		break;
#if defined(CONFIG_BT_CENTRAL)
	case PROC_CHAN_MAP_UPDATE:
		llcp_lp_chmu_init_proc(ctx);
		break;
#endif /* CONFIG_BT_CENTRAL */
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case PROC_DATA_LENGTH_UPDATE:
		llcp_lp_comm_init_proc(ctx);
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	case PROC_CTE_REQ:
		llcp_lp_comm_init_proc(ctx);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	case PROC_CIS_TERMINATE:
		llcp_lp_comm_init_proc(ctx);
		break;
#endif /* defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) */
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
	case PROC_CIS_CREATE:
		llcp_lp_comm_init_proc(ctx);
		break;
#endif /* defined(CONFIG_BT_CTLR_CENTRAL_ISO) */
#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
	case PROC_SCA_UPDATE:
		llcp_lp_comm_init_proc(ctx);
		break;
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */
	default:
		/* Unknown procedure */
		LL_ASSERT(0);
		break;
	}

	return ctx;
}
428 
/*
 * @brief Create and initialize a remote (initiated by peer) procedure
 * context.
 *
 * Allocates a context from the remote context pool and dispatches to the
 * procedure-specific init function. PROC_UNKNOWN is accepted without
 * further init; any other unhandled procedure asserts.
 *
 * @return Initialized context, or NULL if the remote pool is exhausted
 */
struct proc_ctx *llcp_create_remote_procedure(enum llcp_proc proc)
{
	struct proc_ctx *ctx;

	ctx = create_procedure(proc, &mem_remote_ctx);
	if (!ctx) {
		return NULL;
	}

	switch (ctx->proc) {
	case PROC_UNKNOWN:
		/* Nothing to do */
		break;
#if defined(CONFIG_BT_CTLR_LE_PING)
	case PROC_LE_PING:
		llcp_rp_comm_init_proc(ctx);
		break;
#endif /* CONFIG_BT_CTLR_LE_PING */
	case PROC_FEATURE_EXCHANGE:
		llcp_rp_comm_init_proc(ctx);
		break;
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN)
	case PROC_MIN_USED_CHANS:
		llcp_rp_comm_init_proc(ctx);
		break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN */
	case PROC_VERSION_EXCHANGE:
		llcp_rp_comm_init_proc(ctx);
		break;
#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_PERIPHERAL)
	case PROC_ENCRYPTION_START:
	case PROC_ENCRYPTION_PAUSE:
		llcp_rp_enc_init_proc(ctx);
		break;
#endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_PERIPHERAL */
#ifdef CONFIG_BT_CTLR_PHY
	case PROC_PHY_UPDATE:
		llcp_rp_pu_init_proc(ctx);
		break;
#endif /* CONFIG_BT_CTLR_PHY */
	case PROC_CONN_UPDATE:
	case PROC_CONN_PARAM_REQ:
		llcp_rp_cu_init_proc(ctx);
		break;
	case PROC_TERMINATE:
		llcp_rp_comm_init_proc(ctx);
		break;
#if defined(CONFIG_BT_PERIPHERAL)
	case PROC_CHAN_MAP_UPDATE:
		llcp_rp_chmu_init_proc(ctx);
		break;
#endif /* CONFIG_BT_PERIPHERAL */
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case PROC_DATA_LENGTH_UPDATE:
		llcp_rp_comm_init_proc(ctx);
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
	case PROC_CTE_REQ:
		llcp_rp_comm_init_proc(ctx);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
#if defined(CONFIG_BT_PERIPHERAL) && defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	case PROC_CIS_CREATE:
		llcp_rp_cc_init_proc(ctx);
		break;
#endif /* CONFIG_BT_PERIPHERAL && CONFIG_BT_CTLR_PERIPHERAL_ISO */
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	case PROC_CIS_TERMINATE:
		llcp_rp_comm_init_proc(ctx);
		break;
#endif /* defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) */
#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
	case PROC_SCA_UPDATE:
		llcp_rp_comm_init_proc(ctx);
		break;
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */

	default:
		/* Unknown procedure */
		LL_ASSERT(0);
		break;
	}

	return ctx;
}
515 
516 /*
517  * LLCP Public API
518  */
519 
ull_cp_init(void)520 void ull_cp_init(void)
521 {
522 	mem_init(mem_local_ctx.pool, PROC_CTX_BUF_SIZE,
523 		 CONFIG_BT_CTLR_LLCP_LOCAL_PROC_CTX_BUF_NUM,
524 		 &mem_local_ctx.free);
525 	mem_init(mem_remote_ctx.pool, PROC_CTX_BUF_SIZE,
526 		 CONFIG_BT_CTLR_LLCP_REMOTE_PROC_CTX_BUF_NUM,
527 		 &mem_remote_ctx.free);
528 	mem_init(mem_tx.pool, TX_CTRL_BUF_SIZE, LLCP_TX_CTRL_BUF_COUNT, &mem_tx.free);
529 
530 #if defined(LLCP_TX_CTRL_BUF_QUEUE_ENABLE)
531 	/* Reset buffer alloc management */
532 	sys_slist_init(&tx_buffer_wait_list);
533 	common_tx_buffer_alloc = 0;
534 #endif /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */
535 }
536 
/*
 * @brief Reset all per-connection LLCP state; called when a connection
 * context is (re)initialized.
 *
 * @param conn Connection context to initialize
 */
void ull_llcp_init(struct ll_conn *conn)
{
	/* Reset local request fsm */
	llcp_lr_init(conn);
	sys_slist_init(&conn->llcp.local.pend_proc_list);
	conn->llcp.local.pause = 0U;

	/* Reset remote request fsm */
	llcp_rr_init(conn);
	sys_slist_init(&conn->llcp.remote.pend_proc_list);
	conn->llcp.remote.pause = 0U;
	conn->llcp.remote.incompat = INCOMPAT_NO_COLLISION;
	conn->llcp.remote.collision = 0U;
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
	conn->llcp.remote.paused_cmd = PROC_NONE;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */

	/* Reset the Procedure Response Timeout to be disabled,
	 * 'ull_cp_prt_reload_set' must be called to setup this value.
	 */
	conn->llcp.prt_reload = 0U;

	/* Reset the cached version Information (PROC_VERSION_EXCHANGE) */
	memset(&conn->llcp.vex, 0, sizeof(conn->llcp.vex));

#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN)
	/* Reset the cached min used channels information (PROC_MIN_USED_CHANS) */
	memset(&conn->llcp.muc, 0, sizeof(conn->llcp.muc));
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN */

	/* Reset the feature exchange fields */
	memset(&conn->llcp.fex, 0, sizeof(conn->llcp.fex));
	conn->llcp.fex.features_used = ll_feat_get();

#if defined(CONFIG_BT_CTLR_LE_ENC)
	/* Reset encryption related state */
	conn->lll.enc_tx = 0U;
	conn->lll.enc_rx = 0U;
#endif /* CONFIG_BT_CTLR_LE_ENC */

#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	conn->llcp.cte_req.is_enabled = 0U;
	conn->llcp.cte_req.req_expire = 0U;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
	conn->llcp.cte_rsp.is_enabled = 0U;
	conn->llcp.cte_rsp.is_active = 0U;
	conn->llcp.cte_rsp.disable_param = NULL;
	conn->llcp.cte_rsp.disable_cb = NULL;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */

#if defined(LLCP_TX_CTRL_BUF_QUEUE_ENABLE)
	/* No tx control buffers allocated by this connection yet */
	conn->llcp.tx_buffer_alloc = 0;
#endif /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */

	/* No procedure is pausing data PDUs */
	conn->llcp.tx_q_pause_data_mask = 0;
	conn->lll.event_counter = 0;

	/* Empty node release lists */
	conn->llcp.tx_node_release = NULL;
	conn->llcp.rx_node_release = NULL;
}
598 
/*
 * @brief Release a tx control buffer back to the pool, updating the
 * per-connection and common allocation counters when buffer queueing is
 * enabled.
 *
 * @param conn Connection the buffer was allocated for (may be NULL)
 * @param tx   The tx node to release
 */
void ull_cp_release_tx(struct ll_conn *conn, struct node_tx *tx)
{
#if defined(LLCP_TX_CTRL_BUF_QUEUE_ENABLE)
	if (conn) {
		LL_ASSERT(conn->llcp.tx_buffer_alloc > 0);
		/* Buffers beyond the pre-allotted share came from the common
		 * pool, so return the allowance there first
		 */
		if (conn->llcp.tx_buffer_alloc > CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM) {
			common_tx_buffer_alloc--;
		}
		conn->llcp.tx_buffer_alloc--;
	}
#else /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */
	ARG_UNUSED(conn);
#endif /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */
	tx_release(tx);
}
614 
/*
 * @brief Elapse a Procedure Response Timeout counter by a number of
 * connection events. A counter value of zero means the timer is disabled.
 *
 * @param expire        In/out: remaining events; decremented while running
 * @param elapsed_event Number of connection events that have elapsed
 *
 * @return 0 while disabled or still running, -ETIMEDOUT on expiry
 */
static int prt_elapse(uint16_t *expire, uint16_t elapsed_event)
{
	if (*expire == 0U) {
		/* Timer is disabled */
		return 0;
	}

	if (*expire <= elapsed_event) {
		/* Timer expired */
		return -ETIMEDOUT;
	}

	/* Timer still running */
	*expire -= elapsed_event;

	return 0;
}
629 
/*
 * @brief Elapse the local and remote Procedure Response Timeout timers for
 * a connection.
 *
 * @param conn          Connection context
 * @param elapsed_event Number of connection events that have elapsed
 * @param error_code    Out: HCI error code to report (success, the
 *                      terminate procedure's error code, or LL response
 *                      timeout)
 *
 * @return -ETIMEDOUT if either request machine timed out, otherwise 0
 */
int ull_cp_prt_elapse(struct ll_conn *conn, uint16_t elapsed_event, uint8_t *error_code)
{
	int loc_ret;
	int rem_ret;

	loc_ret = prt_elapse(&conn->llcp.local.prt_expire, elapsed_event);
	if (loc_ret == -ETIMEDOUT) {
		/* Local Request Machine timed out */

		struct proc_ctx *ctx;

		ctx = llcp_lr_peek(conn);
		LL_ASSERT(ctx);

		if (ctx->proc == PROC_TERMINATE) {
			/* Active procedure is ACL Termination */
			*error_code = ctx->data.term.error_code;
		} else {
			*error_code = BT_HCI_ERR_LL_RESP_TIMEOUT;
		}

		return -ETIMEDOUT;
	}

	rem_ret = prt_elapse(&conn->llcp.remote.prt_expire, elapsed_event);
	if (rem_ret == -ETIMEDOUT) {
		/* Remote Request Machine timed out */

		*error_code = BT_HCI_ERR_LL_RESP_TIMEOUT;
		return -ETIMEDOUT;
	}

	/* Both timers are still running */
	*error_code = BT_HCI_ERR_SUCCESS;
	return 0;
}
666 
/*
 * @brief Set the Procedure Response Timeout reload value for a connection.
 *
 * @param conn        Connection context
 * @param conn_intv_us Connection interval in microseconds
 */
void ull_cp_prt_reload_set(struct ll_conn *conn, uint32_t conn_intv_us)
{
	/* Convert 40s Procedure Response Timeout into events */
	conn->llcp.prt_reload = RADIO_CONN_EVENTS((40U * 1000U * 1000U), conn_intv_us);
}
672 
/*
 * @brief Advance the LLCP state machines for a connection: remote request
 * machine first, then the local request machine.
 */
void ull_cp_run(struct ll_conn *conn)
{
	llcp_rr_run(conn);
	llcp_lr_run(conn);
}
678 
ull_cp_state_set(struct ll_conn * conn,uint8_t state)679 void ull_cp_state_set(struct ll_conn *conn, uint8_t state)
680 {
681 	switch (state) {
682 	case ULL_CP_CONNECTED:
683 		llcp_rr_connect(conn);
684 		llcp_lr_connect(conn);
685 		break;
686 	case ULL_CP_DISCONNECTED:
687 		llcp_rr_disconnect(conn);
688 		llcp_lr_disconnect(conn);
689 		break;
690 	default:
691 		break;
692 	}
693 }
694 
/*
 * @brief Free all rx and tx nodes queued on the connection's release lists
 * (populated by llcp_nodes_release()).
 *
 * Retained rx nodes are re-marked for release and forwarded towards the
 * Thread context; tx nodes are returned via ull_cp_release_tx().
 */
void ull_cp_release_nodes(struct ll_conn *conn)
{
	struct node_rx_pdu *rx;
	struct node_tx *tx;

	/* release any llcp retained rx nodes */
	rx = conn->llcp.rx_node_release;
	while (rx) {
		struct node_rx_hdr *hdr;

		/* traverse to next rx node; the list is chained through the
		 * link element's mem pointer
		 */
		hdr = &rx->hdr;
		rx = hdr->link->mem;

		/* Mark for buffer for release */
		hdr->type = NODE_RX_TYPE_RELEASE;

		/* enqueue rx node towards Thread */
		ll_rx_put(hdr->link, hdr);
	}
	conn->llcp.rx_node_release = NULL;

	/* release any llcp pre-allocated tx nodes */
	tx = conn->llcp.tx_node_release;
	while (tx) {
		struct node_tx *tx_release;

		/* advance before releasing the current node */
		tx_release = tx;
		tx = tx->next;

		ull_cp_release_tx(conn, tx_release);
	}
	conn->llcp.tx_node_release = NULL;
}
729 
730 #if defined(CONFIG_BT_CTLR_MIN_USED_CHAN)
ull_cp_min_used_chans(struct ll_conn * conn,uint8_t phys,uint8_t min_used_chans)731 uint8_t ull_cp_min_used_chans(struct ll_conn *conn, uint8_t phys, uint8_t min_used_chans)
732 {
733 	struct proc_ctx *ctx;
734 
735 	if (conn->lll.role != BT_HCI_ROLE_PERIPHERAL) {
736 		return BT_HCI_ERR_CMD_DISALLOWED;
737 	}
738 
739 	ctx = llcp_create_local_procedure(PROC_MIN_USED_CHANS);
740 	if (!ctx) {
741 		return BT_HCI_ERR_CMD_DISALLOWED;
742 	}
743 
744 	ctx->data.muc.phys = phys;
745 	ctx->data.muc.min_used_chans = min_used_chans;
746 
747 	llcp_lr_enqueue(conn, ctx);
748 
749 	return BT_HCI_ERR_SUCCESS;
750 }
751 #endif /* CONFIG_BT_CTLR_MIN_USED_CHAN */
752 
753 #if defined(CONFIG_BT_CTLR_LE_PING)
ull_cp_le_ping(struct ll_conn * conn)754 uint8_t ull_cp_le_ping(struct ll_conn *conn)
755 {
756 	struct proc_ctx *ctx;
757 
758 	ctx = llcp_create_local_procedure(PROC_LE_PING);
759 	if (!ctx) {
760 		return BT_HCI_ERR_CMD_DISALLOWED;
761 	}
762 
763 	llcp_lr_enqueue(conn, ctx);
764 
765 	return BT_HCI_ERR_SUCCESS;
766 }
767 #endif /* CONFIG_BT_CTLR_LE_PING */
768 
769 #if defined(CONFIG_BT_CENTRAL) || defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG)
ull_cp_feature_exchange(struct ll_conn * conn,uint8_t host_initiated)770 uint8_t ull_cp_feature_exchange(struct ll_conn *conn, uint8_t host_initiated)
771 {
772 	struct proc_ctx *ctx;
773 
774 	ctx = llcp_create_local_procedure(PROC_FEATURE_EXCHANGE);
775 	if (!ctx) {
776 		return BT_HCI_ERR_CMD_DISALLOWED;
777 	}
778 
779 	ctx->data.fex.host_initiated = host_initiated;
780 
781 	llcp_lr_enqueue(conn, ctx);
782 
783 	return BT_HCI_ERR_SUCCESS;
784 }
785 #endif /* CONFIG_BT_CENTRAL || CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG */
786 
ull_cp_version_exchange(struct ll_conn * conn)787 uint8_t ull_cp_version_exchange(struct ll_conn *conn)
788 {
789 	struct proc_ctx *ctx;
790 
791 	ctx = llcp_create_local_procedure(PROC_VERSION_EXCHANGE);
792 	if (!ctx) {
793 		return BT_HCI_ERR_CMD_DISALLOWED;
794 	}
795 
796 	llcp_lr_enqueue(conn, ctx);
797 
798 	return BT_HCI_ERR_SUCCESS;
799 }
800 
801 #if defined(CONFIG_BT_CTLR_LE_ENC)
802 #if defined(CONFIG_BT_CENTRAL)
ull_cp_encryption_start(struct ll_conn * conn,const uint8_t rand[8],const uint8_t ediv[2],const uint8_t ltk[16])803 uint8_t ull_cp_encryption_start(struct ll_conn *conn, const uint8_t rand[8], const uint8_t ediv[2],
804 				const uint8_t ltk[16])
805 {
806 	struct proc_ctx *ctx;
807 
808 	if (conn->lll.role != BT_HCI_ROLE_CENTRAL) {
809 		return BT_HCI_ERR_CMD_DISALLOWED;
810 	}
811 
812 	ctx = llcp_create_local_procedure(PROC_ENCRYPTION_START);
813 	if (!ctx) {
814 		return BT_HCI_ERR_CMD_DISALLOWED;
815 	}
816 
817 	/* Copy input parameters */
818 	memcpy(ctx->data.enc.rand, rand, sizeof(ctx->data.enc.rand));
819 	ctx->data.enc.ediv[0] = ediv[0];
820 	ctx->data.enc.ediv[1] = ediv[1];
821 	memcpy(ctx->data.enc.ltk, ltk, sizeof(ctx->data.enc.ltk));
822 
823 	/* Enqueue request */
824 	llcp_lr_enqueue(conn, ctx);
825 
826 	return BT_HCI_ERR_SUCCESS;
827 }
828 
ull_cp_encryption_pause(struct ll_conn * conn,const uint8_t rand[8],const uint8_t ediv[2],const uint8_t ltk[16])829 uint8_t ull_cp_encryption_pause(struct ll_conn *conn, const uint8_t rand[8], const uint8_t ediv[2],
830 				const uint8_t ltk[16])
831 {
832 	struct proc_ctx *ctx;
833 
834 	if (conn->lll.role != BT_HCI_ROLE_CENTRAL) {
835 		return BT_HCI_ERR_CMD_DISALLOWED;
836 	}
837 
838 	ctx = llcp_create_local_procedure(PROC_ENCRYPTION_PAUSE);
839 	if (!ctx) {
840 		return BT_HCI_ERR_CMD_DISALLOWED;
841 	}
842 
843 	/* Copy input parameters */
844 	memcpy(ctx->data.enc.rand, rand, sizeof(ctx->data.enc.rand));
845 	ctx->data.enc.ediv[0] = ediv[0];
846 	ctx->data.enc.ediv[1] = ediv[1];
847 	memcpy(ctx->data.enc.ltk, ltk, sizeof(ctx->data.enc.ltk));
848 
849 	/* Enqueue request */
850 	llcp_lr_enqueue(conn, ctx);
851 
852 	return BT_HCI_ERR_SUCCESS;
853 }
854 #endif /* CONFIG_BT_CENTRAL */
855 
ull_cp_encryption_paused(struct ll_conn * conn)856 uint8_t ull_cp_encryption_paused(struct ll_conn *conn)
857 {
858 	struct proc_ctx *ctx;
859 
860 	ctx = llcp_rr_peek(conn);
861 	if (ctx && ctx->proc == PROC_ENCRYPTION_PAUSE) {
862 		return 1;
863 	}
864 
865 	ctx = llcp_lr_peek(conn);
866 	if (ctx && ctx->proc == PROC_ENCRYPTION_PAUSE) {
867 		return 1;
868 	}
869 
870 	return 0;
871 }
872 #endif /* CONFIG_BT_CTLR_LE_ENC */
873 
874 #if defined(CONFIG_BT_CTLR_PHY)
ull_cp_phy_update(struct ll_conn * conn,uint8_t tx,uint8_t flags,uint8_t rx,uint8_t host_initiated)875 uint8_t ull_cp_phy_update(struct ll_conn *conn, uint8_t tx, uint8_t flags, uint8_t rx,
876 			  uint8_t host_initiated)
877 {
878 	struct proc_ctx *ctx;
879 
880 	ctx = llcp_create_local_procedure(PROC_PHY_UPDATE);
881 	if (!ctx) {
882 		return BT_HCI_ERR_CMD_DISALLOWED;
883 	}
884 
885 	ctx->data.pu.tx = tx;
886 	ctx->data.pu.flags = flags;
887 	ctx->data.pu.rx = rx;
888 	ctx->data.pu.host_initiated = host_initiated;
889 
890 	llcp_lr_enqueue(conn, ctx);
891 
892 	return BT_HCI_ERR_SUCCESS;
893 }
894 #endif /* CONFIG_BT_CTLR_PHY */
895 
ull_cp_terminate(struct ll_conn * conn,uint8_t error_code)896 uint8_t ull_cp_terminate(struct ll_conn *conn, uint8_t error_code)
897 {
898 	struct proc_ctx *ctx;
899 
900 	llcp_lr_terminate(conn);
901 	llcp_rr_terminate(conn);
902 
903 	ctx = llcp_create_local_procedure(PROC_TERMINATE);
904 	if (!ctx) {
905 		return BT_HCI_ERR_CMD_DISALLOWED;
906 	}
907 
908 	ctx->data.term.error_code = error_code;
909 
910 	llcp_lr_enqueue(conn, ctx);
911 
912 	return BT_HCI_ERR_SUCCESS;
913 }
914 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
ull_cp_cis_terminate(struct ll_conn * conn,struct ll_conn_iso_stream * cis,uint8_t error_code)915 uint8_t ull_cp_cis_terminate(struct ll_conn *conn,
916 			     struct ll_conn_iso_stream *cis,
917 			     uint8_t error_code)
918 {
919 	struct proc_ctx *ctx;
920 
921 	if (conn->lll.handle != cis->lll.acl_handle) {
922 		return BT_HCI_ERR_CMD_DISALLOWED;
923 	}
924 
925 	ctx = llcp_create_local_procedure(PROC_CIS_TERMINATE);
926 	if (!ctx) {
927 		return BT_HCI_ERR_CMD_DISALLOWED;
928 	}
929 
930 	ctx->data.cis_term.cig_id = cis->group->cig_id;
931 	ctx->data.cis_term.cis_id = cis->cis_id;
932 	ctx->data.cis_term.error_code = error_code;
933 
934 	llcp_lr_enqueue(conn, ctx);
935 
936 	return BT_HCI_ERR_SUCCESS;
937 }
938 #endif /* defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) */
939 
940 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
ull_cp_cis_create(struct ll_conn * conn,struct ll_conn_iso_stream * cis)941 uint8_t ull_cp_cis_create(struct ll_conn *conn, struct ll_conn_iso_stream *cis)
942 {
943 	struct ll_conn_iso_group *cig;
944 	struct proc_ctx *ctx;
945 
946 	if (!conn->llcp.fex.valid) {
947 		/* No feature exchange was performed so initiate before CIS Create */
948 		if (ull_cp_feature_exchange(conn, 0U) != BT_HCI_ERR_SUCCESS) {
949 			return BT_HCI_ERR_CMD_DISALLOWED;
950 		}
951 	}
952 
953 	ctx = llcp_create_local_procedure(PROC_CIS_CREATE);
954 	if (!ctx) {
955 		return BT_HCI_ERR_CMD_DISALLOWED;
956 	}
957 
958 	cig = cis->group;
959 	ctx->data.cis_create.cis_handle = cis->lll.handle;
960 
961 	ctx->data.cis_create.cig_id = cis->group->cig_id;
962 	ctx->data.cis_create.cis_id = cis->cis_id;
963 	ctx->data.cis_create.c_phy = cis->lll.tx.phy;
964 	ctx->data.cis_create.p_phy = cis->lll.rx.phy;
965 	ctx->data.cis_create.c_sdu_interval = cig->c_sdu_interval;
966 	ctx->data.cis_create.p_sdu_interval = cig->p_sdu_interval;
967 	ctx->data.cis_create.c_max_pdu = cis->lll.tx.max_pdu;
968 	ctx->data.cis_create.p_max_pdu = cis->lll.rx.max_pdu;
969 	ctx->data.cis_create.c_max_sdu = cis->c_max_sdu;
970 	ctx->data.cis_create.p_max_sdu = cis->p_max_sdu;
971 	ctx->data.cis_create.iso_interval = cig->iso_interval;
972 	ctx->data.cis_create.framed = cis->framed;
973 	ctx->data.cis_create.nse = cis->lll.nse;
974 	ctx->data.cis_create.sub_interval = cis->lll.sub_interval;
975 	ctx->data.cis_create.c_bn = cis->lll.tx.bn;
976 	ctx->data.cis_create.p_bn = cis->lll.rx.bn;
977 	ctx->data.cis_create.c_ft = cis->lll.tx.ft;
978 	ctx->data.cis_create.p_ft = cis->lll.rx.ft;
979 
980 	/* ctx->data.cis_create.conn_event_count will be filled when Tx PDU is
981 	 * enqueued.
982 	 */
983 
984 	llcp_lr_enqueue(conn, ctx);
985 
986 	return BT_HCI_ERR_SUCCESS;
987 }
988 #endif /* defined(CONFIG_BT_CTLR_CENTRAL_ISO) */
989 
990 #if defined(CONFIG_BT_CENTRAL)
ull_cp_chan_map_update(struct ll_conn * conn,const uint8_t chm[5])991 uint8_t ull_cp_chan_map_update(struct ll_conn *conn, const uint8_t chm[5])
992 {
993 	struct proc_ctx *ctx;
994 
995 	if (conn->lll.role != BT_HCI_ROLE_CENTRAL) {
996 		return BT_HCI_ERR_CMD_DISALLOWED;
997 	}
998 
999 	ctx = llcp_create_local_procedure(PROC_CHAN_MAP_UPDATE);
1000 	if (!ctx) {
1001 		return BT_HCI_ERR_CMD_DISALLOWED;
1002 	}
1003 
1004 	memcpy(ctx->data.chmu.chm, chm, sizeof(ctx->data.chmu.chm));
1005 
1006 	llcp_lr_enqueue(conn, ctx);
1007 
1008 	return BT_HCI_ERR_SUCCESS;
1009 }
1010 #endif /* CONFIG_BT_CENTRAL */
1011 
ull_cp_chan_map_update_pending(struct ll_conn * conn)1012 const uint8_t *ull_cp_chan_map_update_pending(struct ll_conn *conn)
1013 {
1014 	struct proc_ctx *ctx;
1015 
1016 	if (conn->lll.role == BT_HCI_ROLE_CENTRAL) {
1017 		ctx = llcp_lr_peek(conn);
1018 	} else {
1019 		ctx = llcp_rr_peek(conn);
1020 	}
1021 
1022 	if (ctx && ctx->proc == PROC_CHAN_MAP_UPDATE) {
1023 		return ctx->data.chmu.chm;
1024 	}
1025 
1026 	return NULL;
1027 }
1028 
1029 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
ull_cp_data_length_update(struct ll_conn * conn,uint16_t max_tx_octets,uint16_t max_tx_time)1030 uint8_t ull_cp_data_length_update(struct ll_conn *conn, uint16_t max_tx_octets,
1031 				  uint16_t max_tx_time)
1032 {
1033 	struct proc_ctx *ctx;
1034 
1035 	ctx = llcp_create_local_procedure(PROC_DATA_LENGTH_UPDATE);
1036 
1037 	if (!ctx) {
1038 		return BT_HCI_ERR_CMD_DISALLOWED;
1039 	}
1040 
1041 	/* Apply update to local */
1042 	ull_dle_local_tx_update(conn, max_tx_octets, max_tx_time);
1043 
1044 	llcp_lr_enqueue(conn, ctx);
1045 
1046 	return BT_HCI_ERR_SUCCESS;
1047 }
1048 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1049 
1050 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
ull_cp_req_peer_sca(struct ll_conn * conn)1051 uint8_t ull_cp_req_peer_sca(struct ll_conn *conn)
1052 {
1053 	struct proc_ctx *ctx;
1054 
1055 	if (!feature_sca(conn)) {
1056 		return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
1057 	}
1058 
1059 	ctx = llcp_create_local_procedure(PROC_SCA_UPDATE);
1060 
1061 	if (!ctx) {
1062 		return BT_HCI_ERR_CMD_DISALLOWED;
1063 	}
1064 
1065 	llcp_lr_enqueue(conn, ctx);
1066 
1067 	return BT_HCI_ERR_SUCCESS;
1068 }
1069 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
1070 
1071 #if defined(CONFIG_BT_CTLR_LE_ENC)
ull_cp_ltk_req_reply(struct ll_conn * conn,const uint8_t ltk[16])1072 uint8_t ull_cp_ltk_req_reply(struct ll_conn *conn, const uint8_t ltk[16])
1073 {
1074 	struct proc_ctx *ctx;
1075 
1076 	ctx = llcp_rr_peek(conn);
1077 	if (ctx && (ctx->proc == PROC_ENCRYPTION_START || ctx->proc == PROC_ENCRYPTION_PAUSE) &&
1078 	    llcp_rp_enc_ltk_req_reply_allowed(conn, ctx)) {
1079 		memcpy(ctx->data.enc.ltk, ltk, sizeof(ctx->data.enc.ltk));
1080 		llcp_rp_enc_ltk_req_reply(conn, ctx);
1081 		return BT_HCI_ERR_SUCCESS;
1082 	}
1083 	return BT_HCI_ERR_CMD_DISALLOWED;
1084 }
1085 
ull_cp_ltk_req_neq_reply(struct ll_conn * conn)1086 uint8_t ull_cp_ltk_req_neq_reply(struct ll_conn *conn)
1087 {
1088 	struct proc_ctx *ctx;
1089 
1090 	ctx = llcp_rr_peek(conn);
1091 	if (ctx && (ctx->proc == PROC_ENCRYPTION_START || ctx->proc == PROC_ENCRYPTION_PAUSE) &&
1092 	    llcp_rp_enc_ltk_req_reply_allowed(conn, ctx)) {
1093 		llcp_rp_enc_ltk_req_neg_reply(conn, ctx);
1094 		return BT_HCI_ERR_SUCCESS;
1095 	}
1096 	return BT_HCI_ERR_CMD_DISALLOWED;
1097 }
1098 #endif /* CONFIG_BT_CTLR_LE_ENC */
1099 
/* Initiate a connection parameter change.
 *
 * If the peer supports the Connection Parameter Request feature, the CPR
 * procedure is used (allowed for both roles); otherwise only the central
 * may fall back to the legacy Connection Update procedure.
 *
 * @param offsets Optional array of 6 anchor-point offsets for CPR
 *                (presumably 0xffff marks an unused slot — TODO confirm
 *                against PDU definition); may be NULL.
 *
 * @return BT_HCI_ERR_SUCCESS, BT_HCI_ERR_UNSUPP_REMOTE_FEATURE or
 *         BT_HCI_ERR_CMD_DISALLOWED.
 */
uint8_t ull_cp_conn_update(struct ll_conn *conn, uint16_t interval_min, uint16_t interval_max,
			   uint16_t latency, uint16_t timeout, uint16_t *offsets)
{
	struct proc_ctx *ctx;

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
	if (feature_conn_param_req(conn)) {
		ctx = llcp_create_local_procedure(PROC_CONN_PARAM_REQ);
	} else if (conn->lll.role == BT_HCI_ROLE_CENTRAL) {
		ctx = llcp_create_local_procedure(PROC_CONN_UPDATE);
	} else {
		return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
	}
#else /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
	/* Without CPR support the peripheral cannot initiate the change */
	if (conn->lll.role == BT_HCI_ROLE_PERIPHERAL) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
	ctx = llcp_create_local_procedure(PROC_CONN_UPDATE);
#endif /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */

	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Store arguments in corresponding procedure context */
	if (ctx->proc == PROC_CONN_UPDATE) {
		/* Legacy update: interval_min and offsets are not used */
		ctx->data.cu.interval_max = interval_max;
		ctx->data.cu.latency = latency;
		ctx->data.cu.timeout = timeout;
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
	} else if (ctx->proc == PROC_CONN_PARAM_REQ) {
		ctx->data.cu.interval_min = interval_min;
		ctx->data.cu.interval_max = interval_max;
		ctx->data.cu.latency = latency;
		ctx->data.cu.timeout = timeout;
		/* Defaults when the caller passes no offsets array */
		ctx->data.cu.offsets[0] = offsets ? offsets[0] : 0x0000;
		ctx->data.cu.offsets[1] = offsets ? offsets[1] : 0xffff;
		ctx->data.cu.offsets[2] = offsets ? offsets[2] : 0xffff;
		ctx->data.cu.offsets[3] = offsets ? offsets[3] : 0xffff;
		ctx->data.cu.offsets[4] = offsets ? offsets[4] : 0xffff;
		ctx->data.cu.offsets[5] = offsets ? offsets[5] : 0xffff;

		if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
		    (conn->lll.role == BT_HCI_ROLE_PERIPHERAL)) {
			uint16_t handle = ll_conn_handle_get(conn);

			/* Cancel peripheral latency so the request is sent
			 * without waiting out skipped connection events.
			 */
			ull_periph_latency_cancel(conn, handle);
		}
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
	} else {
		LL_ASSERT(0); /* Unknown procedure */
	}

	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}
1157 
1158 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
ull_cp_remote_dle_pending(struct ll_conn * conn)1159 uint8_t ull_cp_remote_dle_pending(struct ll_conn *conn)
1160 {
1161 	struct proc_ctx *ctx;
1162 
1163 	ctx = llcp_rr_peek(conn);
1164 
1165 	return (ctx && ctx->proc == PROC_DATA_LENGTH_UPDATE);
1166 }
1167 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1168 
1169 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
ull_cp_conn_param_req_reply(struct ll_conn * conn)1170 void ull_cp_conn_param_req_reply(struct ll_conn *conn)
1171 {
1172 	struct proc_ctx *ctx;
1173 
1174 	ctx = llcp_rr_peek(conn);
1175 	if (ctx && ctx->proc == PROC_CONN_PARAM_REQ) {
1176 		llcp_rp_conn_param_req_reply(conn, ctx);
1177 	}
1178 }
1179 
ull_cp_conn_param_req_neg_reply(struct ll_conn * conn,uint8_t error_code)1180 void ull_cp_conn_param_req_neg_reply(struct ll_conn *conn, uint8_t error_code)
1181 {
1182 	struct proc_ctx *ctx;
1183 
1184 	ctx = llcp_rr_peek(conn);
1185 	if (ctx && ctx->proc == PROC_CONN_PARAM_REQ) {
1186 		ctx->data.cu.error = error_code;
1187 		llcp_rp_conn_param_req_neg_reply(conn, ctx);
1188 	}
1189 }
1190 
ull_cp_remote_cpr_pending(struct ll_conn * conn)1191 uint8_t ull_cp_remote_cpr_pending(struct ll_conn *conn)
1192 {
1193 	struct proc_ctx *ctx;
1194 
1195 	ctx = llcp_rr_peek(conn);
1196 
1197 	return (ctx && ctx->proc == PROC_CONN_PARAM_REQ);
1198 }
1199 
1200 #if defined(CONFIG_BT_CTLR_USER_CPR_ANCHOR_POINT_MOVE)
ull_cp_remote_cpr_apm_awaiting_reply(struct ll_conn * conn)1201 bool ull_cp_remote_cpr_apm_awaiting_reply(struct ll_conn *conn)
1202 {
1203 	struct proc_ctx *ctx;
1204 
1205 	ctx = llcp_rr_peek(conn);
1206 
1207 	if (ctx && ctx->proc == PROC_CONN_PARAM_REQ) {
1208 		return llcp_rp_conn_param_req_apm_awaiting_reply(ctx);
1209 	}
1210 
1211 	return false;
1212 }
1213 
ull_cp_remote_cpr_apm_reply(struct ll_conn * conn,uint16_t * offsets)1214 void ull_cp_remote_cpr_apm_reply(struct ll_conn *conn, uint16_t *offsets)
1215 {
1216 	struct proc_ctx *ctx;
1217 
1218 	ctx = llcp_rr_peek(conn);
1219 
1220 	if (ctx && ctx->proc == PROC_CONN_PARAM_REQ) {
1221 		ctx->data.cu.offsets[0] = offsets[0];
1222 		ctx->data.cu.offsets[1] = offsets[1];
1223 		ctx->data.cu.offsets[2] = offsets[2];
1224 		ctx->data.cu.offsets[3] = offsets[3];
1225 		ctx->data.cu.offsets[4] = offsets[4];
1226 		ctx->data.cu.offsets[5] = offsets[5];
1227 		ctx->data.cu.error = 0U;
1228 		llcp_rp_conn_param_req_apm_reply(conn, ctx);
1229 	}
1230 }
1231 
ull_cp_remote_cpr_apm_neg_reply(struct ll_conn * conn,uint8_t error_code)1232 void ull_cp_remote_cpr_apm_neg_reply(struct ll_conn *conn, uint8_t error_code)
1233 {
1234 	struct proc_ctx *ctx;
1235 
1236 	ctx = llcp_rr_peek(conn);
1237 
1238 	if (ctx && ctx->proc == PROC_CONN_PARAM_REQ) {
1239 		ctx->data.cu.error = error_code;
1240 		llcp_rp_conn_param_req_apm_reply(conn, ctx);
1241 	}
1242 }
1243 #endif /* CONFIG_BT_CTLR_USER_CPR_ANCHOR_POINT_MOVE */
1244 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
1245 
1246 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
ull_cp_cte_rsp_enable(struct ll_conn * conn,bool enable,uint8_t max_cte_len,uint8_t cte_types)1247 void ull_cp_cte_rsp_enable(struct ll_conn *conn, bool enable, uint8_t max_cte_len,
1248 			   uint8_t cte_types)
1249 {
1250 	conn->llcp.cte_rsp.is_enabled = enable;
1251 
1252 	if (enable) {
1253 		conn->llcp.cte_rsp.max_cte_len = max_cte_len;
1254 		conn->llcp.cte_rsp.cte_types = cte_types;
1255 	}
1256 }
1257 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
1258 
1259 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
/* Initiate a CTE Request procedure.
 *
 * @return BT_HCI_ERR_SUCCESS, BT_HCI_ERR_UNSUPP_REMOTE_FEATURE or
 *         BT_HCI_ERR_CMD_DISALLOWED (no context, or RX PHY is coded).
 */
uint8_t ull_cp_cte_req(struct ll_conn *conn, uint8_t min_cte_len, uint8_t cte_type)
{
	struct proc_ctx *ctx;

	/* If Controller gained, awareness:
	 * - by Feature Exchange control procedure that peer device does not support CTE response,
	 * - by reception LL_UNKNOWN_RSP with unknown type LL_CTE_REQ that peer device does not
	 *   recognize CTE request,
	 * then response to Host that CTE request enable command is not possible due to unsupported
	 * remote feature.
	 */
	if ((conn->llcp.fex.valid &&
	     (!(conn->llcp.fex.features_peer & BIT64(BT_LE_FEAT_BIT_CONN_CTE_RESP)))) ||
	    (!conn->llcp.fex.valid && !feature_cte_req(conn))) {
		return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
	}

	/* The request may be started by periodic CTE request procedure, so it skips earlier
	 * verification of PHY. In case the PHY has changed to CODED the request should be stopped.
	 */
#if defined(CONFIG_BT_CTLR_PHY)
	if (conn->lll.phy_rx != PHY_CODED) {
#else
	/* Without PHY update support the connection stays on 1M: always allowed */
	if (1) {
#endif /* CONFIG_BT_CTLR_PHY */
		ctx = llcp_create_local_procedure(PROC_CTE_REQ);
		if (!ctx) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}

		ctx->data.cte_req.min_len = min_cte_len;
		ctx->data.cte_req.type = cte_type;

		llcp_lr_enqueue(conn, ctx);

		return BT_HCI_ERR_SUCCESS;
	}

	return BT_HCI_ERR_CMD_DISALLOWED;
}
1300 
1301 void ull_cp_cte_req_set_disable(struct ll_conn *conn)
1302 {
1303 	conn->llcp.cte_req.is_enabled = 0U;
1304 	conn->llcp.cte_req.req_interval = 0U;
1305 }
1306 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
1307 
1308 void ull_cp_cc_offset_calc_reply(struct ll_conn *conn, uint32_t cis_offset_min,
1309 				 uint32_t cis_offset_max)
1310 {
1311 	struct proc_ctx *ctx;
1312 
1313 	ctx = llcp_lr_peek(conn);
1314 	if (ctx && ctx->proc == PROC_CIS_CREATE) {
1315 		ctx->data.cis_create.cis_offset_min = cis_offset_min;
1316 		ctx->data.cis_create.cis_offset_max = cis_offset_max;
1317 
1318 		llcp_lp_cc_offset_calc_reply(conn, ctx);
1319 	}
1320 }
1321 
1322 #if defined(CONFIG_BT_PERIPHERAL) && defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
1323 bool ull_cp_cc_awaiting_reply(struct ll_conn *conn)
1324 {
1325 	struct proc_ctx *ctx;
1326 
1327 	ctx = llcp_rr_peek(conn);
1328 	if (ctx && ctx->proc == PROC_CIS_CREATE) {
1329 		return llcp_rp_cc_awaiting_reply(ctx);
1330 	}
1331 
1332 	return false;
1333 }
1334 
1335 uint16_t ull_cp_cc_ongoing_handle(struct ll_conn *conn)
1336 {
1337 	struct proc_ctx *ctx;
1338 
1339 	ctx = llcp_rr_peek(conn);
1340 	if (ctx && ctx->proc == PROC_CIS_CREATE) {
1341 		return ctx->data.cis_create.cis_handle;
1342 	}
1343 
1344 	return 0xFFFF;
1345 }
1346 
1347 void ull_cp_cc_accept(struct ll_conn *conn, uint32_t cis_offset_min)
1348 {
1349 	struct proc_ctx *ctx;
1350 
1351 	ctx = llcp_rr_peek(conn);
1352 	if (ctx && ctx->proc == PROC_CIS_CREATE) {
1353 		if (cis_offset_min > ctx->data.cis_create.cis_offset_min) {
1354 			if (cis_offset_min > ctx->data.cis_create.cis_offset_max) {
1355 				ctx->data.cis_create.error = BT_HCI_ERR_UNSUPP_LL_PARAM_VAL;
1356 				llcp_rp_cc_reject(conn, ctx);
1357 
1358 				return;
1359 			}
1360 
1361 			ctx->data.cis_create.cis_offset_min = cis_offset_min;
1362 		}
1363 
1364 		llcp_rp_cc_accept(conn, ctx);
1365 	}
1366 }
1367 
1368 void ull_cp_cc_reject(struct ll_conn *conn, uint8_t error_code)
1369 {
1370 	struct proc_ctx *ctx;
1371 
1372 	ctx = llcp_rr_peek(conn);
1373 	if (ctx && ctx->proc == PROC_CIS_CREATE) {
1374 		ctx->data.cis_create.error = error_code;
1375 		llcp_rp_cc_reject(conn, ctx);
1376 	}
1377 }
1378 #endif /* CONFIG_BT_PERIPHERAL && CONFIG_BT_CTLR_PERIPHERAL_ISO */
1379 
1380 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
/* Return true if a CIS Create procedure (remote on peripheral, local on
 * central) is waiting for the CIS-established event.
 */
bool ull_cp_cc_awaiting_established(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	/* Peripheral: CIS Create runs as a remote procedure */
	ctx = llcp_rr_peek(conn);
	if (ctx && ctx->proc == PROC_CIS_CREATE) {
		return llcp_rp_cc_awaiting_established(ctx);
	}
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */

#if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
	/* Central: CIS Create runs as a local procedure */
	ctx = llcp_lr_peek(conn);
	if (ctx && ctx->proc == PROC_CIS_CREATE) {
		return llcp_lp_cc_awaiting_established(ctx);
	}
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
	return false;
}
1400 
1401 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
1402 bool ull_cp_cc_cancel(struct ll_conn *conn)
1403 {
1404 	struct proc_ctx *ctx;
1405 
1406 	ctx = llcp_lr_peek(conn);
1407 	if (ctx && ctx->proc == PROC_CIS_CREATE) {
1408 		return llcp_lp_cc_cancel(conn, ctx);
1409 	}
1410 
1411 	return false;
1412 }
1413 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
1414 
/* Signal CIS-established (or failure via error_code) to whichever CIS
 * Create procedure is active: remote on the peripheral, local on the
 * central side.
 */
void ull_cp_cc_established(struct ll_conn *conn, uint8_t error_code)
{
	struct proc_ctx *ctx;

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	/* Peripheral: complete the remote procedure */
	ctx = llcp_rr_peek(conn);
	if (ctx && ctx->proc == PROC_CIS_CREATE) {
		ctx->data.cis_create.error = error_code;
		llcp_rp_cc_established(conn, ctx);
		llcp_rr_check_done(conn, ctx);
	}
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */

#if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
	/* Central: complete the local procedure */
	ctx = llcp_lr_peek(conn);
	if (ctx && ctx->proc == PROC_CIS_CREATE) {
		ctx->data.cis_create.error = error_code;
		llcp_lp_cc_established(conn, ctx);
		llcp_lr_check_done(conn, ctx);
	}
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
}
1437 #endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO || CONFIG_BT_CTLR_CENTRAL_ISO */
1438 
1439 #if defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_CTLR_CENTRAL_ISO)
1440 bool ull_lp_cc_is_active(struct ll_conn *conn)
1441 {
1442 	struct proc_ctx *ctx;
1443 
1444 	ctx = llcp_lr_peek(conn);
1445 	if (ctx && ctx->proc == PROC_CIS_CREATE) {
1446 		return llcp_lp_cc_is_active(ctx);
1447 	}
1448 	return false;
1449 }
1450 
1451 bool ull_lp_cc_is_enqueued(struct ll_conn *conn)
1452 {
1453 	struct proc_ctx *ctx;
1454 
1455 	ctx = llcp_lr_peek_proc(conn, PROC_CIS_CREATE);
1456 
1457 	return (ctx != NULL);
1458 }
1459 #endif /* defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_CTLR_CENTRAL_ISO) */
1460 
1461 static bool pdu_is_expected(struct pdu_data *pdu, struct proc_ctx *ctx)
1462 {
1463 	return (ctx->rx_opcode == pdu->llctrl.opcode || ctx->rx_greedy);
1464 }
1465 
1466 static bool pdu_is_unknown(struct pdu_data *pdu, struct proc_ctx *ctx)
1467 {
1468 	return ((pdu->llctrl.opcode == PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP) &&
1469 		(ctx->tx_opcode == pdu->llctrl.unknown_rsp.type));
1470 }
1471 
1472 static bool pdu_is_reject(struct pdu_data *pdu, struct proc_ctx *ctx)
1473 {
1474 	/* For LL_REJECT_IND there is no simple way of confirming protocol validity of the PDU
1475 	 * for the given procedure, so simply pass it on and let procedure engine deal with it
1476 	 */
1477 	return (pdu->llctrl.opcode == PDU_DATA_LLCTRL_TYPE_REJECT_IND);
1478 }
1479 
1480 static bool pdu_is_reject_ext(struct pdu_data *pdu, struct proc_ctx *ctx)
1481 {
1482 	return ((pdu->llctrl.opcode == PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND) &&
1483 		(ctx->tx_opcode == pdu->llctrl.reject_ext_ind.reject_opcode));
1484 }
1485 
/* Either form of reject: plain LL_REJECT_IND or matching LL_REJECT_EXT_IND. */
static bool pdu_is_any_reject(struct pdu_data *pdu, struct proc_ctx *ctx)
{
	return (pdu_is_reject(pdu, ctx) || pdu_is_reject_ext(pdu, ctx));
}
1490 
1491 static bool pdu_is_terminate(struct pdu_data *pdu)
1492 {
1493 	return pdu->llctrl.opcode == PDU_DATA_LLCTRL_TYPE_TERMINATE_IND;
1494 }
1495 
/* Per-opcode PDU length validators: each checks that the received LL
 * Control PDU length equals the length defined for that opcode.
 */
#define VALIDATE_PDU_LEN(pdu, type) (pdu->len == PDU_DATA_LLCTRL_LEN(type))

#if defined(CONFIG_BT_PERIPHERAL)
static bool pdu_validate_conn_update_ind(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, conn_update_ind);
}

static bool pdu_validate_chan_map_ind(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, chan_map_ind);
}
#endif /* CONFIG_BT_PERIPHERAL */

static bool pdu_validate_terminate_ind(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, terminate_ind);
}

#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_PERIPHERAL)
static bool pdu_validate_enc_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, enc_req);
}
#endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_PERIPHERAL */

#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_CENTRAL)
static bool pdu_validate_enc_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, enc_rsp);
}

static bool pdu_validate_start_enc_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, start_enc_req);
}
#endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_CENTRAL */

#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_PERIPHERAL)
static bool pdu_validate_start_enc_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, start_enc_rsp);
}
#endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_PERIPHERAL */

static bool pdu_validate_unknown_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, unknown_rsp);
}

#if defined(CONFIG_BT_PERIPHERAL)
static bool pdu_validate_feature_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, feature_req);
}
#endif /* CONFIG_BT_PERIPHERAL */

#if defined(CONFIG_BT_CENTRAL)
static bool pdu_validate_feature_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, feature_rsp);
}
#endif /* CONFIG_BT_CENTRAL */

#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_PERIPHERAL)
static bool pdu_validate_pause_enc_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, pause_enc_req);
}
#endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_PERIPHERAL */

#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_CENTRAL)
static bool pdu_validate_pause_enc_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, pause_enc_rsp);
}
#endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_CENTRAL */

static bool pdu_validate_version_ind(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, version_ind);
}

static bool pdu_validate_reject_ind(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, reject_ind);
}

#if defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG) && defined(CONFIG_BT_CENTRAL)
static bool pdu_validate_per_init_feat_xchg(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, per_init_feat_xchg);
}
#endif /* CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG && CONFIG_BT_CENTRAL */

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
static bool pdu_validate_conn_param_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, conn_param_req);
}
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */

static bool pdu_validate_conn_param_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, conn_param_rsp);
}
1602 
static bool pdu_validate_reject_ext_ind(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, reject_ext_ind);
}

#if defined(CONFIG_BT_CTLR_LE_PING)
static bool pdu_validate_ping_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, ping_req);
}
#endif /* CONFIG_BT_CTLR_LE_PING */

static bool pdu_validate_ping_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, ping_rsp);
}

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
static bool pdu_validate_length_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, length_req);
}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

static bool pdu_validate_length_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, length_rsp);
}

#if defined(CONFIG_BT_CTLR_PHY)
static bool pdu_validate_phy_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, phy_req);
}
#endif /* CONFIG_BT_CTLR_PHY */

static bool pdu_validate_phy_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, phy_rsp);
}

static bool pdu_validate_phy_upd_ind(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, phy_upd_ind);
}

#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_CENTRAL)
static bool pdu_validate_min_used_chan_ind(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, min_used_chans_ind);
}
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_CENTRAL */

#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
static bool pdu_validate_cte_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, cte_req);
}
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */

#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
static bool pdu_validate_cte_resp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, cte_rsp);
}
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */

#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
static bool pdu_validate_clock_accuracy_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, clock_accuracy_req);
}
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */

static bool pdu_validate_clock_accuracy_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, clock_accuracy_rsp);
}

/* Signature of a per-opcode PDU validation callback */
typedef bool (*pdu_param_validate_t)(struct pdu_data *pdu);

struct pdu_validate {
	/* TODO can be just size if no other sanity checks here */
	pdu_param_validate_t validate_cb;
};
1688 
/* Sparse lookup table of length-validation callbacks, indexed by LL Control
 * PDU opcode. Entries left NULL (opcode disabled by configuration) are
 * treated as unknown/unsupported by pdu_is_valid().
 */
static const struct pdu_validate pdu_validate[] = {
#if defined(CONFIG_BT_PERIPHERAL)
	[PDU_DATA_LLCTRL_TYPE_CONN_UPDATE_IND] = { pdu_validate_conn_update_ind },
	[PDU_DATA_LLCTRL_TYPE_CHAN_MAP_IND] = { pdu_validate_chan_map_ind },
#endif /* CONFIG_BT_PERIPHERAL */
	[PDU_DATA_LLCTRL_TYPE_TERMINATE_IND] = { pdu_validate_terminate_ind },
#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_PERIPHERAL)
	[PDU_DATA_LLCTRL_TYPE_ENC_REQ] = { pdu_validate_enc_req },
#endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_PERIPHERAL */
#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_CENTRAL)
	[PDU_DATA_LLCTRL_TYPE_ENC_RSP] = { pdu_validate_enc_rsp },
	[PDU_DATA_LLCTRL_TYPE_START_ENC_REQ] = { pdu_validate_start_enc_req },
#endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_CENTRAL */
#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_PERIPHERAL)
	[PDU_DATA_LLCTRL_TYPE_START_ENC_RSP] = { pdu_validate_start_enc_rsp },
#endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_PERIPHERAL */
	[PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP] = { pdu_validate_unknown_rsp },
#if defined(CONFIG_BT_PERIPHERAL)
	[PDU_DATA_LLCTRL_TYPE_FEATURE_REQ] = { pdu_validate_feature_req },
#endif /* CONFIG_BT_PERIPHERAL */
#if defined(CONFIG_BT_CENTRAL)
	[PDU_DATA_LLCTRL_TYPE_FEATURE_RSP] = { pdu_validate_feature_rsp },
#endif /* CONFIG_BT_CENTRAL */
#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_PERIPHERAL)
	[PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_REQ] = { pdu_validate_pause_enc_req },
#endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_PERIPHERAL */
#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_CENTRAL)
	[PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_RSP] = { pdu_validate_pause_enc_rsp },
#endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_CENTRAL */
	[PDU_DATA_LLCTRL_TYPE_VERSION_IND] = { pdu_validate_version_ind },
	[PDU_DATA_LLCTRL_TYPE_REJECT_IND] = { pdu_validate_reject_ind },
#if defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG) && defined(CONFIG_BT_CENTRAL)
	[PDU_DATA_LLCTRL_TYPE_PER_INIT_FEAT_XCHG] = { pdu_validate_per_init_feat_xchg },
#endif /* CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG && CONFIG_BT_CENTRAL */
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
	[PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ] = { pdu_validate_conn_param_req },
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
	[PDU_DATA_LLCTRL_TYPE_CONN_PARAM_RSP] = { pdu_validate_conn_param_rsp },
	[PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND] = { pdu_validate_reject_ext_ind },
#if defined(CONFIG_BT_CTLR_LE_PING)
	[PDU_DATA_LLCTRL_TYPE_PING_REQ] = { pdu_validate_ping_req },
#endif /* CONFIG_BT_CTLR_LE_PING */
	[PDU_DATA_LLCTRL_TYPE_PING_RSP] = { pdu_validate_ping_rsp },
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	[PDU_DATA_LLCTRL_TYPE_LENGTH_REQ] = { pdu_validate_length_req },
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
	[PDU_DATA_LLCTRL_TYPE_LENGTH_RSP] = { pdu_validate_length_rsp },
#if defined(CONFIG_BT_CTLR_PHY)
	[PDU_DATA_LLCTRL_TYPE_PHY_REQ] = { pdu_validate_phy_req },
#endif /* CONFIG_BT_CTLR_PHY */
	[PDU_DATA_LLCTRL_TYPE_PHY_RSP] = { pdu_validate_phy_rsp },
	[PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND] = { pdu_validate_phy_upd_ind },
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_CENTRAL)
	[PDU_DATA_LLCTRL_TYPE_MIN_USED_CHAN_IND] = { pdu_validate_min_used_chan_ind },
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_CENTRAL */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	[PDU_DATA_LLCTRL_TYPE_CTE_REQ] = { pdu_validate_cte_req },
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
	[PDU_DATA_LLCTRL_TYPE_CTE_RSP] = { pdu_validate_cte_resp },
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
	[PDU_DATA_LLCTRL_TYPE_CLOCK_ACCURACY_REQ] = { pdu_validate_clock_accuracy_req },
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */
	[PDU_DATA_LLCTRL_TYPE_CLOCK_ACCURACY_RSP] = { pdu_validate_clock_accuracy_rsp },
};
1755 
/* Validate an incoming LL Control PDU length against its opcode. A PDU too
 * short to even carry an opcode gets a faked PDU_DATA_LLCTRL_TYPE_UNUSED
 * opcode so later stages handle it uniformly.
 */
static bool pdu_is_valid(struct pdu_data *pdu)
{
	/* There should be at least 1 byte of data with the opcode */
	if (pdu->len < 1) {
		/* fake opcode */
		pdu->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
		return false;
	}

	if (pdu->llctrl.opcode < ARRAY_SIZE(pdu_validate)) {
		pdu_param_validate_t cb;

		cb = pdu_validate[pdu->llctrl.opcode].validate_cb;
		if (cb) {
			return cb(pdu);
		}
	}

	/* Consider unsupported and unknown PDUs as valid */
	return true;
}
1777 
1778 void ull_cp_tx_ack(struct ll_conn *conn, struct node_tx *tx)
1779 {
1780 	struct proc_ctx *ctx;
1781 
1782 	ctx = llcp_lr_peek(conn);
1783 	if (ctx && ctx->node_ref.tx_ack == tx) {
1784 		/* TX ack re. local request */
1785 		llcp_lr_tx_ack(conn, ctx, tx);
1786 	}
1787 
1788 	ctx = llcp_rr_peek(conn);
1789 	if (ctx && ctx->node_ref.tx_ack == tx) {
1790 		/* TX ack re. remote response */
1791 		llcp_rr_tx_ack(conn, ctx, tx);
1792 	}
1793 }
1794 
/* Give both active procedures a chance to emit pending Host notifications. */
void ull_cp_tx_ntf(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	/* TX notifications towards Host, local procedure first */
	ctx = llcp_lr_peek(conn);
	if (ctx != NULL) {
		llcp_lr_tx_ntf(conn, ctx);
	}

	/* ... then the remote procedure */
	ctx = llcp_rr_peek(conn);
	if (ctx != NULL) {
		llcp_rr_tx_ntf(conn, ctx);
	}
}
1811 
/* Dispatch a received LL Control PDU to the local and/or remote procedure
 * engine.
 *
 * Invalid-length PDUs that are nevertheless expected by an active procedure
 * are silently dropped; other invalid PDUs and LL_TERMINATE_IND are handled
 * as new remote requests. When both a local and a remote procedure are
 * active, the PDU is routed to whichever one expects it; a PDU expected by
 * both or by neither indicates peer misbehaviour and schedules termination.
 */
void ull_cp_rx(struct ll_conn *conn, memq_link_t *link, struct node_rx_pdu *rx)
{
	struct proc_ctx *ctx_l;
	struct proc_ctx *ctx_r;
	struct pdu_data *pdu;
	bool unexpected_l;
	bool unexpected_r;
	bool pdu_valid;

	pdu = (struct pdu_data *)rx->pdu;

	pdu_valid = pdu_is_valid(pdu);

	if (!pdu_valid) {
		struct proc_ctx *ctx;

		/* Drop an invalid PDU that an active procedure was waiting for */
		ctx = llcp_lr_peek(conn);
		if (ctx && pdu_is_expected(pdu, ctx)) {
			return;
		}

		ctx = llcp_rr_peek(conn);
		if (ctx && pdu_is_expected(pdu, ctx)) {
			return;
		}

		/*  Process invalid PDU's as new procedure */
		ctx_l = NULL;
		ctx_r = NULL;
	} else if (pdu_is_terminate(pdu)) {
		/*  Process LL_TERMINATE_IND PDU's as new procedure */
		ctx_l = NULL;
		ctx_r = NULL;
	} else {
		/* Query local and remote activity */
		ctx_l = llcp_lr_peek(conn);
		ctx_r = llcp_rr_peek(conn);
	}

	if (ctx_l) {
		/* Local active procedure */

		if (ctx_r) {
			/* Local active procedure
			 * Remote active procedure
			 */
			unexpected_l = !(pdu_is_expected(pdu, ctx_l) ||
					 pdu_is_unknown(pdu, ctx_l) ||
					 pdu_is_any_reject(pdu, ctx_l));

			unexpected_r = !(pdu_is_expected(pdu, ctx_r) ||
					 pdu_is_unknown(pdu, ctx_r) ||
					 pdu_is_reject_ext(pdu, ctx_r));

			if (unexpected_l == unexpected_r) {
				/* Both Local and Remote procedure active
				 * and PDU is either
				 * unexpected by both
				 * or
				 * expected by both
				 *
				 * Both situations is a result of invalid behaviour
				 */
				conn->llcp_terminate.reason_final =
					unexpected_r ? BT_HCI_ERR_LMP_PDU_NOT_ALLOWED :
						       BT_HCI_ERR_UNSPECIFIED;
			} else if (unexpected_l) {
				/* Local active procedure
				 * Unexpected local procedure PDU
				 * Remote active procedure
				 * Expected remote procedure PDU
				 */

				/* Process PDU in remote procedure */
				llcp_rr_rx(conn, ctx_r, link, rx);
			} else if (unexpected_r) {
				/* Local active procedure
				 * Expected local procedure PDU
				 * Remote active procedure
				 * Unexpected remote procedure PDU
				 */

				/* Process PDU in local procedure */
				llcp_lr_rx(conn, ctx_l, link, rx);
			}
			/* no else clause as this cannot occur with the logic above:
			 * if they are not identical then one must be true
			 */
		} else {
			/* Local active procedure
			 * No remote active procedure
			 */

			unexpected_l = !(pdu_is_expected(pdu, ctx_l) ||
					 pdu_is_unknown(pdu, ctx_l) ||
					 pdu_is_any_reject(pdu, ctx_l));

			if (unexpected_l) {
				/* Local active procedure
				 * Unexpected local procedure PDU
				 * No remote active procedure
				 */

				/* Process PDU as a new remote request */
				LL_ASSERT(pdu_valid);
				llcp_rr_new(conn, link, rx, true);
			} else {
				/* Local active procedure
				 * Expected local procedure PDU
				 * No remote active procedure
				 */

				/* Process PDU in local procedure */
				llcp_lr_rx(conn, ctx_l, link, rx);
			}
		}
	} else if (ctx_r) {
		/* No local active procedure
		 * Remote active procedure
		 */

		/* Process PDU in remote procedure */
		llcp_rr_rx(conn, ctx_r, link, rx);
	} else {
		/* No local active procedure
		 * No remote active procedure
		 */

		/* Process PDU as a new remote request */
		llcp_rr_new(conn, link, rx, pdu_valid);
	}
}
1944 
#ifdef ZTEST_UNITTEST

/* Unit-test helpers: expose internal pool/buffer state to ZTEST suites. */

/* Number of free local procedure contexts */
uint16_t llcp_local_ctx_buffers_free(void)
{
	return mem_free_count_get(mem_local_ctx.free);
}

/* Number of free remote procedure contexts */
uint16_t llcp_remote_ctx_buffers_free(void)
{
	return mem_free_count_get(mem_remote_ctx.free);
}

/* Total free procedure contexts (local + remote) */
uint16_t llcp_ctx_buffers_free(void)
{
	return llcp_local_ctx_buffers_free() + llcp_remote_ctx_buffers_free();
}

#if defined(LLCP_TX_CTRL_BUF_QUEUE_ENABLE)
/* Current number of allocated common TX control buffers */
uint8_t llcp_common_tx_buffer_alloc_count(void)
{
	return common_tx_buffer_alloc;
}
#endif /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */

/* Acquire a procedure context from the local pool (test hook) */
struct proc_ctx *llcp_proc_ctx_acquire(void)
{
	return proc_ctx_acquire(&mem_local_ctx);
}

/* Create a procedure of the given type from the local pool (test hook) */
struct proc_ctx *llcp_create_procedure(enum llcp_proc proc)
{
	return create_procedure(proc, &mem_local_ctx);
}
#endif
1979