/*
 * Copyright (c) 2020 Demant
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>

#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/slist.h>
#include <zephyr/sys/util.h>

#include <zephyr/bluetooth/hci_types.h>

#include "hal/ccm.h"

#include "util/util.h"
#include "util/mem.h"
#include "util/memq.h"
#include "util/dbuf.h"

#include "pdu_df.h"
#include "lll/pdu_vendor.h"
#include "pdu.h"

#include "ll.h"
#include "ll_feat.h"
#include "ll_settings.h"

#include "lll.h"
#include "lll_clock.h"
#include "lll/lll_df_types.h"
#include "lll_conn.h"
#include "lll_conn_iso.h"

#include "ull_tx_queue.h"

#include "isoal.h"
#include "ull_iso_types.h"
#include "ull_conn_iso_types.h"
#include "ull_conn_iso_internal.h"
#include "ull_central_iso_internal.h"

#include "ull_internal.h"
#include "ull_conn_types.h"
#include "ull_conn_internal.h"
#include "ull_llcp.h"
#include "ull_llcp_features.h"
#include "ull_llcp_internal.h"
#include "ull_peripheral_internal.h"

#include <soc.h>
#include "hal/debug.h"

#define LLCTRL_PDU_SIZE (offsetof(struct pdu_data, llctrl) + sizeof(struct pdu_data_llctrl))
#define PROC_CTX_BUF_SIZE WB_UP(sizeof(struct proc_ctx))
#define TX_CTRL_BUF_SIZE WB_UP(offsetof(struct node_tx, pdu) + LLCTRL_PDU_SIZE)
#define NTF_BUF_SIZE WB_UP(offsetof(struct node_rx_pdu, pdu) + LLCTRL_PDU_SIZE)
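
/*
 * Sizing note (illustrative, not normative; exact values depend on struct
 * packing and word size): a TX control buffer must hold the node_tx header
 * plus the largest LLCP control PDU, i.e.
 *
 *   TX_CTRL_BUF_SIZE = WB_UP(offsetof(struct node_tx, pdu) +
 *                            offsetof(struct pdu_data, llctrl) +
 *                            sizeof(struct pdu_data_llctrl))
 *
 * WB_UP() rounds up to a word boundary so that pool entries stay aligned.
 */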

/* LLCP Allocations */
#if defined(LLCP_TX_CTRL_BUF_QUEUE_ENABLE)
sys_slist_t tx_buffer_wait_list;
static uint8_t common_tx_buffer_alloc;
#endif /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */

static uint8_t MALIGN(4) buffer_mem_tx[TX_CTRL_BUF_SIZE * LLCP_TX_CTRL_BUF_COUNT];
static struct llcp_mem_pool mem_tx = { .pool = buffer_mem_tx };

static uint8_t MALIGN(4) buffer_mem_local_ctx[PROC_CTX_BUF_SIZE *
				    CONFIG_BT_CTLR_LLCP_LOCAL_PROC_CTX_BUF_NUM];
static struct llcp_mem_pool mem_local_ctx = { .pool = buffer_mem_local_ctx };

static uint8_t MALIGN(4) buffer_mem_remote_ctx[PROC_CTX_BUF_SIZE *
				     CONFIG_BT_CTLR_LLCP_REMOTE_PROC_CTX_BUF_NUM];
static struct llcp_mem_pool mem_remote_ctx = { .pool = buffer_mem_remote_ctx };

/*
 * LLCP Resource Management
 */
static struct proc_ctx *proc_ctx_acquire(struct llcp_mem_pool *owner)
{
	struct proc_ctx *ctx;

	ctx = (struct proc_ctx *)mem_acquire(&owner->free);

	if (ctx) {
		/* Set the owner */
		ctx->owner = owner;
	}

	return ctx;
}

void llcp_proc_ctx_release(struct proc_ctx *ctx)
{
	/* We need to have an owner, otherwise the allocated memory would leak */
	LL_ASSERT(ctx->owner);

	/* Release the memory back to the owner */
	mem_release(ctx, &ctx->owner->free);
}
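
/*
 * Usage sketch (illustrative only): a context is always released back to
 * the pool it was acquired from, tracked via ctx->owner:
 *
 *   struct proc_ctx *ctx = proc_ctx_acquire(&mem_local_ctx);
 *
 *   if (ctx) {
 *           // ...use the procedure context...
 *           llcp_proc_ctx_release(ctx); // returns it to mem_local_ctx
 *   }
 */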

#if defined(LLCP_TX_CTRL_BUF_QUEUE_ENABLE)
/*
 * @brief Update 'global' tx buffer allowance
 */
void ull_cp_update_tx_buffer_queue(struct ll_conn *conn)
{
	if (conn->llcp.tx_buffer_alloc > CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM) {
		common_tx_buffer_alloc -= (conn->llcp.tx_buffer_alloc -
					   CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM);
	}
}

/*
 * @brief Check for per conn pre-allocated tx buffer allowance
 * @return true if buffer is available
 */
static inline bool static_tx_buffer_available(struct ll_conn *conn, struct proc_ctx *ctx)
{
#if (CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM > 0)
	/* Check if a per connection pre-allotted tx buffer is available */
	if (conn->llcp.tx_buffer_alloc < CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM) {
		/* This connection has not yet used up all the pre-allotted tx buffers */
		return true;
	}
#endif /* CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM > 0 */
	return false;
}

/*
 * @brief pre-alloc/peek of a tx buffer, leave requester on the wait list (@head if first up)
 *
 * @return true if alloc is allowed, false if not
 *
 */
bool llcp_tx_alloc_peek(struct ll_conn *conn, struct proc_ctx *ctx)
{
	if (!static_tx_buffer_available(conn, ctx)) {
		/* The conn has already spent its pre-allotted tx buffer(s),
		 * so we should consider the common tx buffer pool
		 */
		if (ctx->wait_reason == WAITING_FOR_NOTHING) {
			/* The current procedure is not in line for a tx buffer,
			 * so sign up on the wait list
			 */
			sys_slist_append(&tx_buffer_wait_list, &ctx->wait_node);
			ctx->wait_reason = WAITING_FOR_TX_BUFFER;
		}

		/* Now check to see if this procedure context is @ head of the wait list */
		if (ctx->wait_reason == WAITING_FOR_TX_BUFFER &&
		    sys_slist_peek_head(&tx_buffer_wait_list) == &ctx->wait_node) {
			return (common_tx_buffer_alloc <
				CONFIG_BT_CTLR_LLCP_COMMON_TX_CTRL_BUF_NUM);
		}

		return false;
	}
	return true;
}
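
/*
 * Fairness note: once a context has signed up on tx_buffer_wait_list, the
 * common pool is handed out strictly in FIFO order; a waiting context that
 * is not at the head keeps getting 'false' here even while the common pool
 * still has free buffers.
 */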

/*
 * @brief un-peek of a tx buffer, in case ongoing alloc is aborted
 *
 */
void llcp_tx_alloc_unpeek(struct proc_ctx *ctx)
{
	sys_slist_find_and_remove(&tx_buffer_wait_list, &ctx->wait_node);
	ctx->wait_reason = WAITING_FOR_NOTHING;
}

/*
 * @brief complete alloc of a tx buffer, must be preceded by a successful call to
 * llcp_tx_alloc_peek()
 *
 * @return node_tx* that was peeked by llcp_tx_alloc_peek()
 *
 */
struct node_tx *llcp_tx_alloc(struct ll_conn *conn, struct proc_ctx *ctx)
{
	conn->llcp.tx_buffer_alloc++;
#if (CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM > 0)
	if (conn->llcp.tx_buffer_alloc > CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM) {
		common_tx_buffer_alloc++;
		/* global buffer allocated, so we're at the head and should just pop head */
		sys_slist_get(&tx_buffer_wait_list);
	} else {
		/* we're allocating conn_tx_buffer, so remove from wait list if waiting */
		if (ctx->wait_reason == WAITING_FOR_TX_BUFFER) {
			sys_slist_find_and_remove(&tx_buffer_wait_list, &ctx->wait_node);
		}
	}
#else /* CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM > 0 */
	/* global buffer allocated, so remove head of wait list */
	common_tx_buffer_alloc++;
	sys_slist_get(&tx_buffer_wait_list);
#endif /* CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM > 0 */
	ctx->wait_reason = WAITING_FOR_NOTHING;

	return (struct node_tx *)mem_acquire(&mem_tx.free);
}
#else /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */
bool llcp_tx_alloc_peek(struct ll_conn *conn, struct proc_ctx *ctx)
{
	ARG_UNUSED(conn);
	return mem_tx.free != NULL;
}

void llcp_tx_alloc_unpeek(struct proc_ctx *ctx)
{
	/* Empty on purpose, as unpeek is not needed when no buffer queueing is used */
	ARG_UNUSED(ctx);
}

struct node_tx *llcp_tx_alloc(struct ll_conn *conn, struct proc_ctx *ctx)
{
	struct pdu_data *pdu;
	struct node_tx *tx;

	ARG_UNUSED(conn);
	tx = (struct node_tx *)mem_acquire(&mem_tx.free);

	pdu = (struct pdu_data *)tx->pdu;
	ull_pdu_data_init(pdu);

	return tx;
}
#endif /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */
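
/*
 * Typical call pattern (sketch, simplified from the procedure state
 * machines): peek first, and commit the allocation only when the peek
 * succeeds:
 *
 *   if (llcp_tx_alloc_peek(conn, ctx)) {
 *           struct node_tx *tx = llcp_tx_alloc(conn, ctx);
 *
 *           // ...fill in the control PDU...
 *           llcp_tx_enqueue(conn, tx);
 *   }
 */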

static void tx_release(struct node_tx *tx)
{
	mem_release(tx, &mem_tx.free);
}

bool llcp_ntf_alloc_is_available(void)
{
	return ll_pdu_rx_alloc_peek(1) != NULL;
}

bool llcp_ntf_alloc_num_available(uint8_t count)
{
	return ll_pdu_rx_alloc_peek(count) != NULL;
}

struct node_rx_pdu *llcp_ntf_alloc(void)
{
	return ll_pdu_rx_alloc();
}

/*
 * ULL -> LLL Interface
 */

void llcp_tx_enqueue(struct ll_conn *conn, struct node_tx *tx)
{
	ull_tx_q_enqueue_ctrl(&conn->tx_q, tx);
}

void llcp_tx_pause_data(struct ll_conn *conn, enum llcp_tx_q_pause_data_mask pause_mask)
{
	/* Only pause the TX Q if we have not already paused it (by any procedure) */
	if (conn->llcp.tx_q_pause_data_mask == 0) {
		ull_tx_q_pause_data(&conn->tx_q);
	}

	/* Add the procedure that paused data */
	conn->llcp.tx_q_pause_data_mask |= pause_mask;
}

void llcp_tx_resume_data(struct ll_conn *conn, enum llcp_tx_q_pause_data_mask resume_mask)
{
	/* Remove the procedure that paused data */
	conn->llcp.tx_q_pause_data_mask &= ~resume_mask;

	/* Only resume the TX Q if we have removed all procedures that paused data */
	if (conn->llcp.tx_q_pause_data_mask == 0) {
		ull_tx_q_resume_data(&conn->tx_q);
	}
}
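
/*
 * Pause/resume is tracked per procedure via a bit mask rather than a plain
 * counter, so a single procedure cannot pause or resume twice. Sketch
 * (mask bit names illustrative, not the actual enumerators):
 *
 *   llcp_tx_pause_data(conn, PAUSE_BIT_ENC);  // queue paused
 *   llcp_tx_pause_data(conn, PAUSE_BIT_PHY);  // still paused
 *   llcp_tx_resume_data(conn, PAUSE_BIT_ENC); // still paused
 *   llcp_tx_resume_data(conn, PAUSE_BIT_PHY); // queue resumed
 */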

void llcp_rx_node_retain(struct proc_ctx *ctx)
{
	LL_ASSERT(ctx->node_ref.rx);

	/* Only retain if not already retained */
	if (ctx->node_ref.rx->hdr.type != NODE_RX_TYPE_RETAIN) {
		/* Mark RX node to NOT release */
		ctx->node_ref.rx->hdr.type = NODE_RX_TYPE_RETAIN;

		/* store link element reference to use once this node is moved up */
		ctx->node_ref.rx->hdr.link = ctx->node_ref.link;
	}
}

void llcp_rx_node_release(struct proc_ctx *ctx)
{
	LL_ASSERT(ctx->node_ref.rx);

	/* Only release if retained */
	if (ctx->node_ref.rx->hdr.type == NODE_RX_TYPE_RETAIN) {
		/* Mark RX node to release and release */
		ctx->node_ref.rx->hdr.type = NODE_RX_TYPE_RELEASE;
		ll_rx_put_sched(ctx->node_ref.rx->hdr.link, ctx->node_ref.rx);
	}
}

void llcp_nodes_release(struct ll_conn *conn, struct proc_ctx *ctx)
{
	if (ctx->node_ref.rx && ctx->node_ref.rx->hdr.type == NODE_RX_TYPE_RETAIN) {
		/* RX node retained, so release */
		ctx->node_ref.rx->hdr.link->mem = conn->llcp.rx_node_release;
		ctx->node_ref.rx->hdr.type = NODE_RX_TYPE_RELEASE;
		conn->llcp.rx_node_release = ctx->node_ref.rx;
	}
#if defined(CONFIG_BT_CTLR_PHY) && defined(CONFIG_BT_CTLR_DATA_LENGTH)
	if (ctx->proc == PROC_PHY_UPDATE && ctx->data.pu.ntf_dle_node) {
		/* RX node retained, so release */
		ctx->data.pu.ntf_dle_node->hdr.link->mem = conn->llcp.rx_node_release;
		ctx->data.pu.ntf_dle_node->hdr.type = NODE_RX_TYPE_RELEASE;
		conn->llcp.rx_node_release = ctx->data.pu.ntf_dle_node;
	}
#endif /* CONFIG_BT_CTLR_PHY && CONFIG_BT_CTLR_DATA_LENGTH */

	if (ctx->node_ref.tx) {
		ctx->node_ref.tx->next = conn->llcp.tx_node_release;
		conn->llcp.tx_node_release = ctx->node_ref.tx;
	}
}
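
/*
 * Note: nothing is freed here; the nodes are only chained onto the
 * per-connection rx_node_release/tx_node_release lists. The actual
 * hand-back happens later, in ull_cp_release_nodes().
 */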

/*
 * LLCP Procedure Creation
 */

static struct proc_ctx *create_procedure(enum llcp_proc proc, struct llcp_mem_pool *ctx_pool)
{
	struct proc_ctx *ctx;

	ctx = proc_ctx_acquire(ctx_pool);
	if (!ctx) {
		return NULL;
	}

	ctx->proc = proc;
	ctx->done = 0U;
	ctx->rx_greedy = 0U;
	ctx->node_ref.rx = NULL;
	ctx->node_ref.tx_ack = NULL;
	ctx->state = LLCP_STATE_IDLE;

	/* Clear procedure context data */
	memset((void *)&ctx->data, 0, sizeof(ctx->data));

	/* Initialize opcodes fields to known values */
	ctx->rx_opcode = ULL_LLCP_INVALID_OPCODE;
	ctx->tx_opcode = ULL_LLCP_INVALID_OPCODE;
	ctx->response_opcode = ULL_LLCP_INVALID_OPCODE;

	return ctx;
}

struct proc_ctx *llcp_create_local_procedure(enum llcp_proc proc)
{
	return create_procedure(proc, &mem_local_ctx);
}

struct proc_ctx *llcp_create_remote_procedure(enum llcp_proc proc)
{
	return create_procedure(proc, &mem_remote_ctx);
}
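
/*
 * The public procedure initiators below all follow the same shape (sketch):
 *
 *   ctx = llcp_create_local_procedure(PROC_x);
 *   if (!ctx) {
 *           return BT_HCI_ERR_CMD_DISALLOWED;
 *   }
 *   // ...store the procedure parameters in ctx->data...
 *   llcp_lr_enqueue(conn, ctx);
 *   return BT_HCI_ERR_SUCCESS;
 */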

/*
 * LLCP Public API
 */

void ull_cp_init(void)
{
	mem_init(mem_local_ctx.pool, PROC_CTX_BUF_SIZE,
		 CONFIG_BT_CTLR_LLCP_LOCAL_PROC_CTX_BUF_NUM,
		 &mem_local_ctx.free);
	mem_init(mem_remote_ctx.pool, PROC_CTX_BUF_SIZE,
		 CONFIG_BT_CTLR_LLCP_REMOTE_PROC_CTX_BUF_NUM,
		 &mem_remote_ctx.free);
	mem_init(mem_tx.pool, TX_CTRL_BUF_SIZE, LLCP_TX_CTRL_BUF_COUNT, &mem_tx.free);

#if defined(LLCP_TX_CTRL_BUF_QUEUE_ENABLE)
	/* Reset buffer alloc management */
	sys_slist_init(&tx_buffer_wait_list);
	common_tx_buffer_alloc = 0;
#endif /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */
}

void ull_llcp_init(struct ll_conn *conn)
{
	/* Reset local request fsm */
	llcp_lr_init(conn);
	sys_slist_init(&conn->llcp.local.pend_proc_list);
	conn->llcp.local.pause = 0U;

	/* Reset remote request fsm */
	llcp_rr_init(conn);
	sys_slist_init(&conn->llcp.remote.pend_proc_list);
	conn->llcp.remote.pause = 0U;
	conn->llcp.remote.incompat = INCOMPAT_NO_COLLISION;
	conn->llcp.remote.collision = 0U;
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
	conn->llcp.remote.paused_cmd = PROC_NONE;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */

	/* Reset the Procedure Response Timeout to be disabled,
	 * 'ull_cp_prt_reload_set' must be called to setup this value.
	 */
	conn->llcp.prt_reload = 0U;

	/* Reset the cached version Information (PROC_VERSION_EXCHANGE) */
	memset(&conn->llcp.vex, 0, sizeof(conn->llcp.vex));

#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN)
	/* Reset the cached min used channels information (PROC_MIN_USED_CHANS) */
	memset(&conn->llcp.muc, 0, sizeof(conn->llcp.muc));
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN */

	/* Reset the feature exchange fields */
	memset(&conn->llcp.fex, 0, sizeof(conn->llcp.fex));
	conn->llcp.fex.features_used = ll_feat_get();

#if defined(CONFIG_BT_CTLR_LE_ENC)
	/* Reset encryption related state */
	conn->lll.enc_tx = 0U;
	conn->lll.enc_rx = 0U;
#endif /* CONFIG_BT_CTLR_LE_ENC */

#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	conn->llcp.cte_req.is_enabled = 0U;
	conn->llcp.cte_req.req_expire = 0U;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
	conn->llcp.cte_rsp.is_enabled = 0U;
	conn->llcp.cte_rsp.is_active = 0U;
	conn->llcp.cte_rsp.disable_param = NULL;
	conn->llcp.cte_rsp.disable_cb = NULL;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */

#if defined(LLCP_TX_CTRL_BUF_QUEUE_ENABLE)
	conn->llcp.tx_buffer_alloc = 0;
#endif /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */

	conn->llcp.tx_q_pause_data_mask = 0;
	conn->lll.event_counter = 0;

	conn->llcp.tx_node_release = NULL;
	conn->llcp.rx_node_release = NULL;
}

void ull_cp_release_tx(struct ll_conn *conn, struct node_tx *tx)
{
#if defined(LLCP_TX_CTRL_BUF_QUEUE_ENABLE)
	if (conn) {
		LL_ASSERT(conn->llcp.tx_buffer_alloc > 0);
		if (conn->llcp.tx_buffer_alloc > CONFIG_BT_CTLR_LLCP_PER_CONN_TX_CTRL_BUF_NUM) {
			common_tx_buffer_alloc--;
		}
		conn->llcp.tx_buffer_alloc--;
	}
#else /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */
	ARG_UNUSED(conn);
#endif /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */
	tx_release(tx);
}

static int prt_elapse(uint16_t *expire, uint16_t elapsed_event)
{
	if (*expire != 0U) {
		if (*expire > elapsed_event) {
			*expire -= elapsed_event;
		} else {
			/* Timer expired */
			return -ETIMEDOUT;
		}
	}

	/* Timer still running */
	return 0;
}
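
/*
 * Example: with *expire == 5 and elapsed_event == 2 the timer becomes 3 and
 * 0 is returned; with *expire == 2 and elapsed_event >= 2 the timer has
 * expired and -ETIMEDOUT is returned. *expire == 0 means the timer is
 * disabled and never expires.
 */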

int ull_cp_prt_elapse(struct ll_conn *conn, uint16_t elapsed_event, uint8_t *error_code)
{
	int loc_ret;
	int rem_ret;

	loc_ret = prt_elapse(&conn->llcp.local.prt_expire, elapsed_event);
	if (loc_ret == -ETIMEDOUT) {
		/* Local Request Machine timed out */

		struct proc_ctx *ctx;

		ctx = llcp_lr_peek(conn);
		LL_ASSERT(ctx);

		if (ctx->proc == PROC_TERMINATE) {
			/* Active procedure is ACL Termination */
			*error_code = ctx->data.term.error_code;
		} else {
			*error_code = BT_HCI_ERR_LL_RESP_TIMEOUT;
		}

		return -ETIMEDOUT;
	}

	rem_ret = prt_elapse(&conn->llcp.remote.prt_expire, elapsed_event);
	if (rem_ret == -ETIMEDOUT) {
		/* Remote Request Machine timed out */

		*error_code = BT_HCI_ERR_LL_RESP_TIMEOUT;
		return -ETIMEDOUT;
	}

	/* Both timers are still running */
	*error_code = BT_HCI_ERR_SUCCESS;
	return 0;
}

void ull_cp_prt_reload_set(struct ll_conn *conn, uint32_t conn_intv_us)
{
	/* Convert 40s Procedure Response Timeout into events */
	conn->llcp.prt_reload = RADIO_CONN_EVENTS((40U * 1000U * 1000U), conn_intv_us);
}
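
/*
 * Worked example (assuming RADIO_CONN_EVENTS() rounds up): with a 30 ms
 * connection interval, prt_reload = 40e6 us / 30000 us ~= 1334 connection
 * events.
 */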

void ull_cp_run(struct ll_conn *conn)
{
	llcp_rr_run(conn);
	llcp_lr_run(conn);
}

void ull_cp_state_set(struct ll_conn *conn, uint8_t state)
{
	switch (state) {
	case ULL_CP_CONNECTED:
		llcp_rr_connect(conn);
		llcp_lr_connect(conn);
		break;
	case ULL_CP_DISCONNECTED:
		llcp_rr_disconnect(conn);
		llcp_lr_disconnect(conn);
		break;
	default:
		break;
	}
}

void ull_cp_release_nodes(struct ll_conn *conn)
{
	struct node_rx_pdu *rx;
	struct node_tx *tx;

	/* release any llcp retained rx nodes */
	rx = conn->llcp.rx_node_release;
	while (rx) {
		struct node_rx_hdr *hdr;

		/* traverse to next rx node */
		hdr = &rx->hdr;
		rx = hdr->link->mem;

		/* enqueue rx node towards Thread */
		ll_rx_put(hdr->link, hdr);
	}
	conn->llcp.rx_node_release = NULL;

	/* release any llcp pre-allocated tx nodes */
	tx = conn->llcp.tx_node_release;
	while (tx) {
		struct node_tx *tx_release;

		tx_release = tx;
		tx = tx->next;

		ull_cp_release_tx(conn, tx_release);
	}
	conn->llcp.tx_node_release = NULL;
}

#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN)
uint8_t ull_cp_min_used_chans(struct ll_conn *conn, uint8_t phys, uint8_t min_used_chans)
{
	struct proc_ctx *ctx;

	if (conn->lll.role != BT_HCI_ROLE_PERIPHERAL) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	ctx = llcp_create_local_procedure(PROC_MIN_USED_CHANS);
	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	ctx->data.muc.phys = phys;
	ctx->data.muc.min_used_chans = min_used_chans;

	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN */

#if defined(CONFIG_BT_CTLR_LE_PING)
uint8_t ull_cp_le_ping(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_create_local_procedure(PROC_LE_PING);
	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}
#endif /* CONFIG_BT_CTLR_LE_PING */

#if defined(CONFIG_BT_CENTRAL) || defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG)
uint8_t ull_cp_feature_exchange(struct ll_conn *conn, uint8_t host_initiated)
{
	struct proc_ctx *ctx;

	ctx = llcp_create_local_procedure(PROC_FEATURE_EXCHANGE);
	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	ctx->data.fex.host_initiated = host_initiated;

	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}
#endif /* CONFIG_BT_CENTRAL || CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG */

uint8_t ull_cp_version_exchange(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_create_local_procedure(PROC_VERSION_EXCHANGE);
	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}

#if defined(CONFIG_BT_CTLR_LE_ENC)
#if defined(CONFIG_BT_CENTRAL)
uint8_t ull_cp_encryption_start(struct ll_conn *conn, const uint8_t rand[8], const uint8_t ediv[2],
				const uint8_t ltk[16])
{
	struct proc_ctx *ctx;

	if (conn->lll.role != BT_HCI_ROLE_CENTRAL) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	ctx = llcp_create_local_procedure(PROC_ENCRYPTION_START);
	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Copy input parameters */
	memcpy(ctx->data.enc.rand, rand, sizeof(ctx->data.enc.rand));
	ctx->data.enc.ediv[0] = ediv[0];
	ctx->data.enc.ediv[1] = ediv[1];
	memcpy(ctx->data.enc.ltk, ltk, sizeof(ctx->data.enc.ltk));

	/* Enqueue request */
	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}

uint8_t ull_cp_encryption_pause(struct ll_conn *conn, const uint8_t rand[8], const uint8_t ediv[2],
				const uint8_t ltk[16])
{
	struct proc_ctx *ctx;

	if (conn->lll.role != BT_HCI_ROLE_CENTRAL) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	ctx = llcp_create_local_procedure(PROC_ENCRYPTION_PAUSE);
	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Copy input parameters */
	memcpy(ctx->data.enc.rand, rand, sizeof(ctx->data.enc.rand));
	ctx->data.enc.ediv[0] = ediv[0];
	ctx->data.enc.ediv[1] = ediv[1];
	memcpy(ctx->data.enc.ltk, ltk, sizeof(ctx->data.enc.ltk));

	/* Enqueue request */
	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}
#endif /* CONFIG_BT_CENTRAL */

uint8_t ull_cp_encryption_paused(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);
	if (ctx && ctx->proc == PROC_ENCRYPTION_PAUSE) {
		return 1;
	}

	ctx = llcp_lr_peek(conn);
	if (ctx && ctx->proc == PROC_ENCRYPTION_PAUSE) {
		return 1;
	}

	return 0;
}
#endif /* CONFIG_BT_CTLR_LE_ENC */

#if defined(CONFIG_BT_CTLR_PHY)
uint8_t ull_cp_phy_update(struct ll_conn *conn, uint8_t tx, uint8_t flags, uint8_t rx,
			  uint8_t host_initiated)
{
	struct proc_ctx *ctx;

	ctx = llcp_create_local_procedure(PROC_PHY_UPDATE);
	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	ctx->data.pu.tx = tx;
	ctx->data.pu.flags = flags;
	ctx->data.pu.rx = rx;
	ctx->data.pu.host_initiated = host_initiated;

	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}
#endif /* CONFIG_BT_CTLR_PHY */

uint8_t ull_cp_terminate(struct ll_conn *conn, uint8_t error_code)
{
	struct proc_ctx *ctx;

	llcp_lr_terminate(conn);
	llcp_rr_terminate(conn);

	ctx = llcp_create_local_procedure(PROC_TERMINATE);
	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	ctx->data.term.error_code = error_code;

	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}

#if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
uint8_t ull_cp_cis_terminate(struct ll_conn *conn,
			     struct ll_conn_iso_stream *cis,
			     uint8_t error_code)
{
	struct proc_ctx *ctx;

	if (conn->lll.handle != cis->lll.acl_handle) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	ctx = llcp_create_local_procedure(PROC_CIS_TERMINATE);
	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	ctx->data.cis_term.cig_id = cis->group->cig_id;
	ctx->data.cis_term.cis_id = cis->cis_id;
	ctx->data.cis_term.error_code = error_code;

	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}
#endif /* defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) */

#if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
uint8_t ull_cp_cis_create(struct ll_conn *conn, struct ll_conn_iso_stream *cis)
{
	struct ll_conn_iso_group *cig;
	struct proc_ctx *ctx;

	if (!conn->llcp.fex.valid) {
		/* No feature exchange was performed so initiate before CIS Create */
		if (ull_cp_feature_exchange(conn, 0U) != BT_HCI_ERR_SUCCESS) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}

	ctx = llcp_create_local_procedure(PROC_CIS_CREATE);
	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	cig = cis->group;
	ctx->data.cis_create.cis_handle = cis->lll.handle;

	ctx->data.cis_create.cig_id = cis->group->cig_id;
	ctx->data.cis_create.cis_id = cis->cis_id;
	ctx->data.cis_create.c_phy = cis->lll.tx.phy;
	ctx->data.cis_create.p_phy = cis->lll.rx.phy;
	ctx->data.cis_create.c_sdu_interval = cig->c_sdu_interval;
	ctx->data.cis_create.p_sdu_interval = cig->p_sdu_interval;
	ctx->data.cis_create.c_max_pdu = cis->lll.tx.max_pdu;
	ctx->data.cis_create.p_max_pdu = cis->lll.rx.max_pdu;
	ctx->data.cis_create.c_max_sdu = cis->c_max_sdu;
	ctx->data.cis_create.p_max_sdu = cis->p_max_sdu;
	ctx->data.cis_create.iso_interval = cig->iso_interval;
	ctx->data.cis_create.framed = cis->framed;
	ctx->data.cis_create.nse = cis->lll.nse;
	ctx->data.cis_create.sub_interval = cis->lll.sub_interval;
	ctx->data.cis_create.c_bn = cis->lll.tx.bn;
	ctx->data.cis_create.p_bn = cis->lll.rx.bn;
	ctx->data.cis_create.c_ft = cis->lll.tx.ft;
	ctx->data.cis_create.p_ft = cis->lll.rx.ft;

	/* ctx->data.cis_create.conn_event_count will be filled when Tx PDU is
	 * enqueued.
	 */

	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}
#endif /* defined(CONFIG_BT_CTLR_CENTRAL_ISO) */

#if defined(CONFIG_BT_CENTRAL)
uint8_t ull_cp_chan_map_update(struct ll_conn *conn, const uint8_t chm[5])
{
	struct proc_ctx *ctx;

	if (conn->lll.role != BT_HCI_ROLE_CENTRAL) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	ctx = llcp_create_local_procedure(PROC_CHAN_MAP_UPDATE);
	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	memcpy(ctx->data.chmu.chm, chm, sizeof(ctx->data.chmu.chm));

	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}
#endif /* CONFIG_BT_CENTRAL */

const uint8_t *ull_cp_chan_map_update_pending(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	if (conn->lll.role == BT_HCI_ROLE_CENTRAL) {
		ctx = llcp_lr_peek(conn);
	} else {
		ctx = llcp_rr_peek(conn);
	}

	if (ctx && ctx->proc == PROC_CHAN_MAP_UPDATE) {
		return ctx->data.chmu.chm;
	}

	return NULL;
}

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
uint8_t ull_cp_data_length_update(struct ll_conn *conn, uint16_t max_tx_octets,
				  uint16_t max_tx_time)
{
	struct proc_ctx *ctx;

	if (!feature_dle(conn)) {
		/* Data Length Update procedure not supported */

		/* Returning BT_HCI_ERR_SUCCESS here might seem counter-intuitive,
		 * but nothing in the specification seems to suggest
		 * BT_HCI_ERR_UNSUPP_REMOTE_FEATURE.
		 */
		return BT_HCI_ERR_SUCCESS;
	}

	ctx = llcp_create_local_procedure(PROC_DATA_LENGTH_UPDATE);

	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Apply update to local */
	ull_dle_local_tx_update(conn, max_tx_octets, max_tx_time);

	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
uint8_t ull_cp_req_peer_sca(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	if (!feature_sca(conn)) {
		return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
	}

	ctx = llcp_create_local_procedure(PROC_SCA_UPDATE);

	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */

#if defined(CONFIG_BT_CTLR_LE_ENC)
uint8_t ull_cp_ltk_req_reply(struct ll_conn *conn, const uint8_t ltk[16])
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);
	if (ctx && (ctx->proc == PROC_ENCRYPTION_START || ctx->proc == PROC_ENCRYPTION_PAUSE) &&
	    llcp_rp_enc_ltk_req_reply_allowed(conn, ctx)) {
		memcpy(ctx->data.enc.ltk, ltk, sizeof(ctx->data.enc.ltk));
		llcp_rp_enc_ltk_req_reply(conn, ctx);
		return BT_HCI_ERR_SUCCESS;
	}
	return BT_HCI_ERR_CMD_DISALLOWED;
}

uint8_t ull_cp_ltk_req_neq_reply(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);
	if (ctx && (ctx->proc == PROC_ENCRYPTION_START || ctx->proc == PROC_ENCRYPTION_PAUSE) &&
	    llcp_rp_enc_ltk_req_reply_allowed(conn, ctx)) {
		llcp_rp_enc_ltk_req_neg_reply(conn, ctx);
		return BT_HCI_ERR_SUCCESS;
	}
	return BT_HCI_ERR_CMD_DISALLOWED;
}
#endif /* CONFIG_BT_CTLR_LE_ENC */

uint8_t ull_cp_conn_update(struct ll_conn *conn, uint16_t interval_min, uint16_t interval_max,
			   uint16_t latency, uint16_t timeout, uint16_t *offsets)
{
	struct proc_ctx *ctx;

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
	if (feature_conn_param_req(conn)) {
		ctx = llcp_create_local_procedure(PROC_CONN_PARAM_REQ);
	} else if (conn->lll.role == BT_HCI_ROLE_CENTRAL) {
		ctx = llcp_create_local_procedure(PROC_CONN_UPDATE);
	} else {
		return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
	}
#else /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */
	if (conn->lll.role == BT_HCI_ROLE_PERIPHERAL) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
	ctx = llcp_create_local_procedure(PROC_CONN_UPDATE);
#endif /* !CONFIG_BT_CTLR_CONN_PARAM_REQ */

	if (!ctx) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	/* Store arguments in corresponding procedure context */
	if (ctx->proc == PROC_CONN_UPDATE) {
		ctx->data.cu.interval_max = interval_max;
		ctx->data.cu.latency = latency;
		ctx->data.cu.timeout = timeout;
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
	} else if (ctx->proc == PROC_CONN_PARAM_REQ) {
		ctx->data.cu.interval_min = interval_min;
		ctx->data.cu.interval_max = interval_max;
		ctx->data.cu.latency = latency;
		ctx->data.cu.timeout = timeout;
		ctx->data.cu.offsets[0] = offsets ? offsets[0] : 0x0000;
		ctx->data.cu.offsets[1] = offsets ? offsets[1] : 0xffff;
		ctx->data.cu.offsets[2] = offsets ? offsets[2] : 0xffff;
		ctx->data.cu.offsets[3] = offsets ? offsets[3] : 0xffff;
		ctx->data.cu.offsets[4] = offsets ? offsets[4] : 0xffff;
		ctx->data.cu.offsets[5] = offsets ? offsets[5] : 0xffff;

		if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
		    (conn->lll.role == BT_HCI_ROLE_PERIPHERAL)) {
			uint16_t handle = ll_conn_handle_get(conn);

			ull_periph_latency_cancel(conn, handle);
		}
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
	} else {
		LL_ASSERT(0); /* Unknown procedure */
	}

	llcp_lr_enqueue(conn, ctx);

	return BT_HCI_ERR_SUCCESS;
}
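
/*
 * Note: the six 'offsets' are the anchor point offsets carried by the
 * Connection Parameter Request procedure. When the caller passes NULL,
 * offsets[0] defaults to 0x0000 and the remainder to 0xffff (0xffff
 * meaning "none/invalid" per the Core Specification).
 */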

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
uint8_t ull_cp_remote_dle_pending(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);

	return (ctx && ctx->proc == PROC_DATA_LENGTH_UPDATE);
}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
void ull_cp_conn_param_req_reply(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);
	if (ctx && ctx->proc == PROC_CONN_PARAM_REQ) {
		llcp_rp_conn_param_req_reply(conn, ctx);
	}
}

void ull_cp_conn_param_req_neg_reply(struct ll_conn *conn, uint8_t error_code)
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);
	if (ctx && ctx->proc == PROC_CONN_PARAM_REQ) {
		ctx->data.cu.error = error_code;
		llcp_rp_conn_param_req_neg_reply(conn, ctx);
	}
}

uint8_t ull_cp_remote_cpr_pending(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);

	return (ctx && ctx->proc == PROC_CONN_PARAM_REQ);
}

#if defined(CONFIG_BT_CTLR_USER_CPR_ANCHOR_POINT_MOVE)
bool ull_cp_remote_cpr_apm_awaiting_reply(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);

	if (ctx && ctx->proc == PROC_CONN_PARAM_REQ) {
		return llcp_rp_conn_param_req_apm_awaiting_reply(ctx);
	}

	return false;
}

void ull_cp_remote_cpr_apm_reply(struct ll_conn *conn, uint16_t *offsets)
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);

	if (ctx && ctx->proc == PROC_CONN_PARAM_REQ) {
		ctx->data.cu.offsets[0] = offsets[0];
		ctx->data.cu.offsets[1] = offsets[1];
		ctx->data.cu.offsets[2] = offsets[2];
		ctx->data.cu.offsets[3] = offsets[3];
		ctx->data.cu.offsets[4] = offsets[4];
		ctx->data.cu.offsets[5] = offsets[5];
		ctx->data.cu.error = 0U;
		llcp_rp_conn_param_req_apm_reply(conn, ctx);
	}
}

void ull_cp_remote_cpr_apm_neg_reply(struct ll_conn *conn, uint8_t error_code)
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);

	if (ctx && ctx->proc == PROC_CONN_PARAM_REQ) {
		ctx->data.cu.error = error_code;
		llcp_rp_conn_param_req_apm_reply(conn, ctx);
	}
}
#endif /* CONFIG_BT_CTLR_USER_CPR_ANCHOR_POINT_MOVE */
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */

#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
void ull_cp_cte_rsp_enable(struct ll_conn *conn, bool enable, uint8_t max_cte_len,
			   uint8_t cte_types)
{
	conn->llcp.cte_rsp.is_enabled = enable;

	if (enable) {
		conn->llcp.cte_rsp.max_cte_len = max_cte_len;
		conn->llcp.cte_rsp.cte_types = cte_types;
	}
}
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */

#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
uint8_t ull_cp_cte_req(struct ll_conn *conn, uint8_t min_cte_len, uint8_t cte_type)
{
	struct proc_ctx *ctx;

	/* If the Controller has become aware, either:
	 * - through the Feature Exchange control procedure, that the peer device does not
	 *   support CTE response, or
	 * - by receiving an LL_UNKNOWN_RSP with unknown type LL_CTE_REQ, that the peer device
	 *   does not recognize the CTE request,
	 * then respond to the Host that the CTE request enable command is not possible due to
	 * an unsupported remote feature.
	 */
	if ((conn->llcp.fex.valid &&
	     (!(conn->llcp.fex.features_peer & BIT64(BT_LE_FEAT_BIT_CONN_CTE_RESP)))) ||
	    (!conn->llcp.fex.valid && !feature_cte_req(conn))) {
		return BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
	}

	/* The request may be started by the periodic CTE request procedure, which skips the
	 * earlier verification of the PHY. In case the PHY has changed to CODED, the request
	 * should be stopped.
	 */
#if defined(CONFIG_BT_CTLR_PHY)
	if (conn->lll.phy_rx != PHY_CODED) {
#else
	if (1) {
#endif /* CONFIG_BT_CTLR_PHY */
		ctx = llcp_create_local_procedure(PROC_CTE_REQ);
		if (!ctx) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}

		ctx->data.cte_req.min_len = min_cte_len;
		ctx->data.cte_req.type = cte_type;

		llcp_lr_enqueue(conn, ctx);

		return BT_HCI_ERR_SUCCESS;
	}

	return BT_HCI_ERR_CMD_DISALLOWED;
}

void ull_cp_cte_req_set_disable(struct ll_conn *conn)
{
	conn->llcp.cte_req.is_enabled = 0U;
	conn->llcp.cte_req.req_interval = 0U;
}
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */

void ull_cp_cc_offset_calc_reply(struct ll_conn *conn, uint32_t cis_offset_min,
				 uint32_t cis_offset_max)
{
	struct proc_ctx *ctx;

	ctx = llcp_lr_peek(conn);
	if (ctx && ctx->proc == PROC_CIS_CREATE) {
		ctx->data.cis_create.cis_offset_min = cis_offset_min;
		ctx->data.cis_create.cis_offset_max = cis_offset_max;

		llcp_lp_cc_offset_calc_reply(conn, ctx);
	}
}

#if defined(CONFIG_BT_PERIPHERAL) && defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
bool ull_cp_cc_awaiting_reply(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);
	if (ctx && ctx->proc == PROC_CIS_CREATE) {
		return llcp_rp_cc_awaiting_reply(ctx);
	}

	return false;
}

uint16_t ull_cp_cc_ongoing_handle(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);
	if (ctx && ctx->proc == PROC_CIS_CREATE) {
		return ctx->data.cis_create.cis_handle;
	}

	return 0xFFFF;
}

void ull_cp_cc_accept(struct ll_conn *conn, uint32_t cis_offset_min)
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);
	if (ctx && ctx->proc == PROC_CIS_CREATE) {
		if (cis_offset_min > ctx->data.cis_create.cis_offset_min) {
			if (cis_offset_min > ctx->data.cis_create.cis_offset_max) {
				ctx->data.cis_create.error = BT_HCI_ERR_UNSUPP_LL_PARAM_VAL;
				llcp_rp_cc_reject(conn, ctx);

				return;
			}

			ctx->data.cis_create.cis_offset_min = cis_offset_min;
		}

		llcp_rp_cc_accept(conn, ctx);
	}
}

void ull_cp_cc_reject(struct ll_conn *conn, uint8_t error_code)
{
	struct proc_ctx *ctx;

	ctx = llcp_rr_peek(conn);
	if (ctx && ctx->proc == PROC_CIS_CREATE) {
		ctx->data.cis_create.error = error_code;
		llcp_rp_cc_reject(conn, ctx);
	}
}
#endif /* CONFIG_BT_PERIPHERAL && CONFIG_BT_CTLR_PERIPHERAL_ISO */

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) || defined(CONFIG_BT_CTLR_CENTRAL_ISO)
bool ull_cp_cc_awaiting_established(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	ctx = llcp_rr_peek(conn);
	if (ctx && ctx->proc == PROC_CIS_CREATE) {
		return llcp_rp_cc_awaiting_established(ctx);
	}
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */

#if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
	ctx = llcp_lr_peek(conn);
	if (ctx && ctx->proc == PROC_CIS_CREATE) {
		return llcp_lp_cc_awaiting_established(ctx);
	}
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
	return false;
}

#if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
bool ull_cp_cc_cancel(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_lr_peek(conn);
	if (ctx && ctx->proc == PROC_CIS_CREATE) {
		return llcp_lp_cc_cancel(conn, ctx);
	}

	return false;
}
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO */

void ull_cp_cc_established(struct ll_conn *conn, uint8_t error_code)
{
	struct proc_ctx *ctx;

#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	ctx = llcp_rr_peek(conn);
	if (ctx && ctx->proc == PROC_CIS_CREATE) {
		ctx->data.cis_create.error = error_code;
		llcp_rp_cc_established(conn, ctx);
		llcp_rr_check_done(conn, ctx);
	}
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO */

#if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
	ctx = llcp_lr_peek(conn);
	if (ctx && ctx->proc == PROC_CIS_CREATE) {
		ctx->data.cis_create.error = error_code;
		llcp_lp_cc_established(conn, ctx);
		llcp_lr_check_done(conn, ctx);
	}
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
}
#endif /* CONFIG_BT_CTLR_PERIPHERAL_ISO || CONFIG_BT_CTLR_CENTRAL_ISO */

#if defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_CTLR_CENTRAL_ISO)
bool ull_lp_cc_is_active(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_lr_peek(conn);
	if (ctx && ctx->proc == PROC_CIS_CREATE) {
		return llcp_lp_cc_is_active(ctx);
	}
	return false;
}

bool ull_lp_cc_is_enqueued(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_lr_peek_proc(conn, PROC_CIS_CREATE);

	return (ctx != NULL);
}
#endif /* defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_CTLR_CENTRAL_ISO) */

static bool pdu_is_expected(struct pdu_data *pdu, struct proc_ctx *ctx)
{
	return (ctx->rx_opcode == pdu->llctrl.opcode || ctx->rx_greedy);
}

static bool pdu_is_unknown(struct pdu_data *pdu, struct proc_ctx *ctx)
{
	return ((pdu->llctrl.opcode == PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP) &&
		(ctx->tx_opcode == pdu->llctrl.unknown_rsp.type));
}

static bool pdu_is_reject(struct pdu_data *pdu, struct proc_ctx *ctx)
{
	/* For LL_REJECT_IND there is no simple way of confirming protocol validity of the PDU
	 * for the given procedure, so simply pass it on and let the procedure engine deal
	 * with it
	 */
	return (pdu->llctrl.opcode == PDU_DATA_LLCTRL_TYPE_REJECT_IND);
}

static bool pdu_is_reject_ext(struct pdu_data *pdu, struct proc_ctx *ctx)
{
	return ((pdu->llctrl.opcode == PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND) &&
		(ctx->tx_opcode == pdu->llctrl.reject_ext_ind.reject_opcode));
}

static bool pdu_is_any_reject(struct pdu_data *pdu, struct proc_ctx *ctx)
{
	return (pdu_is_reject_ext(pdu, ctx) || pdu_is_reject(pdu, ctx));
}

static bool pdu_is_terminate(struct pdu_data *pdu)
{
	return pdu->llctrl.opcode == PDU_DATA_LLCTRL_TYPE_TERMINATE_IND;
}

#define VALIDATE_PDU_LEN(pdu, type) (pdu->len == PDU_DATA_LLCTRL_LEN(type))
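
/*
 * Example: VALIDATE_PDU_LEN(pdu, ping_req) is true only when the received
 * PDU's length field equals the exact, fixed length defined for an
 * LL_PING_REQ control PDU; anything shorter or longer fails validation.
 */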

#if defined(CONFIG_BT_PERIPHERAL)
static bool pdu_validate_conn_update_ind(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, conn_update_ind);
}

static bool pdu_validate_chan_map_ind(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, chan_map_ind);
}
#endif /* CONFIG_BT_PERIPHERAL */

static bool pdu_validate_terminate_ind(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, terminate_ind);
}

#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_PERIPHERAL)
static bool pdu_validate_enc_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, enc_req);
}
#endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_PERIPHERAL */

#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_CENTRAL)
static bool pdu_validate_enc_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, enc_rsp);
}

static bool pdu_validate_start_enc_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, start_enc_req);
}
#endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_CENTRAL */

#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_PERIPHERAL)
static bool pdu_validate_start_enc_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, start_enc_rsp);
}
#endif

static bool pdu_validate_unknown_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, unknown_rsp);
}

#if defined(CONFIG_BT_PERIPHERAL)
static bool pdu_validate_feature_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, feature_req);
}
#endif

#if defined(CONFIG_BT_CENTRAL)
static bool pdu_validate_feature_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, feature_rsp);
}
#endif

#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_PERIPHERAL)
static bool pdu_validate_pause_enc_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, pause_enc_req);
}
#endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_PERIPHERAL */

#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_CENTRAL)
static bool pdu_validate_pause_enc_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, pause_enc_rsp);
}
#endif

static bool pdu_validate_version_ind(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, version_ind);
}

static bool pdu_validate_reject_ind(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, reject_ind);
}

#if defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG) && defined(CONFIG_BT_CENTRAL)
static bool pdu_validate_per_init_feat_xchg(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, per_init_feat_xchg);
}
#endif /* CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG && CONFIG_BT_CENTRAL */

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
static bool pdu_validate_conn_param_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, conn_param_req);
}
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */

static bool pdu_validate_conn_param_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, conn_param_rsp);
}

static bool pdu_validate_reject_ext_ind(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, reject_ext_ind);
}

#if defined(CONFIG_BT_CTLR_LE_PING)
static bool pdu_validate_ping_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, ping_req);
}
#endif /* CONFIG_BT_CTLR_LE_PING */

static bool pdu_validate_ping_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, ping_rsp);
}

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
static bool pdu_validate_length_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, length_req);
}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

static bool pdu_validate_length_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, length_rsp);
}

#if defined(CONFIG_BT_CTLR_PHY)
static bool pdu_validate_phy_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, phy_req);
}
#endif /* CONFIG_BT_CTLR_PHY */

static bool pdu_validate_phy_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, phy_rsp);
}

static bool pdu_validate_phy_upd_ind(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, phy_upd_ind);
}

#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_CENTRAL)
static bool pdu_validate_min_used_chan_ind(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, min_used_chans_ind);
}
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_CENTRAL */

#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
static bool pdu_validate_cte_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, cte_req);
}
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */

#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
static bool pdu_validate_cte_resp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, cte_rsp);
}
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */

#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
static bool pdu_validate_clock_accuracy_req(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, clock_accuracy_req);
}
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */

static bool pdu_validate_clock_accuracy_rsp(struct pdu_data *pdu)
{
	return VALIDATE_PDU_LEN(pdu, clock_accuracy_rsp);
}

typedef bool (*pdu_param_validate_t)(struct pdu_data *pdu);

struct pdu_validate {
	/* TODO can be just size if no other sanity checks here */
	pdu_param_validate_t validate_cb;
};

static const struct pdu_validate pdu_validate[] = {
#if defined(CONFIG_BT_PERIPHERAL)
	[PDU_DATA_LLCTRL_TYPE_CONN_UPDATE_IND] = { pdu_validate_conn_update_ind },
	[PDU_DATA_LLCTRL_TYPE_CHAN_MAP_IND] = { pdu_validate_chan_map_ind },
#endif /* CONFIG_BT_PERIPHERAL */
	[PDU_DATA_LLCTRL_TYPE_TERMINATE_IND] = { pdu_validate_terminate_ind },
#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_PERIPHERAL)
	[PDU_DATA_LLCTRL_TYPE_ENC_REQ] = { pdu_validate_enc_req },
#endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_PERIPHERAL */
#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_CENTRAL)
	[PDU_DATA_LLCTRL_TYPE_ENC_RSP] = { pdu_validate_enc_rsp },
	[PDU_DATA_LLCTRL_TYPE_START_ENC_REQ] = { pdu_validate_start_enc_req },
#endif
#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_PERIPHERAL)
	[PDU_DATA_LLCTRL_TYPE_START_ENC_RSP] = { pdu_validate_start_enc_rsp },
#endif
	[PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP] = { pdu_validate_unknown_rsp },
#if defined(CONFIG_BT_PERIPHERAL)
	[PDU_DATA_LLCTRL_TYPE_FEATURE_REQ] = { pdu_validate_feature_req },
#endif
#if defined(CONFIG_BT_CENTRAL)
	[PDU_DATA_LLCTRL_TYPE_FEATURE_RSP] = { pdu_validate_feature_rsp },
#endif
#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_PERIPHERAL)
	[PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_REQ] = { pdu_validate_pause_enc_req },
#endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_PERIPHERAL */
#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_CENTRAL)
	[PDU_DATA_LLCTRL_TYPE_PAUSE_ENC_RSP] = { pdu_validate_pause_enc_rsp },
#endif
	[PDU_DATA_LLCTRL_TYPE_VERSION_IND] = { pdu_validate_version_ind },
	[PDU_DATA_LLCTRL_TYPE_REJECT_IND] = { pdu_validate_reject_ind },
#if defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG) && defined(CONFIG_BT_CENTRAL)
	[PDU_DATA_LLCTRL_TYPE_PER_INIT_FEAT_XCHG] = { pdu_validate_per_init_feat_xchg },
#endif /* CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG && CONFIG_BT_CENTRAL */
#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
	[PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ] = { pdu_validate_conn_param_req },
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
	[PDU_DATA_LLCTRL_TYPE_CONN_PARAM_RSP] = { pdu_validate_conn_param_rsp },
	[PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND] = { pdu_validate_reject_ext_ind },
#if defined(CONFIG_BT_CTLR_LE_PING)
	[PDU_DATA_LLCTRL_TYPE_PING_REQ] = { pdu_validate_ping_req },
#endif /* CONFIG_BT_CTLR_LE_PING */
	[PDU_DATA_LLCTRL_TYPE_PING_RSP] = { pdu_validate_ping_rsp },
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	[PDU_DATA_LLCTRL_TYPE_LENGTH_REQ] = { pdu_validate_length_req },
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
	[PDU_DATA_LLCTRL_TYPE_LENGTH_RSP] = { pdu_validate_length_rsp },
#if defined(CONFIG_BT_CTLR_PHY)
	[PDU_DATA_LLCTRL_TYPE_PHY_REQ] = { pdu_validate_phy_req },
#endif /* CONFIG_BT_CTLR_PHY */
	[PDU_DATA_LLCTRL_TYPE_PHY_RSP] = { pdu_validate_phy_rsp },
	[PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND] = { pdu_validate_phy_upd_ind },
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_CENTRAL)
	[PDU_DATA_LLCTRL_TYPE_MIN_USED_CHAN_IND] = { pdu_validate_min_used_chan_ind },
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_CENTRAL */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	[PDU_DATA_LLCTRL_TYPE_CTE_REQ] = { pdu_validate_cte_req },
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
	[PDU_DATA_LLCTRL_TYPE_CTE_RSP] = { pdu_validate_cte_resp },
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
	[PDU_DATA_LLCTRL_TYPE_CLOCK_ACCURACY_REQ] = { pdu_validate_clock_accuracy_req },
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */
	[PDU_DATA_LLCTRL_TYPE_CLOCK_ACCURACY_RSP] = { pdu_validate_clock_accuracy_rsp },
};

static bool pdu_is_valid(struct pdu_data *pdu)
{
	/* there should be at least 1 byte of data with the opcode */
	if (pdu->len < 1) {
		/* fake opcode */
		pdu->llctrl.opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
		return false;
	}

	if (pdu->llctrl.opcode < ARRAY_SIZE(pdu_validate)) {
		pdu_param_validate_t cb;

		cb = pdu_validate[pdu->llctrl.opcode].validate_cb;
		if (cb) {
			return cb(pdu);
		}
	}

	/* consider unsupported and unknown PDUs as valid */
	return true;
}

void ull_cp_tx_ack(struct ll_conn *conn, struct node_tx *tx)
{
	struct proc_ctx *ctx;

	ctx = llcp_lr_peek(conn);
	if (ctx && ctx->node_ref.tx_ack == tx) {
		/* TX ack re. local request */
		llcp_lr_tx_ack(conn, ctx, tx);
	}

	ctx = llcp_rr_peek(conn);
	if (ctx && ctx->node_ref.tx_ack == tx) {
		/* TX ack re. remote response */
		llcp_rr_tx_ack(conn, ctx, tx);
	}
}

void ull_cp_tx_ntf(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_lr_peek(conn);
	if (ctx) {
		/* TX notifications towards Host */
		llcp_lr_tx_ntf(conn, ctx);
	}

	ctx = llcp_rr_peek(conn);
	if (ctx) {
		/* TX notifications towards Host */
		llcp_rr_tx_ntf(conn, ctx);
	}
}
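
/*
 * RX dispatch overview (informal): an incoming LL control PDU is matched
 * against the active local and/or remote procedure, if any:
 *
 *   local | remote | PDU expected by  | action
 *   ------+--------+------------------+-----------------------------------
 *    yes  |  yes   | exactly one      | route to that procedure
 *    yes  |  yes   | both or neither  | invalid peer behaviour, set
 *         |        |                  | terminate reason
 *    yes  |  no    | local            | route to local procedure
 *    yes  |  no    | not local        | treat as new remote procedure
 *    no   |  yes   | (any)            | route to remote procedure
 *    no   |  no    | (any)            | treat as new remote procedure
 *
 * LL_TERMINATE_IND always starts a new remote procedure; a malformed PDU
 * is silently dropped when an active procedure expected its opcode, and
 * otherwise handed to the remote request machine for rejection.
 */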
void ull_cp_rx(struct ll_conn *conn, memq_link_t *link, struct node_rx_pdu *rx)
{
	struct proc_ctx *ctx_l;
	struct proc_ctx *ctx_r;
	struct pdu_data *pdu;
	bool unexpected_l;
	bool unexpected_r;
	bool pdu_valid;

	pdu = (struct pdu_data *)rx->pdu;

	pdu_valid = pdu_is_valid(pdu);

	if (!pdu_valid) {
		struct proc_ctx *ctx;

		ctx = llcp_lr_peek(conn);
		if (ctx && pdu_is_expected(pdu, ctx)) {
			return;
		}

		ctx = llcp_rr_peek(conn);
		if (ctx && pdu_is_expected(pdu, ctx)) {
			return;
		}

		/* Process invalid PDUs as a new procedure */
		ctx_l = NULL;
		ctx_r = NULL;
	} else if (pdu_is_terminate(pdu)) {
		/* Process LL_TERMINATE_IND PDUs as a new procedure */
		ctx_l = NULL;
		ctx_r = NULL;
	} else {
		/* Query local and remote activity */
		ctx_l = llcp_lr_peek(conn);
		ctx_r = llcp_rr_peek(conn);
	}

	if (ctx_l) {
		/* Local active procedure */

		if (ctx_r) {
			/* Local active procedure
			 * Remote active procedure
			 */
			unexpected_l = !(pdu_is_expected(pdu, ctx_l) ||
					 pdu_is_unknown(pdu, ctx_l) ||
					 pdu_is_any_reject(pdu, ctx_l));

			unexpected_r = !(pdu_is_expected(pdu, ctx_r) ||
					 pdu_is_unknown(pdu, ctx_r) ||
					 pdu_is_reject_ext(pdu, ctx_r));

			if (unexpected_l == unexpected_r) {
				/* Both Local and Remote procedure active
				 * and PDU is either
				 * unexpected by both
				 * or
				 * expected by both
				 *
				 * Both situations are a result of invalid behaviour
				 */
				conn->llcp_terminate.reason_final =
					unexpected_r ? BT_HCI_ERR_LMP_PDU_NOT_ALLOWED :
						       BT_HCI_ERR_UNSPECIFIED;
			} else if (unexpected_l) {
				/* Local active procedure
				 * Unexpected local procedure PDU
				 * Remote active procedure
				 * Expected remote procedure PDU
				 */

				/* Process PDU in remote procedure */
				llcp_rr_rx(conn, ctx_r, link, rx);
			} else if (unexpected_r) {
				/* Local active procedure
				 * Expected local procedure PDU
				 * Remote active procedure
				 * Unexpected remote procedure PDU
				 */

				/* Process PDU in local procedure */
				llcp_lr_rx(conn, ctx_l, link, rx);
			}
			/* no else clause as this cannot occur with the logic above:
			 * if they are not identical then one must be true
			 */
		} else {
			/* Local active procedure
			 * No remote active procedure
			 */

			unexpected_l = !(pdu_is_expected(pdu, ctx_l) ||
					 pdu_is_unknown(pdu, ctx_l) ||
					 pdu_is_any_reject(pdu, ctx_l));

			if (unexpected_l) {
				/* Local active procedure
				 * Unexpected local procedure PDU
				 * No remote active procedure
				 */

				/* Process PDU as a new remote request */
				LL_ASSERT(pdu_valid);
				llcp_rr_new(conn, link, rx, true);
			} else {
				/* Local active procedure
				 * Expected local procedure PDU
				 * No remote active procedure
				 */

				/* Process PDU in local procedure */
				llcp_lr_rx(conn, ctx_l, link, rx);
			}
		}
	} else if (ctx_r) {
		/* No local active procedure
		 * Remote active procedure
		 */

		/* Process PDU in remote procedure */
		llcp_rr_rx(conn, ctx_r, link, rx);
	} else {
		/* No local active procedure
		 * No remote active procedure
		 */

		/* Process PDU as a new remote request */
		llcp_rr_new(conn, link, rx, pdu_valid);
	}
}

#ifdef ZTEST_UNITTEST

uint16_t llcp_local_ctx_buffers_free(void)
{
	return mem_free_count_get(mem_local_ctx.free);
}

uint16_t llcp_remote_ctx_buffers_free(void)
{
	return mem_free_count_get(mem_remote_ctx.free);
}

uint16_t llcp_ctx_buffers_free(void)
{
	return llcp_local_ctx_buffers_free() + llcp_remote_ctx_buffers_free();
}

#if defined(LLCP_TX_CTRL_BUF_QUEUE_ENABLE)
uint8_t llcp_common_tx_buffer_alloc_count(void)
{
	return common_tx_buffer_alloc;
}
#endif /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */

struct proc_ctx *llcp_proc_ctx_acquire(void)
{
	return proc_ctx_acquire(&mem_local_ctx);
}

struct proc_ctx *llcp_create_procedure(enum llcp_proc proc)
{
	return create_procedure(proc, &mem_local_ctx);
}
#endif /* ZTEST_UNITTEST */