1 /*
2  * Copyright (c) 2020 Demant
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/kernel.h>
8 
9 #include <zephyr/sys/byteorder.h>
10 #include <zephyr/sys/slist.h>
11 #include <zephyr/sys/util.h>
12 
13 #include <zephyr/bluetooth/hci_types.h>
14 
15 #include "hal/ccm.h"
16 
17 #include "util/util.h"
18 #include "util/mem.h"
19 #include "util/memq.h"
20 #include "util/dbuf.h"
21 
22 #include "pdu_df.h"
23 #include "lll/pdu_vendor.h"
24 #include "pdu.h"
25 
26 #include "ll.h"
27 #include "ll_settings.h"
28 
29 #include "lll.h"
30 #include "ll_feat.h"
31 #include "lll/lll_df_types.h"
32 #include "lll_conn.h"
33 #include "lll_conn_iso.h"
34 
35 #include "ull_tx_queue.h"
36 
37 #include "isoal.h"
38 #include "ull_iso_types.h"
39 #include "ull_conn_iso_types.h"
40 #include "ull_iso_internal.h"
41 #include "ull_conn_iso_internal.h"
42 #include "ull_peripheral_iso_internal.h"
43 
44 #include "ull_conn_types.h"
45 #include "ull_chan_internal.h"
46 #include "ull_llcp.h"
47 #include "ull_conn_internal.h"
48 #include "ull_internal.h"
49 #include "ull_llcp_features.h"
50 #include "ull_llcp_internal.h"
51 
52 #include <soc.h>
53 #include "hal/debug.h"
54 
/* LLCP Local Procedure Common FSM states */
enum {
	/* No local procedure in progress */
	LP_COMMON_STATE_IDLE,
	/* Waiting for a TX buffer to become available (or for un-pause) */
	LP_COMMON_STATE_WAIT_TX,
	/* Waiting for ack of a transmitted PDU */
	LP_COMMON_STATE_WAIT_TX_ACK,
	/* Waiting for a response PDU from the peer */
	LP_COMMON_STATE_WAIT_RX,
	/* Waiting for a notification buffer to become available */
	LP_COMMON_STATE_WAIT_NTF_AVAIL,
};
63 
/* LLCP Local Procedure Common FSM events */
enum {
	/* Procedure run */
	LP_COMMON_EVT_RUN,

	/* Response received */
	LP_COMMON_EVT_RESPONSE,

	/* Reject response received */
	LP_COMMON_EVT_REJECT,

	/* Unknown response received */
	LP_COMMON_EVT_UNKNOWN,

	/* Instant collision detected */
	LP_COMMON_EVT_COLLISION,

	/* Ack of transmitted PDU received */
	LP_COMMON_EVT_ACK,
};
84 
/* LLCP Remote Procedure Common FSM states */
enum {
	/* No remote procedure in progress */
	RP_COMMON_STATE_IDLE,
	/* Waiting for a request PDU from the peer */
	RP_COMMON_STATE_WAIT_RX,
	/* Terminate handling postponed */
	RP_COMMON_STATE_POSTPONE_TERMINATE,
	/* Waiting for a TX buffer to become available */
	RP_COMMON_STATE_WAIT_TX,
	/* Waiting for ack of a transmitted response PDU */
	RP_COMMON_STATE_WAIT_TX_ACK,
};
/* LLCP Remote Procedure Common FSM events */
enum {
	/* Procedure run */
	RP_COMMON_EVT_RUN,

	/* Ack of transmitted PDU received */
	RP_COMMON_EVT_ACK,

	/* Request received */
	RP_COMMON_EVT_REQUEST,
};
104 
105 
106 static void lp_comm_ntf(struct ll_conn *conn, struct proc_ctx *ctx);
107 static void lp_comm_terminate_invalid_pdu(struct ll_conn *conn, struct proc_ctx *ctx);
108 
109 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
/**
 * @brief Stop and tear down a connected ISO stream
 * This function may be called to tear down a CIS.
 *
 * @param cig_id         ID of specific ISO group
 * @param cis_id         ID of connected ISO stream to stop
 * @param reason         Termination reason
 */
static void llcp_cis_stop_by_id(uint8_t cig_id, uint8_t cis_id, uint8_t reason)
{
	struct ll_conn_iso_group *cig = ll_conn_iso_group_get_by_id(cig_id);

	if (cig) {
		struct ll_conn_iso_stream *cis;
		/* UINT16_MAX starts the group iteration from the first CIS;
		 * the helper updates cis_handle on each call.
		 */
		uint16_t cis_handle = UINT16_MAX;

		/* Look through CIS's of specified group */
		cis = ll_conn_iso_stream_get_by_group(cig, &cis_handle);
		while (cis && cis->cis_id != cis_id) {
			/* Get next CIS */
			cis = ll_conn_iso_stream_get_by_group(cig, &cis_handle);
		}
		/* Only stop a CIS whose LLL handle matches the iterated handle */
		if (cis && cis->lll.handle == cis_handle) {
			ull_conn_iso_cis_stop(cis, NULL, reason);
		}
	}
}
137 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO || CONFIG_BT_CTLR_PERIPHERAL_ISO */
138 
139 /*
140  * LLCP Local Procedure Common FSM
141  */
142 
/* Allocate and encode the request PDU for the active local procedure,
 * enqueue it towards LLL and (re)start the procedure response timeout.
 * For procedures that complete on TX ack (no response PDU expected),
 * ctx->node_ref.tx_ack is set and rx_opcode is marked UNUSED.
 */
static void lp_comm_tx(struct ll_conn *conn, struct proc_ctx *ctx)
{
	struct node_tx *tx;
	struct pdu_data *pdu;

	/* Allocate tx node */
	tx = llcp_tx_alloc(conn, ctx);
	LL_ASSERT(tx);

	pdu = (struct pdu_data *)tx->pdu;

	/* Encode LL Control PDU */
	switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_LE_PING)
	case PROC_LE_PING:
		llcp_pdu_encode_ping_req(pdu);
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_PING_RSP;
		break;
#endif /* CONFIG_BT_CTLR_LE_PING */
	case PROC_FEATURE_EXCHANGE:
		llcp_pdu_encode_feature_req(conn, pdu);
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_FEATURE_RSP;
		break;
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_PERIPHERAL)
	case PROC_MIN_USED_CHANS:
		llcp_pdu_encode_min_used_chans_ind(ctx, pdu);
		/* Completion is driven by TX ack; no response expected */
		ctx->node_ref.tx_ack = tx;
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
		break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_PERIPHERAL */
	case PROC_VERSION_EXCHANGE:
		llcp_pdu_encode_version_ind(pdu);
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_VERSION_IND;
		break;
	case PROC_TERMINATE:
		llcp_pdu_encode_terminate_ind(ctx, pdu);
		/* Completion is driven by TX ack; no response expected */
		ctx->node_ref.tx_ack = tx;
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
		break;
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	case PROC_CIS_TERMINATE:
		llcp_pdu_encode_cis_terminate_ind(ctx, pdu);
		/* Completion is driven by TX ack; no response expected */
		ctx->node_ref.tx_ack = tx;
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
		break;
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO || CONFIG_BT_CTLR_PERIPHERAL_ISO */
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case PROC_DATA_LENGTH_UPDATE:
		llcp_pdu_encode_length_req(conn, pdu);
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_LENGTH_RSP;
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	case PROC_CTE_REQ:
		llcp_pdu_encode_cte_req(ctx, pdu);
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_CTE_RSP;
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
	case PROC_SCA_UPDATE:
		llcp_pdu_encode_clock_accuracy_req(ctx, pdu);
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_CLOCK_ACCURACY_RSP;
		break;
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */
	default:
		/* Unknown procedure */
		LL_ASSERT(0);
	}

	ctx->tx_opcode = pdu->llctrl.opcode;

	/* Enqueue LL Control PDU towards LLL */
	llcp_tx_enqueue(conn, tx);

	/* Restart procedure response timeout timer */
	if (ctx->proc != PROC_TERMINATE) {
		/* Use normal timeout value of 40s */
		llcp_lr_prt_restart(conn);
	} else {
		/* Use supervision timeout value
		 * NOTE: As the supervision timeout is at most 32s the normal procedure response
		 * timeout of 40s will never come into play for the ACL Termination procedure.
		 */
		const uint32_t conn_interval_us = conn->lll.interval * CONN_INT_UNIT_US;
		const uint16_t sto_reload = RADIO_CONN_EVENTS(
			(conn->supervision_timeout * 10U * 1000U),
			conn_interval_us);
		llcp_lr_prt_restart_with_value(conn, sto_reload);
	}
}
233 
lp_comm_ntf_feature_exchange(struct ll_conn * conn,struct proc_ctx * ctx,struct pdu_data * pdu)234 static void lp_comm_ntf_feature_exchange(struct ll_conn *conn, struct proc_ctx *ctx,
235 					 struct pdu_data *pdu)
236 {
237 	switch (ctx->response_opcode) {
238 	case PDU_DATA_LLCTRL_TYPE_FEATURE_RSP:
239 		llcp_ntf_encode_feature_rsp(conn, pdu);
240 		break;
241 	case PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP:
242 		llcp_ntf_encode_unknown_rsp(ctx, pdu);
243 		break;
244 	default:
245 		/* Unexpected PDU, should not get through, so ASSERT */
246 		LL_ASSERT(0);
247 	}
248 }
249 
lp_comm_ntf_version_ind(struct ll_conn * conn,struct proc_ctx * ctx,struct pdu_data * pdu)250 static void lp_comm_ntf_version_ind(struct ll_conn *conn, struct proc_ctx *ctx,
251 				    struct pdu_data *pdu)
252 {
253 	switch (ctx->response_opcode) {
254 	case PDU_DATA_LLCTRL_TYPE_VERSION_IND:
255 		llcp_ntf_encode_version_ind(conn, pdu);
256 		break;
257 	default:
258 		/* Unexpected PDU, should not get through, so ASSERT */
259 		LL_ASSERT(0);
260 	}
261 }
262 
263 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
/* Encode the Data Length Change notification towards the Host.
 * NOTE: ctx is unused here; kept for a uniform lp_comm_ntf_* signature.
 */
static void lp_comm_ntf_length_change(struct ll_conn *conn, struct proc_ctx *ctx,
				      struct pdu_data *pdu)
{
	llcp_ntf_encode_length_change(conn, pdu);
}
269 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
270 
271 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
272 
/* Finalize the CTE request procedure: release the 'paused command' hold on
 * remote procedure handling, then complete the local request.
 */
static void lp_comm_complete_cte_req_finalize(struct ll_conn *conn)
{
	llcp_rr_set_paused_cmd(conn, PROC_NONE);
	llcp_lr_complete(conn);
}
278 
lp_comm_ntf_cte_req(struct ll_conn * conn,struct proc_ctx * ctx,struct pdu_data * pdu)279 static void lp_comm_ntf_cte_req(struct ll_conn *conn, struct proc_ctx *ctx, struct pdu_data *pdu)
280 {
281 	switch (ctx->response_opcode) {
282 	case PDU_DATA_LLCTRL_TYPE_CTE_RSP:
283 		/* Notify host that received LL_CTE_RSP does not have CTE */
284 		if (!ctx->data.cte_remote_rsp.has_cte) {
285 			llcp_ntf_encode_cte_req(pdu);
286 		}
287 		break;
288 	case PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP:
289 		llcp_ntf_encode_unknown_rsp(ctx, pdu);
290 		break;
291 	case PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND:
292 		llcp_ntf_encode_reject_ext_ind(ctx, pdu);
293 		break;
294 	default:
295 		/* Unexpected PDU, should not get through, so ASSERT */
296 		LL_ASSERT(0);
297 	}
298 }
299 
/* Notify the Host about the CTE request outcome, disable the CTE request
 * procedure and return the local FSM to idle.
 */
static void lp_comm_ntf_cte_req_tx(struct ll_conn *conn, struct proc_ctx *ctx)
{
	lp_comm_ntf(conn, ctx);
	ull_cp_cte_req_set_disable(conn);
	ctx->state = LP_COMMON_STATE_IDLE;
}
306 
/* Handle completion of the CTE request procedure.
 * Depending on the response received (or the special UNUSED marker) this
 * re-arms the periodic request, notifies the Host, disables the procedure
 * locally, or terminates the connection on an illegal response.
 */
static void lp_comm_complete_cte_req(struct ll_conn *conn, struct proc_ctx *ctx)
{
	if (conn->llcp.cte_req.is_enabled) {
		if (ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_CTE_RSP) {
			if (ctx->data.cte_remote_rsp.has_cte) {
				if (conn->llcp.cte_req.req_interval != 0U) {
					/* Periodic mode: re-arm the request expiry counter */
					conn->llcp.cte_req.req_expire =
						conn->llcp.cte_req.req_interval;
				} else {
					/* Disable the CTE request procedure when it is completed in
					 * case it was executed as non-periodic.
					 */
					conn->llcp.cte_req.is_enabled = 0U;
				}
				ctx->state = LP_COMMON_STATE_IDLE;
			} else {
				/* Response without CTE: notify Host and disable */
				lp_comm_ntf_cte_req_tx(conn, ctx);
			}
		} else if (ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND &&
			   ctx->reject_ext_ind.reject_opcode == PDU_DATA_LLCTRL_TYPE_CTE_REQ) {
			lp_comm_ntf_cte_req_tx(conn, ctx);
		} else if (ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP &&
			   ctx->unknown_response.type == PDU_DATA_LLCTRL_TYPE_CTE_REQ) {
			/* CTE response is unsupported in peer, so disable locally for this
			 * connection
			 */
			feature_unmask_features(conn, LL_FEAT_BIT_CONNECTION_CTE_REQ);
			lp_comm_ntf_cte_req_tx(conn, ctx);
		} else if (ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_UNUSED) {
			/* This path is related with handling disable the CTE REQ when PHY
			 * has been changed to CODED PHY. BT 5.3 Core Vol 4 Part E 7.8.85
			 * says CTE REQ has to be automatically disabled as if it had been requested
			 * by Host. There is no notification send to Host.
			 */
			ull_cp_cte_req_set_disable(conn);
			ctx->state = LP_COMMON_STATE_IDLE;
		} else {
			/* Illegal response opcode, internally changes state to
			 * LP_COMMON_STATE_IDLE
			 */
			lp_comm_terminate_invalid_pdu(conn, ctx);
		}
	} else {
		/* The CTE_REQ was disabled by Host after the request was send.
		 * It does not matter if response has arrived, it should not be handled.
		 */
		ctx->state = LP_COMMON_STATE_IDLE;
	}

	/* All paths that are fully done end in IDLE; finalize only then */
	if (ctx->state == LP_COMMON_STATE_IDLE) {
		lp_comm_complete_cte_req_finalize(conn);
	}
}
360 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
361 
362 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
/* Fill a Peer SCA Complete notification from the procedure context.
 * NOTE: the node's pdu area is re-interpreted as a struct node_rx_sca.
 */
static void lp_comm_ntf_sca(struct node_rx_pdu *ntf, struct proc_ctx *ctx, struct pdu_data *pdu)
{
	struct node_rx_sca *pdu_sca = (struct node_rx_sca *)pdu;

	ntf->hdr.type = NODE_RX_TYPE_REQ_PEER_SCA_COMPLETE;
	pdu_sca->status = ctx->data.sca_update.error_code;
	pdu_sca->sca = ctx->data.sca_update.sca;
}
371 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
372 
/* Generate a Host notification for the completed local procedure.
 * Re-uses the procedure's RX node when one is attached ("piggy-back"), in
 * which case enqueueing happens on the ull_cp_rx return path; otherwise a
 * fresh NTF node is allocated and enqueued towards LL here.
 */
static void lp_comm_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
{
	uint8_t piggy_back = 1U;
	struct node_rx_pdu *ntf;
	struct pdu_data *pdu;

	/* Take ownership of the RX node, if any */
	ntf = ctx->node_ref.rx;
	ctx->node_ref.rx = NULL;
	if (!ntf) {
		/* Allocate ntf node */
		ntf = llcp_ntf_alloc();
		LL_ASSERT(ntf);
		piggy_back = 0U;
	}

	ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
	ntf->hdr.handle = conn->lll.handle;
	pdu = (struct pdu_data *)ntf->pdu;

	/* Encode per-procedure notification payload */
	switch (ctx->proc) {
	case PROC_FEATURE_EXCHANGE:
		lp_comm_ntf_feature_exchange(conn, ctx, pdu);
		break;
	case PROC_VERSION_EXCHANGE:
		lp_comm_ntf_version_ind(conn, ctx, pdu);
		break;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case PROC_DATA_LENGTH_UPDATE:
		lp_comm_ntf_length_change(conn, ctx, pdu);
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	case PROC_CTE_REQ:
		lp_comm_ntf_cte_req(conn, ctx, pdu);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
	case PROC_SCA_UPDATE:
		lp_comm_ntf_sca(ntf, ctx, pdu);
		break;
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */
	default:
		LL_ASSERT(0);
		break;
	}

	if (!piggy_back) {
		/* Enqueue notification towards LL, unless we re-use RX node,
		 * in which case it is handled on the ull_cp_rx return path
		 */
		ll_rx_put_sched(ntf->hdr.link, ntf);
	}


}
428 
/* Invalid PDU received as a response: mark the connection for termination
 * with "LMP PDU Not Allowed" and complete the local procedure.
 */
static void lp_comm_terminate_invalid_pdu(struct ll_conn *conn, struct proc_ctx *ctx)
{
	/* Invalid behaviour */
	/* Invalid PDU received so terminate connection */
	conn->llcp_terminate.reason_final = BT_HCI_ERR_LMP_PDU_NOT_ALLOWED;
	llcp_lr_complete(conn);
	ctx->state = LP_COMMON_STATE_IDLE;
}
437 
/* Complete the active local procedure: validate the response opcode,
 * generate Host notifications where required, release the local request
 * and transition to IDLE — or to WAIT_NTF_AVAIL when a notification
 * buffer is not yet available (Version Exchange only).
 */
static void lp_comm_complete(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_LE_PING)
	case PROC_LE_PING:
		if (ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP ||
		    ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_PING_RSP) {
			llcp_lr_complete(conn);
			ctx->state = LP_COMMON_STATE_IDLE;
		} else {
			/* Illegal response opcode */
			lp_comm_terminate_invalid_pdu(conn, ctx);
		}
		break;
#endif /* CONFIG_BT_CTLR_LE_PING */
	case PROC_FEATURE_EXCHANGE:
		if ((ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP ||
		     ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_FEATURE_RSP)) {
			/* Only notify the Host if it initiated the exchange */
			if (ctx->data.fex.host_initiated) {
				lp_comm_ntf(conn, ctx);
			}
			llcp_lr_complete(conn);
			ctx->state = LP_COMMON_STATE_IDLE;
		} else {
			/* Illegal response opcode */
			lp_comm_terminate_invalid_pdu(conn, ctx);
		}
		break;
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_PERIPHERAL)
	case PROC_MIN_USED_CHANS:
		llcp_lr_complete(conn);
		ctx->state = LP_COMMON_STATE_IDLE;
		break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_PERIPHERAL */
	case PROC_VERSION_EXCHANGE:
		if (ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_VERSION_IND) {
			if (ctx->node_ref.rx || llcp_ntf_alloc_is_available()) {
				/* Either this is a piggy-back or there is a NTF node avail */
				lp_comm_ntf(conn, ctx);
				llcp_lr_complete(conn);
				ctx->state = LP_COMMON_STATE_IDLE;
			} else {
				/* Handle procedure TO, in case we end up waiting 'forever' for
				 * NTF buffer. This is a simple way to implement mechanism to
				 * trigger disconnect in case NTF buffer 'never' becomes avail
				 *    see elaborate note in lp_comm_st_wait_ntf_avail()
				 */
				llcp_lr_prt_restart(conn);
				ctx->state = LP_COMMON_STATE_WAIT_NTF_AVAIL;
			}
		} else {
			/* Illegal response opcode */
			lp_comm_terminate_invalid_pdu(conn, ctx);
		}
		break;
	case PROC_TERMINATE:
		/* No notification */
		llcp_lr_complete(conn);
		ctx->state = LP_COMMON_STATE_IDLE;

		/* Mark the connection for termination */
		conn->llcp_terminate.reason_final = BT_HCI_ERR_LOCALHOST_TERM_CONN;
		break;
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	case PROC_CIS_TERMINATE:
		/* No notification */
		llcp_lr_complete(conn);
		ctx->state = LP_COMMON_STATE_IDLE;
		break;
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO || CONFIG_BT_CTLR_PERIPHERAL_ISO */
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case PROC_DATA_LENGTH_UPDATE:
		if (ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_LENGTH_RSP) {
			/* Apply changes in data lengths/times */
			uint8_t dle_changed = ull_dle_update_eff(conn);

			/* Only notify the Host when effective params changed */
			if (dle_changed) {
				lp_comm_ntf(conn, ctx);
			}
			llcp_lr_complete(conn);
			ctx->state = LP_COMMON_STATE_IDLE;
		} else if (ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP) {
			/* Peer does not accept DLU, so disable on current connection */
			feature_unmask_features(conn, LL_FEAT_BIT_DLE);

			llcp_lr_complete(conn);
			ctx->state = LP_COMMON_STATE_IDLE;
		} else {
			/* Illegal response opcode */
			lp_comm_terminate_invalid_pdu(conn, ctx);
			break;
		}

		if (!ull_cp_remote_dle_pending(conn)) {
			/* Resume data, but only if there is no remote procedure pending RSP
			 * in which case, the RSP tx-ACK will resume data
			 */
			llcp_tx_resume_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_DATA_LENGTH);
		}
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	case PROC_CTE_REQ:
		lp_comm_complete_cte_req(conn, ctx);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
	case PROC_SCA_UPDATE:
		switch (ctx->response_opcode) {
		case PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP:
			/* Peer does not support SCA update, so disable on current connection */
			feature_unmask_features(conn, LL_FEAT_BIT_SCA_UPDATE);
			ctx->data.sca_update.error_code = BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
			/* Fall through to complete procedure */
		case PDU_DATA_LLCTRL_TYPE_CLOCK_ACCURACY_RSP:
#if defined(CONFIG_BT_PERIPHERAL)
			/* As peripheral, apply the peer's new SCA when it changed */
			if (conn->lll.role == BT_HCI_ROLE_PERIPHERAL &&
			    !ctx->data.sca_update.error_code &&
			    conn->periph.sca != ctx->data.sca_update.sca) {
				conn->periph.sca = ctx->data.sca_update.sca;
				ull_conn_update_peer_sca(conn);
#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
				ull_peripheral_iso_update_peer_sca(conn);
#endif /* defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) */
			}
#endif /* CONFIG_BT_PERIPHERAL */
			lp_comm_ntf(conn, ctx);
			llcp_lr_complete(conn);
			ctx->state = LP_COMMON_STATE_IDLE;
			break;
		default:
			/* Illegal response opcode */
			lp_comm_terminate_invalid_pdu(conn, ctx);
		}
		break;
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */
	default:
		/* Unknown procedure */
		LL_ASSERT(0);
	}
}
579 
580 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
/* Return true when the CIS targeted by the CIS Terminate procedure has
 * acknowledged termination; the flag is cleared when the procedure starts
 * (see lp_comm_st_idle).
 */
static bool lp_cis_terminated(struct ll_conn *conn)
{
	return conn->llcp.cis.terminate_ack;
}
585 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO || CONFIG_BT_CTLR_PERIPHERAL_ISO */
586 
lp_comm_tx_proxy(struct ll_conn * conn,struct proc_ctx * ctx,const bool extra_cond)587 static bool lp_comm_tx_proxy(struct ll_conn *conn, struct proc_ctx *ctx, const bool extra_cond)
588 {
589 	if (extra_cond || llcp_lr_ispaused(conn) || !llcp_tx_alloc_peek(conn, ctx)) {
590 		ctx->state = LP_COMMON_STATE_WAIT_TX;
591 	} else {
592 		lp_comm_tx(conn, ctx);
593 
594 		/* Select correct state, depending on TX ack handling 'request' */
595 		ctx->state = ctx->node_ref.tx_ack ?
596 			LP_COMMON_STATE_WAIT_TX_ACK : LP_COMMON_STATE_WAIT_RX;
597 		return true;
598 	}
599 	return false;
600 }
601 
/* Attempt to send the request PDU for the active local procedure,
 * applying per-procedure preconditions (pause state, TX buffer
 * availability, one-shot rules, pending remote procedures, etc.).
 */
static void lp_comm_send_req(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_LE_PING)
	case PROC_LE_PING:
		lp_comm_tx_proxy(conn, ctx, false);
		break;
#endif /* CONFIG_BT_CTLR_LE_PING */
	case PROC_FEATURE_EXCHANGE:
		lp_comm_tx_proxy(conn, ctx, false);
		break;
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_PERIPHERAL)
	case PROC_MIN_USED_CHANS:
		lp_comm_tx_proxy(conn, ctx, false);
		break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_PERIPHERAL */
	case PROC_VERSION_EXCHANGE:
		/* The Link Layer shall only queue for transmission a maximum of
		 * one LL_VERSION_IND PDU during a connection.
		 */
		if (!conn->llcp.vex.sent) {
			if (lp_comm_tx_proxy(conn, ctx, false)) {
				conn->llcp.vex.sent = 1;
			}
		} else {
			/* Already sent once: complete directly from cached data */
			ctx->response_opcode = PDU_DATA_LLCTRL_TYPE_VERSION_IND;
			/* Clear node_ref to signal no NTF piggy-backing */
			ctx->node_ref.rx = NULL;
			lp_comm_complete(conn, ctx, evt, param);
		}
		break;
	case PROC_TERMINATE:
		if (!llcp_tx_alloc_peek(conn, ctx)) {
			ctx->state = LP_COMMON_STATE_WAIT_TX;
		} else {
			lp_comm_tx(conn, ctx);
			ctx->data.term.error_code = BT_HCI_ERR_LOCALHOST_TERM_CONN;
			ctx->state = LP_COMMON_STATE_WAIT_TX_ACK;
		}
		break;
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	case PROC_CIS_TERMINATE:
		/* Hold off TX until the CIS teardown has been acknowledged */
		lp_comm_tx_proxy(conn, ctx, !lp_cis_terminated(conn));
		break;
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO || CONFIG_BT_CTLR_PERIPHERAL_ISO */
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case PROC_DATA_LENGTH_UPDATE:
		if (!ull_cp_remote_dle_pending(conn)) {
			if (llcp_lr_ispaused(conn) || !llcp_tx_alloc_peek(conn, ctx)) {
				ctx->state = LP_COMMON_STATE_WAIT_TX;
			} else {
				/* Pause data tx, to ensure we can later (on RSP rx-ack)
				 * update DLE without conflicting with out-going LL Data PDUs
				 * See BT Core 5.2 Vol6: B-4.5.10 & B-5.1.9
				 */
				llcp_tx_pause_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_DATA_LENGTH);
				lp_comm_tx(conn, ctx);
				ctx->state = LP_COMMON_STATE_WAIT_RX;
			}
		} else {
			/* REQ was received from peer and RSP not yet sent
			 * lets piggy-back on RSP instead af sending REQ
			 * thus we can complete local req
			 */
			llcp_lr_complete(conn);
			ctx->state = LP_COMMON_STATE_IDLE;
		}
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	case PROC_CTE_REQ:
		if (conn->llcp.cte_req.is_enabled &&
#if defined(CONFIG_BT_CTLR_PHY)
		    conn->lll.phy_rx != PHY_CODED) {
#else
		    1) {
#endif /* CONFIG_BT_CTLR_PHY */
			lp_comm_tx_proxy(conn, ctx,
					 llcp_rr_get_paused_cmd(conn) == PROC_CTE_REQ);
		} else {
			/* The PHY was changed to CODED when the request was waiting in a local
			 * request queue.
			 *
			 * Use of pair: proc PROC_CTE_REQ and rx_opcode PDU_DATA_LLCTRL_TYPE_UNUSED
			 * to complete the procedure before sending a request to peer.
			 * This is a special complete execution path to disable the procedure
			 * due to change of RX PHY to CODED.
			 */
			ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
			ctx->state = LP_COMMON_STATE_IDLE;
			llcp_lr_complete(conn);
		}
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
	case PROC_SCA_UPDATE:
		lp_comm_tx_proxy(conn, ctx, false);
		break;
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */
	default:
		/* Unknown procedure */
		LL_ASSERT(0);
	}
}
706 
/* IDLE state handler: on RUN, kick off the procedure (with CIS teardown
 * first for PROC_CIS_TERMINATE); ignore all other events.
 */
static void lp_comm_st_idle(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	switch (evt) {
	case LP_COMMON_EVT_RUN:
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
		if (ctx->proc == PROC_CIS_TERMINATE) {
			/* We're getting going on a CIS Terminate */
			/* So we should start by requesting Terminate for the CIS in question */

			/* Clear terminate ack flag, used to signal CIS Terminated */
			conn->llcp.cis.terminate_ack = 0U;
			llcp_cis_stop_by_id(ctx->data.cis_term.cig_id, ctx->data.cis_term.cis_id,
					    BT_HCI_ERR_LOCALHOST_TERM_CONN);
		}
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO || CONFIG_BT_CTLR_PERIPHERAL_ISO */
		lp_comm_send_req(conn, ctx, evt, param);
		break;
	default:
		/* Ignore other evts */
		break;
	}
}
729 
730 static void lp_comm_st_wait_tx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
731 			       void *param)
732 {
733 	switch (evt) {
734 	case LP_COMMON_EVT_RUN:
735 		lp_comm_send_req(conn, ctx, evt, param);
736 		break;
737 	default:
738 		/* Ignore other evts */
739 		break;
740 	}
741 }
742 
/* WAIT_TX_ACK state handler: complete the procedure on ACK for the
 * ack-driven procedures (MIN_USED_CHANS, TERMINATE, CIS_TERMINATE);
 * ignore everything else.
 */
static void lp_comm_st_wait_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				   void *param)
{
	switch (evt) {
	case LP_COMMON_EVT_ACK:
		switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_PERIPHERAL)
		case PROC_MIN_USED_CHANS:
			lp_comm_complete(conn, ctx, evt, param);
			break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_PERIPHERAL */
		case PROC_TERMINATE:
			lp_comm_complete(conn, ctx, evt, param);
			break;
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
		case PROC_CIS_TERMINATE:
			lp_comm_complete(conn, ctx, evt, param);
			break;
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO || CONFIG_BT_CTLR_PERIPHERAL_ISO */
		default:
			/* Ignore for other procedures */
			break;
		}
		break;
	default:
		/* Ignore other evts */
		break;
	}
}
772 
/* Decode a received LL Control PDU in the context of the local procedure,
 * storing the opcode in ctx->response_opcode and decoding per-opcode
 * payload into conn/ctx as applicable.
 */
static void lp_comm_rx_decode(struct ll_conn *conn, struct proc_ctx *ctx, struct pdu_data *pdu)
{
	ctx->response_opcode = pdu->llctrl.opcode;

	switch (pdu->llctrl.opcode) {
#if defined(CONFIG_BT_CTLR_LE_PING)
	case PDU_DATA_LLCTRL_TYPE_PING_RSP:
		/* ping_rsp has no data */
		break;
#endif /* CONFIG_BT_CTLR_LE_PING */
	case PDU_DATA_LLCTRL_TYPE_FEATURE_RSP:
		llcp_pdu_decode_feature_rsp(conn, pdu);
#if defined(CONFIG_BT_CTLR_DATA_LENGTH) && defined(CONFIG_BT_CTLR_PHY)
		/* If Coded PHY is now supported we must update local max tx/rx times to reflect */
		if (feature_phy_coded(conn)) {
			ull_dle_max_time_get(conn, &conn->lll.dle.local.max_rx_time,
					     &conn->lll.dle.local.max_tx_time);
		}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH && CONFIG_BT_CTLR_PHY */
		break;
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN)
	case PDU_DATA_LLCTRL_TYPE_MIN_USED_CHAN_IND:
		/* No response expected */
		break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN */
	case PDU_DATA_LLCTRL_TYPE_VERSION_IND:
		llcp_pdu_decode_version_ind(conn, pdu);
		break;
	case PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP:
		llcp_pdu_decode_unknown_rsp(ctx, pdu);
		break;
	case PDU_DATA_LLCTRL_TYPE_TERMINATE_IND:
		/* No response expected */
		LL_ASSERT(0);
		break;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case PDU_DATA_LLCTRL_TYPE_LENGTH_RSP:
		llcp_pdu_decode_length_rsp(conn, pdu);
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	case PDU_DATA_LLCTRL_TYPE_CTE_RSP:
		llcp_pdu_decode_cte_rsp(ctx, pdu);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
	case PDU_DATA_LLCTRL_TYPE_CLOCK_ACCURACY_RSP:
		llcp_pdu_decode_clock_accuracy_rsp(ctx, pdu);
		break;
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */
	case PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND:
		llcp_pdu_decode_reject_ext_ind(ctx, pdu);
		break;
	case PDU_DATA_LLCTRL_TYPE_REJECT_IND:
		/* Empty on purpose, as we don't care about the PDU content, we'll disconnect */
		break;
	default:
		/* Unknown opcode */
		LL_ASSERT(0);
	}
}
834 
835 static void lp_comm_st_wait_rx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
836 			       void *param)
837 {
838 	switch (evt) {
839 	case LP_COMMON_EVT_RESPONSE:
840 		lp_comm_rx_decode(conn, ctx, (struct pdu_data *)param);
841 		lp_comm_complete(conn, ctx, evt, param);
842 		break;
843 	default:
844 		/* Ignore other evts */
845 		break;
846 	}
847 }
848 
/* WAIT_NTF_AVAIL state handler: on RUN, complete the Version Exchange as
 * soon as a notification buffer is available. Only Version Exchange may
 * legally wait here; any other procedure asserts.
 */
static void lp_comm_st_wait_ntf_avail(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				void *param)
{
	switch (evt) {
	case LP_COMMON_EVT_RUN:
		switch (ctx->proc) {
		case PROC_VERSION_EXCHANGE:
			/* Note re. procedure timeout handling:
			 * Procedure TO is specifically NOT reset while in wait state, since
			 * the mechanism is being 'hi-jacked' to implement a TO on the NTF wait
			 * This to catch the very unlikely case:
			 *   local VERSION IND started after a VERSION IND had already been TX'ed
			 *   in which case the local procedure should complete with NTF without
			 *   prior TX (ie no procedure TO handling initiated). IF this NTF never
			 *   finds buffer avail it would wait forever, but not with proc TO active
			 */
			if (llcp_ntf_alloc_is_available()) {
				lp_comm_ntf(conn, ctx);
				llcp_lr_complete(conn);
				ctx->state = LP_COMMON_STATE_IDLE;
			}
			break;
		default:
			/* If we get here it is not good since only VERSION EXCHANGE procedure
			 * out of the ones handled in ull_llcp_common should end up waiting for
			 * non-piggy-back'ed NTF
			 */
			LL_ASSERT(0);
			break;
		}
		break;
	default:
		break;
	}
}
884 
/* Dispatch an event to the handler for the current local FSM state */
static void lp_comm_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				void *param)
{
	switch (ctx->state) {
	case LP_COMMON_STATE_IDLE:
		lp_comm_st_idle(conn, ctx, evt, param);
		break;
	case LP_COMMON_STATE_WAIT_TX:
		lp_comm_st_wait_tx(conn, ctx, evt, param);
		break;
	case LP_COMMON_STATE_WAIT_TX_ACK:
		lp_comm_st_wait_tx_ack(conn, ctx, evt, param);
		break;
	case LP_COMMON_STATE_WAIT_RX:
		lp_comm_st_wait_rx(conn, ctx, evt, param);
		break;
	case LP_COMMON_STATE_WAIT_NTF_AVAIL:
		lp_comm_st_wait_ntf_avail(conn, ctx, evt, param);
		break;
	default:
		/* Unknown state */
		LL_ASSERT(0);
	}
}
909 
/* Entry point: TX ack received for a PDU belonging to the local common procedure */
void llcp_lp_comm_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, struct node_tx *tx)
{
	lp_comm_execute_fsm(conn, ctx, LP_COMMON_EVT_ACK, tx->pdu);
}
914 
/* Entry point: response PDU received for the local common procedure */
void llcp_lp_comm_rx(struct ll_conn *conn, struct proc_ctx *ctx, struct node_rx_pdu *rx)
{
	lp_comm_execute_fsm(conn, ctx, LP_COMMON_EVT_RESPONSE, rx->pdu);
}
919 
/* Initialize a local common procedure context; the FSM starts in IDLE */
void llcp_lp_comm_init_proc(struct proc_ctx *ctx)
{
	ctx->state = LP_COMMON_STATE_IDLE;
}
924 
925 void llcp_lp_comm_run(struct ll_conn *conn, struct proc_ctx *ctx, void *param)
926 {
927 	lp_comm_execute_fsm(conn, ctx, LP_COMMON_EVT_RUN, param);
928 
929 }
930 
931 static void rp_comm_terminate(struct ll_conn *conn, struct proc_ctx *ctx)
932 {
933 	llcp_rr_complete(conn);
934 	ctx->state = RP_COMMON_STATE_IDLE;
935 
936 	/* Mark the connection for termination */
937 	conn->llcp_terminate.reason_final = ctx->data.term.error_code;
938 }
939 
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
/* Stop the CIS identified in the CIS-terminate context, using the
 * error code received from the peer as the stop reason.
 */
static void rp_comm_stop_cis(struct proc_ctx *ctx)
{
	const uint8_t cig_id = ctx->data.cis_term.cig_id;
	const uint8_t cis_id = ctx->data.cis_term.cis_id;
	const uint8_t reason = ctx->data.cis_term.error_code;

	llcp_cis_stop_by_id(cig_id, cis_id, reason);
}
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO || CONFIG_BT_CTLR_PERIPHERAL_ISO */
947 
948 /*
949  * LLCP Remote Procedure Common FSM
950  */
951 static void rp_comm_rx_decode(struct ll_conn *conn, struct proc_ctx *ctx, struct pdu_data *pdu)
952 {
953 	ctx->response_opcode = pdu->llctrl.opcode;
954 
955 	switch (pdu->llctrl.opcode) {
956 #if defined(CONFIG_BT_CTLR_LE_PING)
957 	case PDU_DATA_LLCTRL_TYPE_PING_REQ:
958 		/* ping_req has no data */
959 		break;
960 #endif /* CONFIG_BT_CTLR_LE_PING */
961 #if defined(CONFIG_BT_PERIPHERAL) || \
962 	(defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG) && defined(CONFIG_BT_CENTRAL))
963 #if defined(CONFIG_BT_PERIPHERAL)
964 	case PDU_DATA_LLCTRL_TYPE_FEATURE_REQ:
965 #endif /* CONFIG_BT_PERIPHERAL */
966 #if defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG) && defined(CONFIG_BT_CENTRAL)
967 	case PDU_DATA_LLCTRL_TYPE_PER_INIT_FEAT_XCHG:
968 #endif /* CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG && CONFIG_BT_CENTRAL */
969 		llcp_pdu_decode_feature_req(conn, pdu);
970 #if defined(CONFIG_BT_CTLR_DATA_LENGTH) && defined(CONFIG_BT_CTLR_PHY)
971 		/* If Coded PHY is now supported we must update local max tx/rx times to reflect */
972 		if (feature_phy_coded(conn)) {
973 			ull_dle_max_time_get(conn, &conn->lll.dle.local.max_rx_time,
974 					     &conn->lll.dle.local.max_tx_time);
975 		}
976 #endif /* CONFIG_BT_CTLR_DATA_LENGTH && CONFIG_BT_CTLR_PHY */
977 		break;
978 #endif /* CONFIG_BT_PERIPHERAL || (CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG && CONFIG_BT_CENTRAL) */
979 #if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_CENTRAL)
980 	case PDU_DATA_LLCTRL_TYPE_MIN_USED_CHAN_IND:
981 		llcp_pdu_decode_min_used_chans_ind(conn, pdu);
982 		break;
983 #endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_CENTRAL */
984 	case PDU_DATA_LLCTRL_TYPE_VERSION_IND:
985 		llcp_pdu_decode_version_ind(conn, pdu);
986 		break;
987 	case PDU_DATA_LLCTRL_TYPE_TERMINATE_IND:
988 		llcp_pdu_decode_terminate_ind(ctx, pdu);
989 		/* Make sure no data is tx'ed after RX of terminate ind */
990 		llcp_tx_pause_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_TERMINATE);
991 		break;
992 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
993 	case PDU_DATA_LLCTRL_TYPE_CIS_TERMINATE_IND:
994 		llcp_pdu_decode_cis_terminate_ind(ctx, pdu);
995 		/* Terminate CIS */
996 		rp_comm_stop_cis(ctx);
997 		break;
998 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO || CONFIG_BT_CTLR_PERIPHERAL_ISO */
999 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
1000 	case PDU_DATA_LLCTRL_TYPE_LENGTH_REQ:
1001 		llcp_pdu_decode_length_req(conn, pdu);
1002 		/* On reception of REQ mark RSP open for local piggy-back
1003 		 * Pause data tx, to ensure we can later (on RSP tx ack) update TX DLE without
1004 		 * conflicting with out-going LL Data PDUs
1005 		 * See BT Core 5.2 Vol6: B-4.5.10 & B-5.1.9
1006 		 */
1007 		llcp_tx_pause_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_DATA_LENGTH);
1008 		ctx->data.dle.ntf_dle = ull_dle_update_eff_rx(conn);
1009 
1010 		/* Mark RX pdu to be removed from RX queue, but NOT be released */
1011 		llcp_rx_node_retain(ctx);
1012 		break;
1013 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1014 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
1015 	case PDU_DATA_LLCTRL_TYPE_CTE_REQ:
1016 		llcp_pdu_decode_cte_req(ctx, pdu);
1017 		break;
1018 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
1019 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
1020 	case PDU_DATA_LLCTRL_TYPE_CLOCK_ACCURACY_REQ:
1021 		llcp_pdu_decode_clock_accuracy_req(ctx, pdu);
1022 		break;
1023 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
1024 	default:
1025 		/* Unknown opcode */
1026 		LL_ASSERT(0);
1027 	}
1028 }
1029 
1030 static void rp_comm_tx(struct ll_conn *conn, struct proc_ctx *ctx)
1031 {
1032 	struct node_tx *tx;
1033 	struct pdu_data *pdu;
1034 
1035 	/* Allocate tx node */
1036 	tx = llcp_tx_alloc(conn, ctx);
1037 	LL_ASSERT(tx);
1038 
1039 	pdu = (struct pdu_data *)tx->pdu;
1040 
1041 	/* Encode LL Control PDU */
1042 	switch (ctx->proc) {
1043 #if defined(CONFIG_BT_CTLR_LE_PING)
1044 	case PROC_LE_PING:
1045 		llcp_pdu_encode_ping_rsp(pdu);
1046 		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
1047 		break;
1048 #endif /* CONFIG_BT_CTLR_LE_PING */
1049 	case PROC_FEATURE_EXCHANGE:
1050 		llcp_pdu_encode_feature_rsp(conn, pdu);
1051 		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
1052 		break;
1053 	case PROC_VERSION_EXCHANGE:
1054 		llcp_pdu_encode_version_ind(pdu);
1055 		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
1056 		break;
1057 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
1058 	case PROC_DATA_LENGTH_UPDATE:
1059 		llcp_pdu_encode_length_rsp(conn, pdu);
1060 		ctx->node_ref.tx_ack = tx;
1061 		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
1062 		break;
1063 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1064 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
1065 	case PROC_CTE_REQ: {
1066 		uint8_t err_code = 0;
1067 
1068 		if (conn->llcp.cte_rsp.is_enabled == 0) {
1069 			err_code = BT_HCI_ERR_UNSUPP_LL_PARAM_VAL;
1070 		}
1071 
1072 #if defined(CONFIG_BT_PHY_UPDATE)
1073 		/* If the PHY update is not possible, then PHY1M is used.
1074 		 * CTE is supported for PHY1M.
1075 		 */
1076 		if (conn->lll.phy_tx == PHY_CODED) {
1077 			err_code = BT_HCI_ERR_INVALID_LL_PARAM;
1078 		}
1079 #endif /* CONFIG_BT_PHY_UPDATE */
1080 		if (!(conn->llcp.cte_rsp.cte_types & BIT(ctx->data.cte_remote_req.cte_type)) ||
1081 		    conn->llcp.cte_rsp.max_cte_len < ctx->data.cte_remote_req.min_cte_len) {
1082 			err_code = BT_HCI_ERR_UNSUPP_LL_PARAM_VAL;
1083 		}
1084 
1085 		if (!err_code) {
1086 			llcp_pdu_encode_cte_rsp(ctx, pdu);
1087 			ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
1088 		} else {
1089 			llcp_pdu_encode_reject_ext_ind(pdu, PDU_DATA_LLCTRL_TYPE_CTE_REQ, err_code);
1090 			ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
1091 		}
1092 
1093 		ctx->node_ref.tx_ack = tx;
1094 
1095 		break;
1096 	}
1097 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
1098 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
1099 	case PROC_SCA_UPDATE:
1100 		llcp_pdu_encode_clock_accuracy_rsp(ctx, pdu);
1101 		ctx->node_ref.tx_ack = tx;
1102 		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
1103 		break;
1104 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
1105 	default:
1106 		/* Unknown procedure */
1107 		LL_ASSERT(0);
1108 	}
1109 
1110 	ctx->tx_opcode = pdu->llctrl.opcode;
1111 
1112 	/* Enqueue LL Control PDU towards LLL */
1113 	llcp_tx_enqueue(conn, tx);
1114 
1115 	/* Restart procedure response timeout timer */
1116 	llcp_rr_prt_restart(conn);
1117 }
1118 
1119 static void rp_comm_st_idle(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
1120 {
1121 	switch (evt) {
1122 	case RP_COMMON_EVT_RUN:
1123 		ctx->state = RP_COMMON_STATE_WAIT_RX;
1124 		break;
1125 	default:
1126 		/* Ignore other evts */
1127 		break;
1128 	}
1129 }
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
/* Emit (or silently drop) the data-length-change host notification by
 * re-purposing the retained RX node. When generate_ntf is false the node
 * is marked for release and its memory is freed on enqueue.
 */
static void rp_comm_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t generate_ntf)
{
	struct node_rx_pdu *ntf = ctx->node_ref.rx;

	LL_ASSERT(ntf);

	/* This should be an 'old' RX node, so put/sched when done */
	LL_ASSERT(ntf->hdr.type == NODE_RX_TYPE_RETAIN);

	if (generate_ntf) {
		struct pdu_data *pdu = (struct pdu_data *)ntf->pdu;

		LL_ASSERT(ctx->proc == PROC_DATA_LENGTH_UPDATE);

		ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
		ntf->hdr.handle = conn->lll.handle;
		llcp_ntf_encode_length_change(conn, pdu);
	} else {
		/* No NTF to be generated - release the node memory instead */
		ntf->hdr.type = NODE_RX_TYPE_RELEASE;
	}

	/* Enqueue notification towards LL - releases mem if no ntf */
	ll_rx_put_sched(ntf->hdr.link, ntf);
}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1158 
1159 static bool rp_comm_tx_proxy(struct ll_conn *conn, struct proc_ctx *ctx, const bool complete)
1160 {
1161 	if (llcp_rr_ispaused(conn) || !llcp_tx_alloc_peek(conn, ctx)) {
1162 		ctx->state = RP_COMMON_STATE_WAIT_TX;
1163 		return false;
1164 	}
1165 
1166 	rp_comm_tx(conn, ctx);
1167 	ctx->state = RP_COMMON_STATE_WAIT_TX_ACK;
1168 	if (complete) {
1169 		llcp_rr_complete(conn);
1170 		ctx->state = RP_COMMON_STATE_IDLE;
1171 	}
1172 
1173 	return true;
1174 }
1175 
/* Produce the response (if any) for the active remote procedure and advance
 * the FSM. Procedures with no response PDU complete immediately; procedures
 * that transmit go through rp_comm_tx_proxy(), which parks the FSM in
 * WAIT_TX when paused or out of tx buffers (this handler re-runs on the
 * next RUN event).
 */
static void rp_comm_send_rsp(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_LE_PING)
	case PROC_LE_PING:
		/* Always respond on remote ping */
		rp_comm_tx_proxy(conn, ctx, true);
		break;
#endif /* CONFIG_BT_CTLR_LE_PING */
	case PROC_FEATURE_EXCHANGE:
		/* Always respond on remote feature exchange */
		rp_comm_tx_proxy(conn, ctx, true);
		break;
	case PROC_VERSION_EXCHANGE:
		/* The Link Layer shall only queue for transmission a maximum of one
		 * LL_VERSION_IND PDU during a connection.
		 * If the Link Layer receives an LL_VERSION_IND PDU and has already sent an
		 * LL_VERSION_IND PDU then the Link Layer shall not send another
		 * LL_VERSION_IND PDU to the peer device.
		 */
		if (!conn->llcp.vex.sent) {
			/* Only mark as sent once the PDU was actually queued */
			if (rp_comm_tx_proxy(conn, ctx, true)) {
				conn->llcp.vex.sent = 1;
			}
		} else {
			/* Invalid behaviour
			 * A procedure already sent a LL_VERSION_IND and received a LL_VERSION_IND.
			 * Ignore and complete the procedure.
			 */
			llcp_rr_complete(conn);
			ctx->state = RP_COMMON_STATE_IDLE;
		}

		break;
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_CENTRAL)
	case PROC_MIN_USED_CHANS:
		/*
		 * Spec says (5.2, Vol.6, Part B, Section 5.1.11):
		 *     The procedure has completed when the Link Layer acknowledgment of the
		 *     LL_MIN_USED_CHANNELS_IND PDU is sent or received.
		 * In effect, for this procedure, this is equivalent to RX of PDU
		 *
		 * Also:
		 *     If the Link Layer receives an LL_MIN_USED_CHANNELS_IND PDU, it should ensure
		 *     that, whenever the Peripheral-to-Central PHY is one of those specified,
		 *     the connection uses at least the number of channels given in the
		 *     MinUsedChannels field of the PDU.
		 *
		 * The 'should' is here interpreted as 'permission' to do nothing
		 *
		 * Future improvement could implement logic to support this
		 */

		llcp_rr_complete(conn);
		ctx->state = RP_COMMON_STATE_IDLE;
		break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_CENTRAL */
	case PROC_TERMINATE:
#if defined(CONFIG_BT_CENTRAL)
		if (conn->lll.role == BT_HCI_ROLE_CENTRAL) {
			/* No response, but postpone terminate until next event
			 * to ensure acking the reception of TERMINATE_IND
			 */
			ctx->state = RP_COMMON_STATE_POSTPONE_TERMINATE;
			break;
		}
#endif
#if defined(CONFIG_BT_PERIPHERAL)
		/* Terminate right away */
		rp_comm_terminate(conn, ctx);
#endif
		break;
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	case PROC_CIS_TERMINATE:
		/* No response */
		llcp_rr_complete(conn);
		ctx->state = RP_COMMON_STATE_IDLE;

		break;
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO || CONFIG_BT_CTLR_PERIPHERAL_ISO */
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case PROC_DATA_LENGTH_UPDATE:
		/* complete == false: procedure finishes on tx ack (DLE applied there) */
		rp_comm_tx_proxy(conn, ctx, false);
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
	case PROC_CTE_REQ:
		/* Also hold off while a CTE_REQ is flagged as the paused command */
		if (llcp_rr_ispaused(conn) ||
		    !llcp_tx_alloc_peek(conn, ctx) ||
		    (llcp_rr_get_paused_cmd(conn) == PROC_CTE_REQ)) {
			ctx->state = RP_COMMON_STATE_WAIT_TX;
		} else {
			/* Pause remote PHY update while the CTE response is in flight;
			 * cleared again on tx ack (see rp_comm_st_wait_tx_ack)
			 */
			llcp_rr_set_paused_cmd(conn, PROC_PHY_UPDATE);
			rp_comm_tx(conn, ctx);
			ctx->state = RP_COMMON_STATE_WAIT_TX_ACK;
		}
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
	case PROC_SCA_UPDATE:
		/* Always respond to remote SCA */
		rp_comm_tx_proxy(conn, ctx, false);
		break;
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */
	default:
		/* Unknown procedure */
		LL_ASSERT(0);
	}
}
1285 
1286 static void rp_comm_st_wait_rx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
1287 {
1288 	switch (evt) {
1289 	case RP_COMMON_EVT_REQUEST:
1290 		rp_comm_rx_decode(conn, ctx, (struct pdu_data *)param);
1291 		rp_comm_send_rsp(conn, ctx, evt, param);
1292 		break;
1293 	default:
1294 		/* Ignore other evts */
1295 		break;
1296 	}
1297 }
1298 
1299 static void rp_comm_st_postpone_terminate(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
1300 				   void *param)
1301 {
1302 	switch (evt) {
1303 	case RP_COMMON_EVT_RUN:
1304 		LL_ASSERT(ctx->proc == PROC_TERMINATE);
1305 
1306 		/* Note: now we terminate, mimicking legacy LLCP behaviour
1307 		 * A check should be added to ensure that the ack of the terminate_ind was
1308 		 * indeed tx'ed and not scheduled out/postponed by LLL
1309 		 */
1310 		rp_comm_terminate(conn, ctx);
1311 
1312 		break;
1313 	default:
1314 		/* Ignore other evts */
1315 		break;
1316 	}
1317 }
1318 
1319 static void rp_comm_st_wait_tx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
1320 {
1321 	switch (evt) {
1322 	case LP_COMMON_EVT_RUN:
1323 		rp_comm_send_rsp(conn, ctx, evt, param);
1324 		break;
1325 	default:
1326 		/* Ignore other evts */
1327 		break;
1328 	}
1329 }
1330 
1331 static void rp_comm_st_wait_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
1332 				   void *param)
1333 {
1334 	switch (evt) {
1335 	case RP_COMMON_EVT_ACK:
1336 		switch (ctx->proc) {
1337 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
1338 		case PROC_DATA_LENGTH_UPDATE: {
1339 			/* Apply changes in data lengths/times */
1340 			uint8_t dle_changed = ull_dle_update_eff_tx(conn);
1341 
1342 			dle_changed |= ctx->data.dle.ntf_dle;
1343 			llcp_tx_resume_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_DATA_LENGTH);
1344 
1345 			rp_comm_ntf(conn, ctx, dle_changed);
1346 			llcp_rr_complete(conn);
1347 			ctx->state = RP_COMMON_STATE_IDLE;
1348 			break;
1349 		}
1350 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1351 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
1352 		case PROC_CTE_REQ: {
1353 			/* add PHY update pause = false here */
1354 			llcp_rr_set_paused_cmd(conn, PROC_NONE);
1355 			llcp_rr_complete(conn);
1356 			ctx->state = RP_COMMON_STATE_IDLE;
1357 		}
1358 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
1359 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
1360 		case PROC_SCA_UPDATE: {
1361 #if defined(CONFIG_BT_PERIPHERAL)
1362 			if (conn->lll.role == BT_HCI_ROLE_PERIPHERAL) {
1363 				conn->periph.sca = ctx->data.sca_update.sca;
1364 				ull_conn_update_peer_sca(conn);
1365 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
1366 				ull_peripheral_iso_update_peer_sca(conn);
1367 #endif /* defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) */
1368 			}
1369 #endif /* CONFIG_BT_PERIPHERAL */
1370 			llcp_rr_complete(conn);
1371 			ctx->state = RP_COMMON_STATE_IDLE;
1372 		}
1373 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
1374 		default:
1375 			/* Ignore other procedures */
1376 			break;
1377 		}
1378 		break;
1379 	default:
1380 		/* Ignore other evts */
1381 		break;
1382 	}
1383 }
1384 
1385 static void rp_comm_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
1386 				void *param)
1387 {
1388 	switch (ctx->state) {
1389 	case RP_COMMON_STATE_IDLE:
1390 		rp_comm_st_idle(conn, ctx, evt, param);
1391 		break;
1392 	case RP_COMMON_STATE_WAIT_RX:
1393 		rp_comm_st_wait_rx(conn, ctx, evt, param);
1394 		break;
1395 	case RP_COMMON_STATE_POSTPONE_TERMINATE:
1396 		rp_comm_st_postpone_terminate(conn, ctx, evt, param);
1397 		break;
1398 	case RP_COMMON_STATE_WAIT_TX:
1399 		rp_comm_st_wait_tx(conn, ctx, evt, param);
1400 		break;
1401 	case RP_COMMON_STATE_WAIT_TX_ACK:
1402 		rp_comm_st_wait_tx_ack(conn, ctx, evt, param);
1403 		break;
1404 	default:
1405 		/* Unknown state */
1406 		LL_ASSERT(0);
1407 	}
1408 }
1409 
1410 void llcp_rp_comm_rx(struct ll_conn *conn, struct proc_ctx *ctx, struct node_rx_pdu *rx)
1411 {
1412 	rp_comm_execute_fsm(conn, ctx, RP_COMMON_EVT_REQUEST, rx->pdu);
1413 }
1414 
1415 void llcp_rp_comm_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, struct node_tx *tx)
1416 {
1417 	rp_comm_execute_fsm(conn, ctx, RP_COMMON_EVT_ACK, tx->pdu);
1418 }
1419 
/* Initialize a remote common procedure context; the FSM starts in IDLE */
void llcp_rp_comm_init_proc(struct proc_ctx *ctx)
{
	ctx->state = RP_COMMON_STATE_IDLE;
}
1424 
1425 void llcp_rp_comm_run(struct ll_conn *conn, struct proc_ctx *ctx, void *param)
1426 {
1427 	rp_comm_execute_fsm(conn, ctx, RP_COMMON_EVT_RUN, param);
1428 }
1429