1 /*
2 * Copyright (c) 2020 Demant
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/kernel.h>
8
9 #include <zephyr/sys/byteorder.h>
10 #include <zephyr/sys/slist.h>
11 #include <zephyr/sys/util.h>
12
13 #include <zephyr/bluetooth/hci_types.h>
14
15 #include "hal/ccm.h"
16
17 #include "util/util.h"
18 #include "util/mem.h"
19 #include "util/memq.h"
20 #include "util/dbuf.h"
21 #include "util/mayfly.h"
22
23 #include "pdu_df.h"
24 #include "lll/pdu_vendor.h"
25 #include "pdu.h"
26
27 #include "ll.h"
28 #include "ll_settings.h"
29
30 #include "lll.h"
31 #include "ll_feat.h"
32 #include "lll/lll_df_types.h"
33 #include "lll_conn.h"
34 #include "lll_conn_iso.h"
35 #include "lll_sync.h"
36 #include "lll_sync_iso.h"
37 #include "lll_scan.h"
38 #include "lll/lll_adv_types.h"
39 #include "lll_adv.h"
40 #include "lll/lll_adv_pdu.h"
41
42 #include "ull_tx_queue.h"
43
44 #include "isoal.h"
45 #include "ull_iso_types.h"
46 #include "ull_conn_iso_types.h"
47 #include "ull_sync_types.h"
48 #include "ull_scan_types.h"
49 #include "ull_adv_types.h"
50 #include "ull_adv_internal.h"
51 #include "ull_iso_internal.h"
52 #include "ull_conn_iso_internal.h"
53 #include "ull_peripheral_iso_internal.h"
54
55 #include "ull_conn_types.h"
56 #include "ull_chan_internal.h"
57 #include "ull_llcp.h"
58 #include "ull_conn_internal.h"
59 #include "ull_internal.h"
60 #include "ull_sync_internal.h"
61 #include "ull_llcp_features.h"
62 #include "ull_llcp_internal.h"
63
64 #include <soc.h>
65 #include "hal/debug.h"
66
/* LLCP Local Procedure FSM states */
enum {
	/* No local common procedure active */
	LP_COMMON_STATE_IDLE = LLCP_STATE_IDLE,
	/* Waiting for a TX node to become available (or for un-pause) */
	LP_COMMON_STATE_WAIT_TX,
	/* Waiting for LLL ack of the transmitted request PDU */
	LP_COMMON_STATE_WAIT_TX_ACK,
	/* Waiting for the peer's response PDU */
	LP_COMMON_STATE_WAIT_RX,
	/* Waiting for a notification node towards the Host to become available */
	LP_COMMON_STATE_WAIT_NTF_AVAIL,
};
75
/* LLCP Local Procedure Common FSM events.
 * NOTE: only RUN, RESPONSE and ACK are dispatched by the state handlers
 * visible in this file; the remaining events are part of the common event
 * vocabulary shared with other procedure FSMs.
 */
enum {
	/* Procedure run */
	LP_COMMON_EVT_RUN,

	/* Response received */
	LP_COMMON_EVT_RESPONSE,

	/* Reject response received */
	LP_COMMON_EVT_REJECT,

	/* Unknown response received */
	LP_COMMON_EVT_UNKNOWN,

	/* Instant collision detected */
	LP_COMMON_EVT_COLLISION,

	/* Ack received */
	LP_COMMON_EVT_ACK,
};
96
/* LLCP Remote Procedure Common FSM states */
enum {
	/* No remote common procedure active */
	RP_COMMON_STATE_IDLE = LLCP_STATE_IDLE,
	/* Waiting for the peer's request PDU */
	RP_COMMON_STATE_WAIT_RX,
	/* Termination deferred (e.g. until resources allow) */
	RP_COMMON_STATE_POSTPONE_TERMINATE,
	/* Waiting for a TX node for the response PDU */
	RP_COMMON_STATE_WAIT_TX,
	/* Waiting for LLL ack of the transmitted response PDU */
	RP_COMMON_STATE_WAIT_TX_ACK,
};
/* LLCP Remote Procedure Common FSM events */
enum {
	/* Procedure run */
	RP_COMMON_EVT_RUN,

	/* Ack of the transmitted response received */
	RP_COMMON_EVT_ACK,

	/* Request PDU received from the peer */
	RP_COMMON_EVT_REQUEST,
};
116
117
118 static void lp_comm_ntf(struct ll_conn *conn, struct proc_ctx *ctx);
119 static void lp_comm_terminate_invalid_pdu(struct ll_conn *conn, struct proc_ctx *ctx);
120
121 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
122 /**
123 * @brief Stop and tear down a connected ISO stream
124 * This function may be called to tear down a CIS.
125 *
126 * @param cig_id ID of specific ISO group
127 * @param cis_id ID of connected ISO stream to stop
128 * @param reason Termination reason
129 */
llcp_cis_stop_by_id(uint8_t cig_id,uint8_t cis_id,uint8_t reason)130 static void llcp_cis_stop_by_id(uint8_t cig_id, uint8_t cis_id, uint8_t reason)
131 {
132 struct ll_conn_iso_group *cig = ll_conn_iso_group_get_by_id(cig_id);
133
134 if (cig) {
135 struct ll_conn_iso_stream *cis;
136 uint16_t cis_handle = UINT16_MAX;
137
138 /* Look through CIS's of specified group */
139 cis = ll_conn_iso_stream_get_by_group(cig, &cis_handle);
140 while (cis && cis->cis_id != cis_id) {
141 /* Get next CIS */
142 cis = ll_conn_iso_stream_get_by_group(cig, &cis_handle);
143 }
144 if (cis && cis->lll.handle == cis_handle) {
145 ull_conn_iso_cis_stop(cis, NULL, reason);
146 }
147 }
148 }
149 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO || CONFIG_BT_CTLR_PERIPHERAL_ISO */
150
151 /*
152 * LLCP Local Procedure Common FSM
153 */
154
/* Encode and enqueue the request PDU for the active local common procedure.
 *
 * Allocates a TX node (asserts on failure), encodes the LL Control PDU
 * matching ctx->proc and records the expected response opcode in
 * ctx->rx_opcode (PDU_DATA_LLCTRL_TYPE_UNUSED for procedures that complete
 * on tx-ack instead of a response). The PDU is then enqueued towards LLL
 * and the procedure response timeout is restarted.
 */
static void lp_comm_tx(struct ll_conn *conn, struct proc_ctx *ctx)
{
	struct node_tx *tx;
	struct pdu_data *pdu;

	/* Allocate tx node */
	tx = llcp_tx_alloc(conn, ctx);
	LL_ASSERT(tx);

	pdu = (struct pdu_data *)tx->pdu;

	/* Encode LL Control PDU */
	switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_LE_PING)
	case PROC_LE_PING:
		llcp_pdu_encode_ping_req(pdu);
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_PING_RSP;
		break;
#endif /* CONFIG_BT_CTLR_LE_PING */
	case PROC_FEATURE_EXCHANGE:
		llcp_pdu_encode_feature_req(conn, pdu);
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_FEATURE_RSP;
		break;
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_PERIPHERAL)
	case PROC_MIN_USED_CHANS:
		llcp_pdu_encode_min_used_chans_ind(ctx, pdu);
		/* Ind-style procedure: completes on tx-ack, no response expected */
		ctx->node_ref.tx_ack = tx;
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
		break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_PERIPHERAL */
	case PROC_VERSION_EXCHANGE:
		llcp_pdu_encode_version_ind(pdu);
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_VERSION_IND;
		break;
	case PROC_TERMINATE:
		llcp_pdu_encode_terminate_ind(ctx, pdu);
		/* Ind-style procedure: completes on tx-ack, no response expected */
		ctx->node_ref.tx_ack = tx;
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
		break;
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	case PROC_CIS_TERMINATE:
		llcp_pdu_encode_cis_terminate_ind(ctx, pdu);
		/* Ind-style procedure: completes on tx-ack, no response expected */
		ctx->node_ref.tx_ack = tx;
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
		break;
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO || CONFIG_BT_CTLR_PERIPHERAL_ISO */
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case PROC_DATA_LENGTH_UPDATE:
		llcp_pdu_encode_length_req(conn, pdu);
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_LENGTH_RSP;
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	case PROC_CTE_REQ:
		llcp_pdu_encode_cte_req(ctx, pdu);
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_CTE_RSP;
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
	case PROC_SCA_UPDATE:
		llcp_pdu_encode_clock_accuracy_req(ctx, pdu);
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_CLOCK_ACCURACY_RSP;
		break;
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */
	default:
		/* Unknown procedure */
		LL_ASSERT(0);
	}

	ctx->tx_opcode = pdu->llctrl.opcode;

	/* Enqueue LL Control PDU towards LLL */
	llcp_tx_enqueue(conn, tx);

	/* Restart procedure response timeout timer */
	if (ctx->proc != PROC_TERMINATE) {
		/* Use normal timeout value of 40s */
		llcp_lr_prt_restart(conn);
	} else {
		/* Use supervision timeout value
		 * NOTE: As the supervision timeout is at most 32s the normal procedure response
		 * timeout of 40s will never come into play for the ACL Termination procedure.
		 */
		const uint32_t conn_interval_us = conn->lll.interval * CONN_INT_UNIT_US;
		const uint16_t sto_reload = RADIO_CONN_EVENTS(
			(conn->supervision_timeout * 10U * 1000U),
			conn_interval_us);
		llcp_lr_prt_restart_with_value(conn, sto_reload);
	}
}
245
lp_comm_ntf_feature_exchange(struct ll_conn * conn,struct proc_ctx * ctx,struct pdu_data * pdu)246 static void lp_comm_ntf_feature_exchange(struct ll_conn *conn, struct proc_ctx *ctx,
247 struct pdu_data *pdu)
248 {
249 switch (ctx->response_opcode) {
250 case PDU_DATA_LLCTRL_TYPE_FEATURE_RSP:
251 llcp_ntf_encode_feature_rsp(conn, pdu);
252 break;
253 case PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP:
254 llcp_ntf_encode_unknown_rsp(ctx, pdu);
255 break;
256 default:
257 /* Unexpected PDU, should not get through, so ASSERT */
258 LL_ASSERT(0);
259 }
260 }
261
lp_comm_ntf_version_ind(struct ll_conn * conn,struct proc_ctx * ctx,struct pdu_data * pdu)262 static void lp_comm_ntf_version_ind(struct ll_conn *conn, struct proc_ctx *ctx,
263 struct pdu_data *pdu)
264 {
265 switch (ctx->response_opcode) {
266 case PDU_DATA_LLCTRL_TYPE_VERSION_IND:
267 llcp_ntf_encode_version_ind(conn, pdu);
268 break;
269 default:
270 /* Unexpected PDU, should not get through, so ASSERT */
271 LL_ASSERT(0);
272 }
273 }
274
275 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
/* Encode a Data Length Change notification towards the Host.
 * NOTE: ctx is unused; kept for signature parity with the other ntf encoders.
 */
static void lp_comm_ntf_length_change(struct ll_conn *conn, struct proc_ctx *ctx,
				      struct pdu_data *pdu)
{
	llcp_ntf_encode_length_change(conn, pdu);
}
281 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
282
283 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
284
/* Finalize the local CTE REQ procedure: un-pause remote command handling
 * paused for CTE REQ and complete the local request.
 */
static void lp_comm_complete_cte_req_finalize(struct ll_conn *conn)
{
	llcp_rr_set_paused_cmd(conn, PROC_NONE);
	llcp_lr_complete(conn);
}
290
lp_comm_ntf_cte_req(struct ll_conn * conn,struct proc_ctx * ctx,struct pdu_data * pdu)291 static void lp_comm_ntf_cte_req(struct ll_conn *conn, struct proc_ctx *ctx, struct pdu_data *pdu)
292 {
293 switch (ctx->response_opcode) {
294 case PDU_DATA_LLCTRL_TYPE_CTE_RSP:
295 /* Notify host that received LL_CTE_RSP does not have CTE */
296 if (!ctx->data.cte_remote_rsp.has_cte) {
297 llcp_ntf_encode_cte_req(pdu);
298 }
299 break;
300 case PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP:
301 llcp_ntf_encode_unknown_rsp(ctx, pdu);
302 break;
303 case PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND:
304 llcp_ntf_encode_reject_ext_ind(ctx, pdu);
305 break;
306 default:
307 /* Unexpected PDU, should not get through, so ASSERT */
308 LL_ASSERT(0);
309 }
310 }
311
/* Notify the Host about the CTE REQ outcome, disable the CTE REQ procedure
 * on this connection and return the FSM to IDLE.
 */
static void lp_comm_ntf_cte_req_tx(struct ll_conn *conn, struct proc_ctx *ctx)
{
	lp_comm_ntf(conn, ctx);
	ull_cp_cte_req_set_disable(conn);
	ctx->state = LP_COMMON_STATE_IDLE;
}
318
/* Handle completion of a locally initiated CTE REQ procedure.
 *
 * Depending on the received response the procedure either re-arms the
 * periodic request, notifies the Host (and disables further requests), or
 * terminates the connection on an illegal response. Whenever the state ends
 * up back in IDLE the procedure is finalized via
 * lp_comm_complete_cte_req_finalize().
 */
static void lp_comm_complete_cte_req(struct ll_conn *conn, struct proc_ctx *ctx)
{
	if (conn->llcp.cte_req.is_enabled) {
		if (ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_CTE_RSP) {
			if (ctx->data.cte_remote_rsp.has_cte) {
				if (conn->llcp.cte_req.req_interval != 0U) {
					/* Periodic request: re-arm the expiry counter */
					conn->llcp.cte_req.req_expire =
						conn->llcp.cte_req.req_interval;
				} else {
					/* Disable the CTE request procedure when it is completed in
					 * case it was executed as non-periodic.
					 */
					conn->llcp.cte_req.is_enabled = 0U;
				}
				ctx->state = LP_COMMON_STATE_IDLE;
			} else {
				lp_comm_ntf_cte_req_tx(conn, ctx);
			}
		} else if (ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND &&
			   ctx->reject_ext_ind.reject_opcode == PDU_DATA_LLCTRL_TYPE_CTE_REQ) {
			lp_comm_ntf_cte_req_tx(conn, ctx);
		} else if (ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP &&
			   ctx->unknown_response.type == PDU_DATA_LLCTRL_TYPE_CTE_REQ) {
			/* CTE response is unsupported in peer, so disable locally for this
			 * connection
			 */
			feature_unmask_features(conn, LL_FEAT_BIT_CONNECTION_CTE_REQ);
			lp_comm_ntf_cte_req_tx(conn, ctx);
		} else if (ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_UNUSED) {
			/* This path is related with handling disable the CTE REQ when PHY
			 * has been changed to CODED PHY. BT 5.3 Core Vol 4 Part E 7.8.85
			 * says CTE REQ has to be automatically disabled as if it had been requested
			 * by Host. There is no notification send to Host.
			 */
			ull_cp_cte_req_set_disable(conn);
			ctx->state = LP_COMMON_STATE_IDLE;
		} else {
			/* Illegal response opcode, internally changes state to
			 * LP_COMMON_STATE_IDLE
			 */
			lp_comm_terminate_invalid_pdu(conn, ctx);
		}
	} else {
		/* The CTE_REQ was disabled by Host after the request was send.
		 * It does not matter if response has arrived, it should not be handled.
		 */
		ctx->state = LP_COMMON_STATE_IDLE;
	}

	if (ctx->state == LP_COMMON_STATE_IDLE) {
		lp_comm_complete_cte_req_finalize(conn);
	}
}
372 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
373
374 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
lp_comm_ntf_sca(struct node_rx_pdu * ntf,struct proc_ctx * ctx,struct pdu_data * pdu)375 static void lp_comm_ntf_sca(struct node_rx_pdu *ntf, struct proc_ctx *ctx, struct pdu_data *pdu)
376 {
377 struct node_rx_sca *pdu_sca = (struct node_rx_sca *)pdu;
378
379 ntf->hdr.type = NODE_RX_TYPE_REQ_PEER_SCA_COMPLETE;
380 pdu_sca->status = ctx->data.sca_update.error_code;
381 pdu_sca->sca = ctx->data.sca_update.sca;
382 }
383 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
384
/* Generate a Host notification for the completed local procedure.
 *
 * Re-uses the procedure's attached RX node when present ('piggy-back'),
 * in which case the node is delivered on the ull_cp_rx return path;
 * otherwise a dedicated NTF node is allocated (asserts on failure) and
 * enqueued towards LL here.
 */
static void lp_comm_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
{
	uint8_t piggy_back = 1U;
	struct node_rx_pdu *ntf;
	struct pdu_data *pdu;

	/* Take ownership of the RX node, if one was attached */
	ntf = ctx->node_ref.rx;
	ctx->node_ref.rx = NULL;
	if (!ntf) {
		/* Allocate ntf node */
		ntf = llcp_ntf_alloc();
		LL_ASSERT(ntf);
		piggy_back = 0U;
	}

	/* Default header; lp_comm_ntf_sca() overrides hdr.type for SCA */
	ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
	ntf->hdr.handle = conn->lll.handle;
	pdu = (struct pdu_data *)ntf->pdu;

	switch (ctx->proc) {
	case PROC_FEATURE_EXCHANGE:
		lp_comm_ntf_feature_exchange(conn, ctx, pdu);
		break;
	case PROC_VERSION_EXCHANGE:
		lp_comm_ntf_version_ind(conn, ctx, pdu);
		break;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case PROC_DATA_LENGTH_UPDATE:
		lp_comm_ntf_length_change(conn, ctx, pdu);
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	case PROC_CTE_REQ:
		lp_comm_ntf_cte_req(conn, ctx, pdu);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
	case PROC_SCA_UPDATE:
		lp_comm_ntf_sca(ntf, ctx, pdu);
		break;
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */
	default:
		LL_ASSERT(0);
		break;
	}

	if (!piggy_back) {
		/* Enqueue notification towards LL, unless we reuse RX node,
		 * in which case it is handled on the ull_cp_rx return path
		 */
		ll_rx_put_sched(ntf->hdr.link, ntf);
	}
}
440
/* Invalid PDU received for the active procedure: complete the local request,
 * return the FSM to IDLE and mark the connection for termination with
 * LMP PDU Not Allowed.
 */
static void lp_comm_terminate_invalid_pdu(struct ll_conn *conn, struct proc_ctx *ctx)
{
	conn->llcp_terminate.reason_final = BT_HCI_ERR_LMP_PDU_NOT_ALLOWED;
	llcp_lr_complete(conn);
	ctx->state = LP_COMMON_STATE_IDLE;
}
449
/* Handle completion of the active local common procedure.
 *
 * Called on RX of the response PDU (or on tx-ack for ind-style procedures).
 * Generates Host notifications where applicable, completes the local request
 * and returns the FSM to IDLE - or terminates the connection when an illegal
 * response opcode was received.
 */
static void lp_comm_complete(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_LE_PING)
	case PROC_LE_PING:
		if (ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP ||
		    ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_PING_RSP) {
			llcp_lr_complete(conn);
			ctx->state = LP_COMMON_STATE_IDLE;
		} else {
			/* Illegal response opcode */
			lp_comm_terminate_invalid_pdu(conn, ctx);
		}
		break;
#endif /* CONFIG_BT_CTLR_LE_PING */
	case PROC_FEATURE_EXCHANGE:
		if ((ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP ||
		     ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_FEATURE_RSP)) {
			/* Only notify the Host when it initiated the exchange */
			if (ctx->data.fex.host_initiated) {
				lp_comm_ntf(conn, ctx);
			}
			llcp_lr_complete(conn);
			ctx->state = LP_COMMON_STATE_IDLE;
		} else {
			/* Illegal response opcode */
			lp_comm_terminate_invalid_pdu(conn, ctx);
		}
		break;
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_PERIPHERAL)
	case PROC_MIN_USED_CHANS:
		/* Ind-style procedure: complete on tx-ack, no notification */
		llcp_lr_complete(conn);
		ctx->state = LP_COMMON_STATE_IDLE;
		break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_PERIPHERAL */
	case PROC_VERSION_EXCHANGE:
		if (ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_VERSION_IND) {
			if (ctx->node_ref.rx || llcp_ntf_alloc_is_available()) {
				/* Either this is a piggy-back or there is a NTF node avail */
				lp_comm_ntf(conn, ctx);
				llcp_lr_complete(conn);
				ctx->state = LP_COMMON_STATE_IDLE;
			} else {
				/* Handle procedure TO, in case we end up waiting 'forever' for
				 * NTF buffer. This is a simple way to implement mechanism to
				 * trigger disconnect in case NTF buffer 'never' becomes avail
				 * see elaborate note in lp_comm_st_wait_ntf_avail()
				 */
				llcp_lr_prt_restart(conn);
				ctx->state = LP_COMMON_STATE_WAIT_NTF_AVAIL;
			}
		} else {
			/* Illegal response opcode */
			lp_comm_terminate_invalid_pdu(conn, ctx);
		}
		break;
	case PROC_TERMINATE:
		/* No notification */
		llcp_lr_complete(conn);
		ctx->state = LP_COMMON_STATE_IDLE;

		/* Mark the connection for termination */
		conn->llcp_terminate.reason_final = BT_HCI_ERR_LOCALHOST_TERM_CONN;
		break;
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	case PROC_CIS_TERMINATE:
		/* No notification */
		llcp_lr_complete(conn);
		ctx->state = LP_COMMON_STATE_IDLE;
		break;
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO || CONFIG_BT_CTLR_PERIPHERAL_ISO */
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case PROC_DATA_LENGTH_UPDATE:
		if (ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_LENGTH_RSP) {
			/* Apply changes in data lengths/times */
			uint8_t dle_changed = ull_dle_update_eff(conn);

			/* Only notify the Host on an effective change */
			if (dle_changed) {
				lp_comm_ntf(conn, ctx);
			}
			llcp_lr_complete(conn);
			ctx->state = LP_COMMON_STATE_IDLE;
		} else if (ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP) {
			/* Peer does not accept DLU, so disable on current connection */
			feature_unmask_features(conn, LL_FEAT_BIT_DLE);

			llcp_lr_complete(conn);
			ctx->state = LP_COMMON_STATE_IDLE;
		} else {
			/* Illegal response opcode */
			lp_comm_terminate_invalid_pdu(conn, ctx);
			break;
		}

		if (!ull_cp_remote_dle_pending(conn)) {
			/* Resume data, but only if there is no remote procedure pending RSP
			 * in which case, the RSP tx-ACK will resume data
			 */
			llcp_tx_resume_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_DATA_LENGTH);
		}
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	case PROC_CTE_REQ:
		lp_comm_complete_cte_req(conn, ctx);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
	case PROC_SCA_UPDATE:
		switch (ctx->response_opcode) {
		case PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP:
			/* Peer does not support SCA update, so disable on current connection */
			feature_unmask_features(conn, LL_FEAT_BIT_SCA_UPDATE);
			ctx->data.sca_update.error_code = BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
			/* Fall through to complete procedure */
		case PDU_DATA_LLCTRL_TYPE_CLOCK_ACCURACY_RSP:
#if defined(CONFIG_BT_PERIPHERAL)
			/* As peripheral, adopt the peer's new SCA when it changed */
			if (conn->lll.role == BT_HCI_ROLE_PERIPHERAL &&
			    !ctx->data.sca_update.error_code &&
			    conn->periph.sca != ctx->data.sca_update.sca) {
				conn->periph.sca = ctx->data.sca_update.sca;
				ull_conn_update_peer_sca(conn);
#if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
				ull_peripheral_iso_update_peer_sca(conn);
#endif /* defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) */
			}
#endif /* CONFIG_BT_PERIPHERAL */
			lp_comm_ntf(conn, ctx);
			llcp_lr_complete(conn);
			ctx->state = LP_COMMON_STATE_IDLE;
			break;
		default:
			/* Illegal response opcode */
			lp_comm_terminate_invalid_pdu(conn, ctx);
		}
		break;
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */
	default:
		/* Unknown procedure */
		LL_ASSERT(0);
	}
}
591
592 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
lp_cis_terminated(struct ll_conn * conn)593 static bool lp_cis_terminated(struct ll_conn *conn)
594 {
595 return conn->llcp.cis.terminate_ack;
596 }
597 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO || CONFIG_BT_CTLR_PERIPHERAL_ISO */
598
lp_comm_tx_proxy(struct ll_conn * conn,struct proc_ctx * ctx,const bool extra_cond)599 static bool lp_comm_tx_proxy(struct ll_conn *conn, struct proc_ctx *ctx, const bool extra_cond)
600 {
601 if (extra_cond || llcp_lr_ispaused(conn) || !llcp_tx_alloc_peek(conn, ctx)) {
602 ctx->state = LP_COMMON_STATE_WAIT_TX;
603 } else {
604 lp_comm_tx(conn, ctx);
605
606 /* Select correct state, depending on TX ack handling 'request' */
607 ctx->state = ctx->node_ref.tx_ack ?
608 LP_COMMON_STATE_WAIT_TX_ACK : LP_COMMON_STATE_WAIT_RX;
609 return true;
610 }
611 return false;
612 }
613
/* Try to send the request PDU for the active local procedure.
 *
 * Applies per-procedure preconditions (only one LL_VERSION_IND per
 * connection, DLE feature support, CIS already stopped, CTE REQ vs. CODED
 * PHY) and either transmits via lp_comm_tx_proxy()/lp_comm_tx() or
 * completes/parks the procedure accordingly.
 */
static void lp_comm_send_req(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_LE_PING)
	case PROC_LE_PING:
		lp_comm_tx_proxy(conn, ctx, false);
		break;
#endif /* CONFIG_BT_CTLR_LE_PING */
	case PROC_FEATURE_EXCHANGE:
		lp_comm_tx_proxy(conn, ctx, false);
		break;
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_PERIPHERAL)
	case PROC_MIN_USED_CHANS:
		lp_comm_tx_proxy(conn, ctx, false);
		break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_PERIPHERAL */
	case PROC_VERSION_EXCHANGE:
		/* The Link Layer shall only queue for transmission a maximum of
		 * one LL_VERSION_IND PDU during a connection.
		 */
		if (!conn->llcp.vex.sent) {
			if (lp_comm_tx_proxy(conn, ctx, false)) {
				conn->llcp.vex.sent = 1;
			}
		} else {
			/* Already exchanged; complete locally with the cached result */
			ctx->response_opcode = PDU_DATA_LLCTRL_TYPE_VERSION_IND;
			/* Clear node_ref to signal no NTF piggy-backing */
			ctx->node_ref.rx = NULL;
			lp_comm_complete(conn, ctx, evt, param);
		}
		break;
	case PROC_TERMINATE:
		if (!llcp_tx_alloc_peek(conn, ctx)) {
			ctx->state = LP_COMMON_STATE_WAIT_TX;
		} else {
			lp_comm_tx(conn, ctx);
			ctx->data.term.error_code = BT_HCI_ERR_LOCALHOST_TERM_CONN;
			ctx->state = LP_COMMON_STATE_WAIT_TX_ACK;
		}
		break;
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	case PROC_CIS_TERMINATE:
		/* Hold off the LL_CIS_TERMINATE_IND until the CIS is stopped */
		lp_comm_tx_proxy(conn, ctx, !lp_cis_terminated(conn));
		break;
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO || CONFIG_BT_CTLR_PERIPHERAL_ISO */
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case PROC_DATA_LENGTH_UPDATE:
		if (feature_dle(conn) && !ull_cp_remote_dle_pending(conn)) {
			if (llcp_lr_ispaused(conn) || !llcp_tx_alloc_peek(conn, ctx)) {
				ctx->state = LP_COMMON_STATE_WAIT_TX;
			} else {
				/* Pause data tx, to ensure we can later (on RSP rx-ack)
				 * update DLE without conflicting with out-going LL Data PDUs
				 * See BT Core 5.2 Vol6: B-4.5.10 & B-5.1.9
				 */
				llcp_tx_pause_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_DATA_LENGTH);
				lp_comm_tx(conn, ctx);
				ctx->state = LP_COMMON_STATE_WAIT_RX;
			}
		} else {
			/* REQ was received from peer and RSP not yet sent
			 * lets piggy-back on RSP instead af sending REQ
			 * thus we can complete local req
			 *
			 * OR
			 *
			 * Data Length Update procedure no longer supported
			 */
			llcp_lr_complete(conn);
			ctx->state = LP_COMMON_STATE_IDLE;
		}
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	case PROC_CTE_REQ:
		if (conn->llcp.cte_req.is_enabled &&
#if defined(CONFIG_BT_CTLR_PHY)
		    conn->lll.phy_rx != PHY_CODED) {
#else
		    1) {
#endif /* CONFIG_BT_CTLR_PHY */
			lp_comm_tx_proxy(conn, ctx,
					 llcp_rr_get_paused_cmd(conn) == PROC_CTE_REQ);
		} else {
			/* The PHY was changed to CODED when the request was waiting in a local
			 * request queue.
			 *
			 * Use of pair: proc PROC_CTE_REQ and rx_opcode PDU_DATA_LLCTRL_TYPE_UNUSED
			 * to complete the procedure before sending a request to peer.
			 * This is a special complete execution path to disable the procedure
			 * due to change of RX PHY to CODED.
			 */
			ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
			ctx->state = LP_COMMON_STATE_IDLE;
			llcp_lr_complete(conn);
		}
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
	case PROC_SCA_UPDATE:
		lp_comm_tx_proxy(conn, ctx, false);
		break;
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */
	default:
		/* Unknown procedure */
		LL_ASSERT(0);
	}
}
722
723 static void lp_comm_st_idle(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
724 {
725 switch (evt) {
726 case LP_COMMON_EVT_RUN:
727 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
728 if (ctx->proc == PROC_CIS_TERMINATE) {
729 /* We're getting going on a CIS Terminate */
730 /* So we should start by requesting Terminate for the CIS in question */
731
732 /* Clear terminate ack flag, used to signal CIS Terminated */
733 conn->llcp.cis.terminate_ack = 0U;
734 llcp_cis_stop_by_id(ctx->data.cis_term.cig_id, ctx->data.cis_term.cis_id,
735 BT_HCI_ERR_LOCALHOST_TERM_CONN);
736 }
737 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO || CONFIG_BT_CTLR_PERIPHERAL_ISO */
738 lp_comm_send_req(conn, ctx, evt, param);
739 break;
740 default:
741 /* Ignore other evts */
742 break;
743 }
744 }
745
746 static void lp_comm_st_wait_tx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
747 void *param)
748 {
749 switch (evt) {
750 case LP_COMMON_EVT_RUN:
751 lp_comm_send_req(conn, ctx, evt, param);
752 break;
753 default:
754 /* Ignore other evts */
755 break;
756 }
757 }
758
/* WAIT_TX_ACK state handler: complete the procedures that finish on the
 * tx-ack of their ind-style PDU (MIN_USED_CHANS, TERMINATE, CIS_TERMINATE).
 */
static void lp_comm_st_wait_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				   void *param)
{
	switch (evt) {
	case LP_COMMON_EVT_ACK:
		switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_PERIPHERAL)
		case PROC_MIN_USED_CHANS:
			lp_comm_complete(conn, ctx, evt, param);
			break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_PERIPHERAL */
		case PROC_TERMINATE:
			lp_comm_complete(conn, ctx, evt, param);
			break;
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
		case PROC_CIS_TERMINATE:
			lp_comm_complete(conn, ctx, evt, param);
			break;
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO || CONFIG_BT_CTLR_PERIPHERAL_ISO */
		default:
			/* Ignore for other procedures */
			break;
		}
		break;
	default:
		/* Ignore other evts */
		break;
	}
}
788
/* Decode a received LL Control PDU as the response of the active local
 * procedure and record its opcode in ctx->response_opcode. Asserts on
 * opcodes that cannot be a response here.
 */
static void lp_comm_rx_decode(struct ll_conn *conn, struct proc_ctx *ctx, struct pdu_data *pdu)
{
	ctx->response_opcode = pdu->llctrl.opcode;

	switch (pdu->llctrl.opcode) {
#if defined(CONFIG_BT_CTLR_LE_PING)
	case PDU_DATA_LLCTRL_TYPE_PING_RSP:
		/* ping_rsp has no data */
		break;
#endif /* CONFIG_BT_CTLR_LE_PING */
	case PDU_DATA_LLCTRL_TYPE_FEATURE_RSP:
		llcp_pdu_decode_feature_rsp(conn, pdu);
#if defined(CONFIG_BT_CTLR_DATA_LENGTH) && defined(CONFIG_BT_CTLR_PHY)
		/* If Coded PHY is now supported we must update local max tx/rx times to reflect */
		if (feature_phy_coded(conn)) {
			ull_dle_max_time_get(conn, &conn->lll.dle.local.max_rx_time,
					     &conn->lll.dle.local.max_tx_time);
		}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH && CONFIG_BT_CTLR_PHY */
		break;
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN)
	case PDU_DATA_LLCTRL_TYPE_MIN_USED_CHAN_IND:
		/* No response expected */
		break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN */
	case PDU_DATA_LLCTRL_TYPE_VERSION_IND:
		llcp_pdu_decode_version_ind(conn, pdu);
		break;
	case PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP:
		llcp_pdu_decode_unknown_rsp(ctx, pdu);
		break;
	case PDU_DATA_LLCTRL_TYPE_TERMINATE_IND:
		/* No response expected */
		LL_ASSERT(0);
		break;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case PDU_DATA_LLCTRL_TYPE_LENGTH_RSP:
		llcp_pdu_decode_length_rsp(conn, pdu);
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	case PDU_DATA_LLCTRL_TYPE_CTE_RSP:
		llcp_pdu_decode_cte_rsp(ctx, pdu);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
	case PDU_DATA_LLCTRL_TYPE_CLOCK_ACCURACY_RSP:
		llcp_pdu_decode_clock_accuracy_rsp(ctx, pdu);
		break;
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */
	case PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND:
		llcp_pdu_decode_reject_ext_ind(ctx, pdu);
		break;
	case PDU_DATA_LLCTRL_TYPE_REJECT_IND:
		/* Empty on purpose, as we don't care about the PDU content, we'll disconnect */
		break;
	default:
		/* Unknown opcode */
		LL_ASSERT(0);
	}
}
850
851 static void lp_comm_st_wait_rx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
852 void *param)
853 {
854 switch (evt) {
855 case LP_COMMON_EVT_RESPONSE:
856 lp_comm_rx_decode(conn, ctx, (struct pdu_data *)param);
857 lp_comm_complete(conn, ctx, evt, param);
858 break;
859 default:
860 /* Ignore other evts */
861 break;
862 }
863 }
864
/* WAIT_NTF_AVAIL state handler: retry delivering the Version Exchange
 * notification on each RUN event, once a NTF node becomes available.
 */
static void lp_comm_st_wait_ntf_avail(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				      void *param)
{
	switch (evt) {
	case LP_COMMON_EVT_RUN:
		switch (ctx->proc) {
		case PROC_VERSION_EXCHANGE:
			/* Note re. procedure timeout handling:
			 * Procedure TO is specifically NOT reset while in wait state, since
			 * the mechanism is being 'hi-jacked' to implement a TO on the NTF wait
			 * This to catch the very unlikely case:
			 * local VERSION IND started after a VERSION IND had already been TX'ed
			 * in which case the local procedure should complete with NTF without
			 * prior TX (ie no procedure TO handling initiated). IF this NTF never
			 * finds buffer avail it would wait forever, but not with proc TO active
			 */
			if (llcp_ntf_alloc_is_available()) {
				lp_comm_ntf(conn, ctx);
				llcp_lr_complete(conn);
				ctx->state = LP_COMMON_STATE_IDLE;
			}
			break;
		default:
			/* If we get here it is not good since only VERSION EXCHANGE procedure
			 * out of the ones handled in ull_llcp_common should end up waiting for
			 * non-piggy-back'ed NTF
			 */
			LL_ASSERT(0);
			break;
		}
		break;
	default:
		break;
	}
}
900
901 static void lp_comm_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
902 void *param)
903 {
904 switch (ctx->state) {
905 case LP_COMMON_STATE_IDLE:
906 lp_comm_st_idle(conn, ctx, evt, param);
907 break;
908 case LP_COMMON_STATE_WAIT_TX:
909 lp_comm_st_wait_tx(conn, ctx, evt, param);
910 break;
911 case LP_COMMON_STATE_WAIT_TX_ACK:
912 lp_comm_st_wait_tx_ack(conn, ctx, evt, param);
913 break;
914 case LP_COMMON_STATE_WAIT_RX:
915 lp_comm_st_wait_rx(conn, ctx, evt, param);
916 break;
917 case LP_COMMON_STATE_WAIT_NTF_AVAIL:
918 lp_comm_st_wait_ntf_avail(conn, ctx, evt, param);
919 break;
920 default:
921 /* Unknown state */
922 LL_ASSERT(0);
923 }
924 }
925
/* Local common procedure: feed the tx-ack of the transmitted PDU to the FSM */
void llcp_lp_comm_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, struct node_tx *tx)
{
	lp_comm_execute_fsm(conn, ctx, LP_COMMON_EVT_ACK, tx->pdu);
}
930
/* Local common procedure: feed a received response PDU to the FSM */
void llcp_lp_comm_rx(struct ll_conn *conn, struct proc_ctx *ctx, struct node_rx_pdu *rx)
{
	lp_comm_execute_fsm(conn, ctx, LP_COMMON_EVT_RESPONSE, rx->pdu);
}
935
/* Local common procedure: run the FSM (called when the procedure is
 * scheduled or needs to retry a blocked step)
 */
void llcp_lp_comm_run(struct ll_conn *conn, struct proc_ctx *ctx, void *param)
{
	lp_comm_execute_fsm(conn, ctx, LP_COMMON_EVT_RUN, param);
}
941
/* Complete the remote ACL Termination procedure and flag the connection for
 * termination with the error code received from the peer.
 */
static void rp_comm_terminate(struct ll_conn *conn, struct proc_ctx *ctx)
{
	llcp_rr_complete(conn);
	ctx->state = RP_COMMON_STATE_IDLE;

	/* Mark the connection for termination */
	conn->llcp_terminate.reason_final = ctx->data.term.error_code;
}
950
951 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
/* Stop the CIS identified in the received LL_CIS_TERMINATE_IND, using the
 * peer-supplied error code as the termination reason.
 */
static void rp_comm_stop_cis(struct proc_ctx *ctx)
{
	llcp_cis_stop_by_id(ctx->data.cis_term.cig_id, ctx->data.cis_term.cis_id,
			    ctx->data.cis_term.error_code);
}
957 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO || CONFIG_BT_CTLR_PERIPHERAL_ISO */
958
959 /*
960 * LLCP Remote Procedure Common FSM
961 */
/* Decode an incoming LL Control PDU for a remote-initiated common procedure
 * and apply the immediate per-opcode side effects (tx pause, CIS stop,
 * effective-DLE update, RX node retention).
 *
 * The received opcode is recorded in ctx->response_opcode. An unknown
 * opcode asserts.
 */
static void rp_comm_rx_decode(struct ll_conn *conn, struct proc_ctx *ctx, struct pdu_data *pdu)
{
	ctx->response_opcode = pdu->llctrl.opcode;

	switch (pdu->llctrl.opcode) {
#if defined(CONFIG_BT_CTLR_LE_PING)
	case PDU_DATA_LLCTRL_TYPE_PING_REQ:
		/* ping_req has no data */
		break;
#endif /* CONFIG_BT_CTLR_LE_PING */
#if defined(CONFIG_BT_PERIPHERAL) || \
	(defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG) && defined(CONFIG_BT_CENTRAL))
#if defined(CONFIG_BT_PERIPHERAL)
	case PDU_DATA_LLCTRL_TYPE_FEATURE_REQ:
#endif /* CONFIG_BT_PERIPHERAL */
#if defined(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG) && defined(CONFIG_BT_CENTRAL)
	case PDU_DATA_LLCTRL_TYPE_PER_INIT_FEAT_XCHG:
#endif /* CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG && CONFIG_BT_CENTRAL */
		llcp_pdu_decode_feature_req(conn, pdu);
#if defined(CONFIG_BT_CTLR_DATA_LENGTH) && defined(CONFIG_BT_CTLR_PHY)
		/* If Coded PHY is now supported we must update local max tx/rx times to reflect */
		if (feature_phy_coded(conn)) {
			ull_dle_max_time_get(conn, &conn->lll.dle.local.max_rx_time,
					     &conn->lll.dle.local.max_tx_time);
		}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH && CONFIG_BT_CTLR_PHY */
		break;
#endif /* CONFIG_BT_PERIPHERAL || (CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG && CONFIG_BT_CENTRAL) */
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_CENTRAL)
	case PDU_DATA_LLCTRL_TYPE_MIN_USED_CHAN_IND:
		llcp_pdu_decode_min_used_chans_ind(conn, pdu);
		break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_CENTRAL */
	case PDU_DATA_LLCTRL_TYPE_VERSION_IND:
		llcp_pdu_decode_version_ind(conn, pdu);
		break;
	case PDU_DATA_LLCTRL_TYPE_TERMINATE_IND:
		llcp_pdu_decode_terminate_ind(ctx, pdu);
		/* Make sure no data is tx'ed after RX of terminate ind */
		llcp_tx_pause_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_TERMINATE);
		break;
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	case PDU_DATA_LLCTRL_TYPE_CIS_TERMINATE_IND:
		llcp_pdu_decode_cis_terminate_ind(ctx, pdu);
		/* Terminate CIS */
		rp_comm_stop_cis(ctx);
		break;
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO || CONFIG_BT_CTLR_PERIPHERAL_ISO */
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case PDU_DATA_LLCTRL_TYPE_LENGTH_REQ:
		llcp_pdu_decode_length_req(conn, pdu);
		/* On reception of REQ mark RSP open for local piggy-back
		 * Pause data tx, to ensure we can later (on RSP tx ack) update TX DLE without
		 * conflicting with out-going LL Data PDUs
		 * See BT Core 5.2 Vol6: B-4.5.10 & B-5.1.9
		 */
		llcp_tx_pause_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_DATA_LENGTH);
		/* Effective RX length/time can be updated immediately on REQ rx;
		 * remember whether a host notification will be needed.
		 */
		ctx->data.dle.ntf_dle = ull_dle_update_eff_rx(conn);

		/* Mark RX pdu to be removed from RX queue, but NOT be released */
		llcp_rx_node_retain(ctx);
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
	case PDU_DATA_LLCTRL_TYPE_CTE_REQ:
		llcp_pdu_decode_cte_req(ctx, pdu);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
	case PDU_DATA_LLCTRL_TYPE_CLOCK_ACCURACY_REQ:
		llcp_pdu_decode_clock_accuracy_req(ctx, pdu);
		break;
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */
	default:
		/* Unknown opcode */
		LL_ASSERT(0);
	}
}
1040
/* Allocate, encode and enqueue the response PDU for the active remote
 * procedure, then restart the procedure response timeout.
 *
 * For procedures whose tx ack needs further processing (data length update,
 * CTE response, SCA update) the tx node is stored in ctx->node_ref.tx_ack.
 * rx_opcode is set to UNUSED in all cases, i.e. no further peer response
 * PDU is awaited by this role. An unknown procedure asserts.
 */
static void rp_comm_tx(struct ll_conn *conn, struct proc_ctx *ctx)
{
	struct node_tx *tx;
	struct pdu_data *pdu;

	/* Allocate tx node */
	tx = llcp_tx_alloc(conn, ctx);
	LL_ASSERT(tx);

	pdu = (struct pdu_data *)tx->pdu;

	/* Encode LL Control PDU */
	switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_LE_PING)
	case PROC_LE_PING:
		llcp_pdu_encode_ping_rsp(pdu);
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
		break;
#endif /* CONFIG_BT_CTLR_LE_PING */
	case PROC_FEATURE_EXCHANGE:
		llcp_pdu_encode_feature_rsp(conn, pdu);
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
		break;
	case PROC_VERSION_EXCHANGE:
		llcp_pdu_encode_version_ind(pdu);
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
		break;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case PROC_DATA_LENGTH_UPDATE:
		llcp_pdu_encode_length_rsp(conn, pdu);
		/* Retain tx node; the ack drives the effective TX DLE update */
		ctx->node_ref.tx_ack = tx;
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
	case PROC_CTE_REQ: {
		uint8_t err_code = 0;

		/* CTE response must be locally enabled by the host */
		if (conn->llcp.cte_rsp.is_enabled == 0) {
			err_code = BT_HCI_ERR_UNSUPP_LL_PARAM_VAL;
		}

#if defined(CONFIG_BT_PHY_UPDATE)
		/* If the PHY update is not possible, then PHY1M is used.
		 * CTE is supported for PHY1M.
		 */
		if (conn->lll.phy_tx == PHY_CODED) {
			err_code = BT_HCI_ERR_INVALID_LL_PARAM;
		}
#endif /* CONFIG_BT_PHY_UPDATE */
		/* Requested CTE type must be among the enabled ones and the
		 * requested minimum length must not exceed the local maximum.
		 */
		if (!(conn->llcp.cte_rsp.cte_types & BIT(ctx->data.cte_remote_req.cte_type)) ||
		    conn->llcp.cte_rsp.max_cte_len < ctx->data.cte_remote_req.min_cte_len) {
			err_code = BT_HCI_ERR_UNSUPP_LL_PARAM_VAL;
		}

		if (!err_code) {
			llcp_pdu_encode_cte_rsp(ctx, pdu);
			ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
		} else {
			/* Reject the CTE request with the chosen error code */
			llcp_pdu_encode_reject_ext_ind(pdu, PDU_DATA_LLCTRL_TYPE_CTE_REQ, err_code);
			ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
		}

		ctx->node_ref.tx_ack = tx;

		break;
	}
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
	case PROC_SCA_UPDATE:
		llcp_pdu_encode_clock_accuracy_rsp(ctx, pdu);
		/* Retain tx node; the ack applies the peer SCA update */
		ctx->node_ref.tx_ack = tx;
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
		break;
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */
	default:
		/* Unknown procedure */
		LL_ASSERT(0);
	}

	ctx->tx_opcode = pdu->llctrl.opcode;

	/* Enqueue LL Control PDU towards LLL */
	llcp_tx_enqueue(conn, tx);

	/* Restart procedure response timeout timer */
	llcp_rr_prt_restart(conn);
}
1129
1130 static void rp_comm_st_idle(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
1131 {
1132 switch (evt) {
1133 case RP_COMMON_EVT_RUN:
1134 ctx->state = RP_COMMON_STATE_WAIT_RX;
1135 break;
1136 default:
1137 /* Ignore other evts */
1138 break;
1139 }
1140 }
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
/* Turn the retained RX node into a data-length-change notification towards
 * the host, or release it when no notification is to be generated.
 */
static void rp_comm_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t generate_ntf)
{
	struct node_rx_pdu *ntf = ctx->node_ref.rx;

	ctx->node_ref.rx = NULL;
	LL_ASSERT(ntf);

	/* This should be an 'old' RX node, so put/sched when done */
	LL_ASSERT(ntf->hdr.type == NODE_RX_TYPE_RETAIN);

	if (generate_ntf) {
		struct pdu_data *pdu = (struct pdu_data *)ntf->pdu;

		ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
		ntf->hdr.handle = conn->lll.handle;
		LL_ASSERT(ctx->proc == PROC_DATA_LENGTH_UPDATE);
		llcp_ntf_encode_length_change(conn, pdu);
	} else {
		/* No NTF to be generated; mark node memory for release */
		ntf->hdr.type = NODE_RX_TYPE_RELEASE;
	}

	/* Enqueue notification towards LL - releases mem if no ntf */
	ll_rx_put_sched(ntf->hdr.link, ntf);
}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1170
1171 static bool rp_comm_tx_proxy(struct ll_conn *conn, struct proc_ctx *ctx, const bool complete)
1172 {
1173 if (llcp_rr_ispaused(conn) || !llcp_tx_alloc_peek(conn, ctx)) {
1174 ctx->state = RP_COMMON_STATE_WAIT_TX;
1175 return false;
1176 }
1177
1178 rp_comm_tx(conn, ctx);
1179 ctx->state = RP_COMMON_STATE_WAIT_TX_ACK;
1180 if (complete) {
1181 llcp_rr_complete(conn);
1182 ctx->state = RP_COMMON_STATE_IDLE;
1183 }
1184
1185 return true;
1186 }
1187
/* Send (or skip) the response for the active remote procedure and advance
 * the FSM accordingly.
 *
 * Procedures without a response PDU (min used channels, terminate, CIS
 * terminate) complete immediately; the rest go through rp_comm_tx_proxy()
 * or rp_comm_tx() and end up waiting for tx resources or for the tx ack.
 * An unknown procedure asserts.
 */
static void rp_comm_send_rsp(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_LE_PING)
	case PROC_LE_PING:
		/* Always respond on remote ping */
		rp_comm_tx_proxy(conn, ctx, true);
		break;
#endif /* CONFIG_BT_CTLR_LE_PING */
	case PROC_FEATURE_EXCHANGE:
		/* Always respond on remote feature exchange */
		rp_comm_tx_proxy(conn, ctx, true);
		break;
	case PROC_VERSION_EXCHANGE:
		/* The Link Layer shall only queue for transmission a maximum of one
		 * LL_VERSION_IND PDU during a connection.
		 * If the Link Layer receives an LL_VERSION_IND PDU and has already sent an
		 * LL_VERSION_IND PDU then the Link Layer shall not send another
		 * LL_VERSION_IND PDU to the peer device.
		 */
		if (!conn->llcp.vex.sent) {
			/* Only latch 'sent' once the PDU was actually queued */
			if (rp_comm_tx_proxy(conn, ctx, true)) {
				conn->llcp.vex.sent = 1;
			}
		} else {
			/* Invalid behaviour
			 * A procedure already sent a LL_VERSION_IND and received a LL_VERSION_IND.
			 * Ignore and complete the procedure.
			 */
			llcp_rr_complete(conn);
			ctx->state = RP_COMMON_STATE_IDLE;
		}

		break;
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_CENTRAL)
	case PROC_MIN_USED_CHANS:
		/*
		 * Spec says (5.2, Vol.6, Part B, Section 5.1.11):
		 * The procedure has completed when the Link Layer acknowledgment of the
		 * LL_MIN_USED_CHANNELS_IND PDU is sent or received.
		 * In effect, for this procedure, this is equivalent to RX of PDU
		 *
		 * Also:
		 * If the Link Layer receives an LL_MIN_USED_CHANNELS_IND PDU, it should ensure
		 * that, whenever the Peripheral-to-Central PHY is one of those specified,
		 * the connection uses at least the number of channels given in the
		 * MinUsedChannels field of the PDU.
		 *
		 * The 'should' is here interpreted as 'permission' to do nothing
		 *
		 * Future improvement could implement logic to support this
		 */

		llcp_rr_complete(conn);
		ctx->state = RP_COMMON_STATE_IDLE;
		break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_CENTRAL */
	case PROC_TERMINATE:
#if defined(CONFIG_BT_CENTRAL)
		if (conn->lll.role == BT_HCI_ROLE_CENTRAL) {
			/* No response, but postpone terminate until next event
			 * to ensure acking the reception of TERMINATE_IND
			 */
			ctx->state = RP_COMMON_STATE_POSTPONE_TERMINATE;
			break;
		}
#endif
#if defined(CONFIG_BT_PERIPHERAL)
		/* Terminate right away */
		rp_comm_terminate(conn, ctx);
#endif
		break;
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	case PROC_CIS_TERMINATE:
		/* No response */
		llcp_rr_complete(conn);
		ctx->state = RP_COMMON_STATE_IDLE;

		break;
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO || CONFIG_BT_CTLR_PERIPHERAL_ISO */
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case PROC_DATA_LENGTH_UPDATE:
		/* Response sent; completion happens on tx ack (complete == false) */
		rp_comm_tx_proxy(conn, ctx, false);
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
	case PROC_CTE_REQ:
		if (llcp_rr_ispaused(conn) ||
		    !llcp_tx_alloc_peek(conn, ctx) ||
		    (llcp_rr_get_paused_cmd(conn) == PROC_CTE_REQ)) {
			ctx->state = RP_COMMON_STATE_WAIT_TX;
		} else {
			/* Pause PHY update while the CTE response is in flight */
			llcp_rr_set_paused_cmd(conn, PROC_PHY_UPDATE);
			rp_comm_tx(conn, ctx);
			ctx->state = RP_COMMON_STATE_WAIT_TX_ACK;
		}
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
	case PROC_SCA_UPDATE:
		/* Always respond to remote SCA */
		rp_comm_tx_proxy(conn, ctx, false);
		break;
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */
	default:
		/* Unknown procedure */
		LL_ASSERT(0);
	}
}
1297
1298 static void rp_comm_st_wait_rx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
1299 {
1300 switch (evt) {
1301 case RP_COMMON_EVT_REQUEST:
1302 rp_comm_rx_decode(conn, ctx, (struct pdu_data *)param);
1303 rp_comm_send_rsp(conn, ctx, evt, param);
1304 break;
1305 default:
1306 /* Ignore other evts */
1307 break;
1308 }
1309 }
1310
1311 static void rp_comm_st_postpone_terminate(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
1312 void *param)
1313 {
1314 switch (evt) {
1315 case RP_COMMON_EVT_RUN:
1316 LL_ASSERT(ctx->proc == PROC_TERMINATE);
1317
1318 /* Note: now we terminate, mimicking legacy LLCP behaviour
1319 * A check should be added to ensure that the ack of the terminate_ind was
1320 * indeed tx'ed and not scheduled out/postponed by LLL
1321 */
1322 rp_comm_terminate(conn, ctx);
1323
1324 break;
1325 default:
1326 /* Ignore other evts */
1327 break;
1328 }
1329 }
1330
1331 static void rp_comm_st_wait_tx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
1332 {
1333 switch (evt) {
1334 case LP_COMMON_EVT_RUN:
1335 rp_comm_send_rsp(conn, ctx, evt, param);
1336 break;
1337 default:
1338 /* Ignore other evts */
1339 break;
1340 }
1341 }
1342
1343 static void rp_comm_st_wait_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
1344 void *param)
1345 {
1346 switch (evt) {
1347 case RP_COMMON_EVT_ACK:
1348 switch (ctx->proc) {
1349 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
1350 case PROC_DATA_LENGTH_UPDATE: {
1351 /* Apply changes in data lengths/times */
1352 uint8_t dle_changed = ull_dle_update_eff_tx(conn);
1353
1354 dle_changed |= ctx->data.dle.ntf_dle;
1355 llcp_tx_resume_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_DATA_LENGTH);
1356
1357 rp_comm_ntf(conn, ctx, dle_changed);
1358 llcp_rr_complete(conn);
1359 ctx->state = RP_COMMON_STATE_IDLE;
1360 break;
1361 }
1362 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1363 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
1364 case PROC_CTE_REQ: {
1365 /* add PHY update pause = false here */
1366 llcp_rr_set_paused_cmd(conn, PROC_NONE);
1367 llcp_rr_complete(conn);
1368 ctx->state = RP_COMMON_STATE_IDLE;
1369 }
1370 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
1371 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
1372 case PROC_SCA_UPDATE: {
1373 #if defined(CONFIG_BT_PERIPHERAL)
1374 if (conn->lll.role == BT_HCI_ROLE_PERIPHERAL) {
1375 conn->periph.sca = ctx->data.sca_update.sca;
1376 ull_conn_update_peer_sca(conn);
1377 #if defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
1378 ull_peripheral_iso_update_peer_sca(conn);
1379 #endif /* defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) */
1380 }
1381 #endif /* CONFIG_BT_PERIPHERAL */
1382 llcp_rr_complete(conn);
1383 ctx->state = RP_COMMON_STATE_IDLE;
1384 }
1385 #endif /* CONFIG_BT_CTLR_SCA_UPDATE */
1386 default:
1387 /* Ignore other procedures */
1388 break;
1389 }
1390 break;
1391 default:
1392 /* Ignore other evts */
1393 break;
1394 }
1395 }
1396
1397 static void rp_comm_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
1398 void *param)
1399 {
1400 switch (ctx->state) {
1401 case RP_COMMON_STATE_IDLE:
1402 rp_comm_st_idle(conn, ctx, evt, param);
1403 break;
1404 case RP_COMMON_STATE_WAIT_RX:
1405 rp_comm_st_wait_rx(conn, ctx, evt, param);
1406 break;
1407 case RP_COMMON_STATE_POSTPONE_TERMINATE:
1408 rp_comm_st_postpone_terminate(conn, ctx, evt, param);
1409 break;
1410 case RP_COMMON_STATE_WAIT_TX:
1411 rp_comm_st_wait_tx(conn, ctx, evt, param);
1412 break;
1413 case RP_COMMON_STATE_WAIT_TX_ACK:
1414 rp_comm_st_wait_tx_ack(conn, ctx, evt, param);
1415 break;
1416 default:
1417 /* Unknown state */
1418 LL_ASSERT(0);
1419 }
1420 }
1421
/* Feed a received LL Control PDU (as a request event) into the remote
 * procedure common FSM.
 */
void llcp_rp_comm_rx(struct ll_conn *conn, struct proc_ctx *ctx, struct node_rx_pdu *rx)
{
	rp_comm_execute_fsm(conn, ctx, RP_COMMON_EVT_REQUEST, rx->pdu);
}
1426
/* Feed a tx ack (with the acked PDU as event data) into the remote
 * procedure common FSM.
 */
void llcp_rp_comm_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, struct node_tx *tx)
{
	rp_comm_execute_fsm(conn, ctx, RP_COMMON_EVT_ACK, tx->pdu);
}
1431
/* Drive the remote procedure common FSM with a run event */
void llcp_rp_comm_run(struct ll_conn *conn, struct proc_ctx *ctx, void *param)
{
	rp_comm_execute_fsm(conn, ctx, RP_COMMON_EVT_RUN, param);
}
1436