1 /*
2 * Copyright (c) 2020 Demant
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/kernel.h>
8
9 #include <zephyr/sys/byteorder.h>
10 #include <zephyr/sys/slist.h>
11 #include <zephyr/sys/util.h>
12
13 #include <zephyr/bluetooth/hci_types.h>
14
15 #include "hal/ccm.h"
16
17 #include "util/util.h"
18 #include "util/mem.h"
19 #include "util/memq.h"
20 #include "util/dbuf.h"
21
22 #include "pdu_df.h"
23 #include "lll/pdu_vendor.h"
24 #include "pdu.h"
25
26 #include "ll.h"
27 #include "ll_feat.h"
28 #include "ll_settings.h"
29
30 #include "lll.h"
31 #include "lll/lll_df_types.h"
32 #include "lll_conn.h"
33
34 #include "lll_conn_iso.h"
35
36 #include "ull_tx_queue.h"
37
38 #include "isoal.h"
39 #include "ull_iso_types.h"
40 #include "ull_conn_iso_types.h"
41 #include "ull_conn_iso_internal.h"
42
43 #include "ull_conn_internal.h"
44 #include "ull_conn_types.h"
45
46 #if defined(CONFIG_BT_CTLR_USER_EXT)
47 #include "ull_vendor.h"
48 #endif /* CONFIG_BT_CTLR_USER_EXT */
49
50 #include "ull_internal.h"
51 #include "ull_llcp.h"
52 #include "ull_llcp_features.h"
53 #include "ull_llcp_internal.h"
54
55 #include <soc.h>
56 #include "hal/debug.h"
57
58 /* Hardcoded instant delta +6 */
59 #define CONN_UPDATE_INSTANT_DELTA 6U
60
61 /* CPR parameter ranges */
62 #define CONN_UPDATE_TIMEOUT_100MS 10U
63 #define CONN_UPDATE_TIMEOUT_32SEC 3200U
64 #define CONN_UPDATE_LATENCY_MAX 499U
65 #define CONN_UPDATE_CONN_INTV_4SEC 3200U
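/* Note on units, taken from the parameter checks below: the interval
 * limits are in 1.25 ms steps (3200 => 4 s), the supervision timeout
 * limits are in 10 ms steps (10 => 100 ms, 3200 => 32 s) and the
 * latency limit is a plain count of connection events.
 */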
66
67 /*
68  * TODO - Known missing items (not yet implemented):
69 *
70 * If DLE procedure supported:
71 * and current PHY is Coded PHY:
72 * ... (5.3.6.B.5.1.1) the new connection interval shall be at least connIntervalCodedMin us.
73 * ... (5.3.6.B.5.1.7.4) packet tx time restrictions should be in effect
74 *
75 * Inter-connection mutual exclusion on CPR
76 *
77 * LL/CON/MAS/BV-34-C [Accepting Connection Parameter Request w. event masked]
78 */
79
80 /* LLCP Local Procedure Connection Update FSM states */
81 enum {
82 LP_CU_STATE_IDLE = LLCP_STATE_IDLE,
83 LP_CU_STATE_WAIT_TX_CONN_PARAM_REQ,
84 LP_CU_STATE_WAIT_RX_CONN_PARAM_RSP,
85 LP_CU_STATE_WAIT_TX_CONN_UPDATE_IND,
86 LP_CU_STATE_WAIT_RX_CONN_UPDATE_IND,
87 LP_CU_STATE_WAIT_TX_REJECT_EXT_IND,
88 LP_CU_STATE_WAIT_INSTANT,
89 LP_CU_STATE_WAIT_NTF_AVAIL,
90 };
91
92 /* LLCP Local Procedure Connection Update FSM events */
93 enum {
94 /* Procedure run */
95 LP_CU_EVT_RUN,
96
97 /* Response received */
98 LP_CU_EVT_CONN_PARAM_RSP,
99
100 /* Indication received */
101 LP_CU_EVT_CONN_UPDATE_IND,
102
103 /* Reject response received */
104 LP_CU_EVT_REJECT,
105
106 /* Unknown response received */
107 LP_CU_EVT_UNKNOWN,
108 };
109
110 /* LLCP Remote Procedure Connection Update FSM states */
111 enum {
112 RP_CU_STATE_IDLE = LLCP_STATE_IDLE,
113 RP_CU_STATE_WAIT_RX_CONN_PARAM_REQ,
114 RP_CU_STATE_WAIT_CONN_PARAM_REQ_AVAILABLE,
115 RP_CU_STATE_WAIT_NTF_CONN_PARAM_REQ,
116 RP_CU_STATE_WAIT_CONN_PARAM_REQ_REPLY,
117 RP_CU_STATE_WAIT_CONN_PARAM_REQ_REPLY_CONTINUE,
118 RP_CU_STATE_WAIT_TX_REJECT_EXT_IND,
119 RP_CU_STATE_WAIT_USER_REPLY,
120 RP_CU_STATE_WAIT_TX_CONN_PARAM_RSP,
121 RP_CU_STATE_WAIT_TX_CONN_UPDATE_IND,
122 RP_CU_STATE_WAIT_RX_CONN_UPDATE_IND,
123 RP_CU_STATE_WAIT_INSTANT,
124 RP_CU_STATE_WAIT_NTF_AVAIL,
125 RP_CU_STATE_WAIT_TX_UNKNOWN_RSP
126 };
127
128 /* LLCP Remote Procedure Connection Update FSM events */
129 enum {
130 /* Procedure run */
131 RP_CU_EVT_RUN,
132
133 /* Request received */
134 RP_CU_EVT_CONN_PARAM_REQ,
135
136 /* Indication received */
137 RP_CU_EVT_CONN_UPDATE_IND,
138
139 /* CONN_PARAM_REQ reply */
140 RP_CU_EVT_CONN_PARAM_REQ_REPLY,
141
142 /* CONN_PARAM_REQ negative reply */
143 RP_CU_EVT_CONN_PARAM_REQ_NEG_REPLY,
144
145 	/* CONN_PARAM_REQ Anchor Point Move reply */
146 RP_CU_EVT_CONN_PARAM_REQ_USER_REPLY,
147 };
148
149 /*
150 * LLCP Local Procedure Connection Update FSM
151 */
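/* The local procedure covers two cases: a central-initiated connection
 * update (PROC_CONN_UPDATE), which sends LL_CONNECTION_UPDATE_IND directly,
 * and the connection parameter request procedure (PROC_CONN_PARAM_REQ),
 * which sends LL_CONNECTION_PARAM_REQ and, depending on role, waits for
 * LL_CONNECTION_PARAM_RSP (central) or LL_CONNECTION_UPDATE_IND
 * (peripheral). When an update is actually sent, both variants end by
 * waiting for the instant and then applying the new parameters via
 * cu_update_conn_parameters().
 */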
152
153 static bool cu_have_params_changed(struct ll_conn *conn, uint16_t interval, uint16_t latency,
154 uint16_t timeout)
155 {
156 struct lll_conn *lll = &conn->lll;
157
158 if ((interval != lll->interval) || (latency != lll->latency) ||
159 (timeout != conn->supervision_timeout)) {
160 return true;
161 }
162 return false;
163 }
164
165 static void cu_update_conn_parameters(struct ll_conn *conn, struct proc_ctx *ctx)
166 {
167 ctx->data.cu.params_changed = cu_have_params_changed(
168 conn, ctx->data.cu.interval_max, ctx->data.cu.latency, ctx->data.cu.timeout);
169
170 ull_conn_update_parameters(conn, (ctx->proc == PROC_CONN_UPDATE), ctx->data.cu.win_size,
171 ctx->data.cu.win_offset_us, ctx->data.cu.interval_max,
172 ctx->data.cu.latency, ctx->data.cu.timeout,
173 ctx->data.cu.instant);
174 }
175
176 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
177 static bool cu_check_conn_parameters(struct ll_conn *conn, struct proc_ctx *ctx)
178 {
179 const uint16_t interval_min = ctx->data.cu.interval_min;
180 const uint16_t interval_max = ctx->data.cu.interval_max; /* unit conn events (ie 1.25ms) */
181 const uint16_t timeout = ctx->data.cu.timeout; /* unit 10ms */
182 const uint16_t latency = ctx->data.cu.latency;
183 const uint16_t preferred_periodicity = ctx->data.cu.preferred_periodicity;
184
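	/* The supervision timeout check below encodes the spec requirement
	 * timeout_ms > (1 + latency) * interval_ms * 2. With timeout in
	 * 10 ms units and interval in 1.25 ms units this reduces to
	 * timeout * 4 > (latency + 1) * interval. For example timeout = 100
	 * (1 s), latency = 4 and interval_max = 80 (100 ms) gives
	 * 400 <= 5 * 80, so such a request is rejected as invalid.
	 */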
185 /* Invalid parameters */
186 const bool invalid = ((interval_min < CONN_INTERVAL_MIN(conn)) ||
187 (interval_max > CONN_UPDATE_CONN_INTV_4SEC) ||
188 (interval_min > interval_max) ||
189 (latency > CONN_UPDATE_LATENCY_MAX) ||
190 (timeout < CONN_UPDATE_TIMEOUT_100MS) || (timeout > CONN_UPDATE_TIMEOUT_32SEC) ||
191 ((timeout * 4U) <= /* *4U re. conn events is equivalent to *2U re. ms */
192 ((latency + 1) * interval_max)) ||
193 (preferred_periodicity > interval_max));
194
195 return !invalid;
196 }
197 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
198
199 static bool cu_check_conn_ind_parameters(struct ll_conn *conn, struct proc_ctx *ctx)
200 {
201 const uint16_t interval_max = ctx->data.cu.interval_max; /* unit 1.25ms */
202 const uint16_t timeout = ctx->data.cu.timeout; /* unit 10ms */
203 const uint16_t latency = ctx->data.cu.latency;
204
205 /* Valid conn_update_ind parameters */
206 return (interval_max >= CONN_INTERVAL_MIN(conn)) &&
207 (interval_max <= CONN_UPDATE_CONN_INTV_4SEC) &&
208 (latency <= CONN_UPDATE_LATENCY_MAX) &&
209 (timeout >= CONN_UPDATE_TIMEOUT_100MS) &&
210 (timeout <= CONN_UPDATE_TIMEOUT_32SEC) &&
211 ((timeout * 4U) > /* *4U re. conn events is equivalent to *2U re. ms */
212 ((latency + 1U) * interval_max));
213 }
214
215 static void cu_prepare_update_ind(struct ll_conn *conn, struct proc_ctx *ctx)
216 {
217 ctx->data.cu.win_size = 1U;
218 ctx->data.cu.win_offset_us = 0U;
219
220 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
221 /* Handle preferred periodicity */
222 const uint8_t preferred_periodicity = ctx->data.cu.preferred_periodicity;
223
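	/* Round interval_max down to a multiple of the peer's preferred
	 * periodicity, e.g. interval_max = 25 and preferred_periodicity = 4
	 * gives 24; the rounded value is only used if it does not drop below
	 * interval_min.
	 */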
224 if (preferred_periodicity) {
225 const uint16_t interval_max = (ctx->data.cu.interval_max / preferred_periodicity) *
226 preferred_periodicity;
227 if (interval_max >= ctx->data.cu.interval_min) {
228 			/* Use the 'preferred max' only when it does not underflow interval_min */
229 ctx->data.cu.interval_max = interval_max;
230 }
231 }
232
233 #if !defined(CONFIG_BT_CTLR_SCHED_ADVANCED)
234 /* Use valid offset0 in range [0..interval]. An offset of
235 * 0xffff means not valid. Disregard other preferred offsets.
236 */
237 /* Handle win_offset/'anchor point move' */
238 if (ctx->data.cu.offsets[0] <= ctx->data.cu.interval_max) {
239 ctx->data.cu.win_offset_us = ctx->data.cu.offsets[0] * CONN_INT_UNIT_US;
240 }
241 #endif /* !CONFIG_BT_CTLR_SCHED_ADVANCED */
242 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
243
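	/* Place the instant ahead of the current event counter by the
	 * peripheral latency plus a fixed margin of 6 connection events,
	 * so the peer is listening before the new parameters take effect.
	 */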
244 ctx->data.cu.instant = ull_conn_event_counter(conn) + conn->lll.latency +
245 CONN_UPDATE_INSTANT_DELTA;
246 }
247
248 static bool cu_should_notify_host(struct proc_ctx *ctx)
249 {
250 return (((ctx->proc == PROC_CONN_PARAM_REQ) && (ctx->data.cu.error != 0U)) ||
251 (ctx->data.cu.params_changed != 0U));
252 }
253
254 static void cu_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
255 {
256 struct node_rx_pdu *ntf;
257 struct node_rx_cu *pdu;
258 uint8_t piggy_back;
259
260 /* Allocate ntf node */
261 ntf = ctx->node_ref.rx;
262 ctx->node_ref.rx = NULL;
263 LL_ASSERT(ntf);
264
265 piggy_back = (ntf->hdr.type != NODE_RX_TYPE_RETAIN);
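	/* A node still carrying its original RX type is piggy-backed on the
	 * PDU just received and is delivered on the rx return path; a node
	 * marked NODE_RX_TYPE_RETAIN (retained or pre-allocated NTF) must be
	 * enqueued explicitly below.
	 */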
266
267 ntf->hdr.type = NODE_RX_TYPE_CONN_UPDATE;
268 ntf->hdr.handle = conn->lll.handle;
269 pdu = (struct node_rx_cu *)ntf->pdu;
270
271 pdu->status = ctx->data.cu.error;
272 if (!ctx->data.cu.error) {
273 pdu->interval = ctx->data.cu.interval_max;
274 pdu->latency = ctx->data.cu.latency;
275 pdu->timeout = ctx->data.cu.timeout;
276 } else {
277 pdu->interval = conn->lll.interval;
278 pdu->latency = conn->lll.latency;
279 pdu->timeout = conn->supervision_timeout;
280 }
281
282 if (!piggy_back) {
283 /* Enqueue notification towards LL, unless piggy-backing,
284 * in which case this is done on the rx return path
285 */
286 ll_rx_put_sched(ntf->hdr.link, ntf);
287 }
288 }
289
290 #if defined(CONFIG_BT_CENTRAL) || defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
291 static void lp_cu_tx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t opcode)
292 {
293 struct node_tx *tx;
294 struct pdu_data *pdu;
295
296 /* Get pre-allocated tx node */
297 tx = ctx->node_ref.tx;
298 ctx->node_ref.tx = NULL;
299
300 if (!tx) {
301 		/* Allocate tx node if not pre-allocated */
302 tx = llcp_tx_alloc(conn, ctx);
303 LL_ASSERT(tx);
304 }
305
306 pdu = (struct pdu_data *)tx->pdu;
307
308 /* Encode LL Control PDU */
309 switch (opcode) {
310 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
311 case PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ:
312 llcp_pdu_encode_conn_param_req(ctx, pdu);
313 break;
314 case PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND:
315 llcp_pdu_encode_reject_ext_ind(pdu, ctx->data.cu.rejected_opcode,
316 ctx->data.cu.error);
317 break;
318 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
319 #if defined(CONFIG_BT_CENTRAL)
320 case PDU_DATA_LLCTRL_TYPE_CONN_UPDATE_IND:
321 llcp_pdu_encode_conn_update_ind(ctx, pdu);
322 break;
323 #endif /* CONFIG_BT_CENTRAL */
324 default:
325 /* Unknown opcode */
326 LL_ASSERT(0);
327 break;
328 }
329
330 ctx->tx_opcode = pdu->llctrl.opcode;
331
332 /* Enqueue LL Control PDU towards LLL */
333 llcp_tx_enqueue(conn, tx);
334
335 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
336 if (ctx->proc == PROC_CONN_PARAM_REQ) {
337 /* Restart procedure response timeout timer */
338 llcp_lr_prt_restart(conn);
339 }
340 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
341 }
342 #endif /* CONFIG_BT_CENTRAL || CONFIG_BT_CTLR_CONN_PARAM_REQ */
343
344 static void lp_cu_complete(struct ll_conn *conn, struct proc_ctx *ctx)
345 {
346 llcp_lr_complete(conn);
347 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
348 if (ctx->proc == PROC_CONN_PARAM_REQ &&
349 !(conn->lll.role && ull_cp_remote_cpr_pending(conn))) {
350 /* For a peripheral without a remote initiated CPR */
351 cpr_active_check_and_reset(conn);
352 }
353 #endif /* defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) */
354 ctx->state = LP_CU_STATE_IDLE;
355 }
356
357 static void lp_cu_ntf_complete(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
358 void *param)
359 {
360 cu_ntf(conn, ctx);
361 lp_cu_complete(conn, ctx);
362 }
363
364 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
365 static void lp_cu_send_reject_ext_ind(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
366 void *param)
367 {
368 if (llcp_lr_ispaused(conn) || !llcp_tx_alloc_peek(conn, ctx)) {
369 ctx->state = LP_CU_STATE_WAIT_TX_REJECT_EXT_IND;
370 } else {
371 llcp_rr_set_incompat(conn, INCOMPAT_NO_COLLISION);
372 lp_cu_tx(conn, ctx, PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND);
373 lp_cu_complete(conn, ctx);
374 }
375 }
376
377 static void lp_cu_send_conn_param_req(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
378 void *param)
379 {
380 if (cpr_active_is_set(conn) || llcp_lr_ispaused(conn) ||
381 llcp_rr_get_collision(conn) || !llcp_tx_alloc_peek(conn, ctx)) {
382 ctx->state = LP_CU_STATE_WAIT_TX_CONN_PARAM_REQ;
383 } else {
384 uint16_t event_counter = ull_conn_event_counter(conn);
385
386 llcp_rr_set_incompat(conn, INCOMPAT_RESOLVABLE);
387
388 ctx->data.cu.reference_conn_event_count = event_counter;
389 ctx->data.cu.preferred_periodicity = 0U;
390
391 /* Mark CPR as active */
392 cpr_active_set(conn);
393
394 lp_cu_tx(conn, ctx, PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ);
395
396 switch (conn->lll.role) {
397 #if defined(CONFIG_BT_CENTRAL)
398 case BT_HCI_ROLE_CENTRAL:
399 ctx->state = LP_CU_STATE_WAIT_RX_CONN_PARAM_RSP;
400 ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_CONN_PARAM_RSP;
401 break;
402 #endif /* CONFIG_BT_CENTRAL */
403 #if defined(CONFIG_BT_PERIPHERAL)
404 case BT_HCI_ROLE_PERIPHERAL:
405 ctx->state = LP_CU_STATE_WAIT_RX_CONN_UPDATE_IND;
406 ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_CONN_UPDATE_IND;
407 break;
408 #endif /* CONFIG_BT_PERIPHERAL */
409 default:
410 /* Unknown role */
411 LL_ASSERT(0);
412 break;
413 }
414 }
415 }
416 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
417
418 #if defined(CONFIG_BT_CENTRAL)
419 static void lp_cu_send_conn_update_ind_finalize(struct ll_conn *conn, struct proc_ctx *ctx,
420 uint8_t evt, void *param)
421 {
422 if (ctx->node_ref.rx == NULL) {
423 		/* If we get here without an RX node, we know one is available to be
424 		 * allocated, so pre-allocate the NTF node
425 		 */
426 ctx->node_ref.rx = llcp_ntf_alloc();
427 }
428
429 	/* Mark as retained: the NTF is put/scheduled explicitly, not piggy-backed on an RX node */
430 ctx->node_ref.rx->hdr.type = NODE_RX_TYPE_RETAIN;
431
432 cu_prepare_update_ind(conn, ctx);
433 lp_cu_tx(conn, ctx, PDU_DATA_LLCTRL_TYPE_CONN_UPDATE_IND);
434 ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
435 ctx->state = LP_CU_STATE_WAIT_INSTANT;
436 }
437
438 static void lp_cu_send_conn_update_ind(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
439 void *param)
440 {
441 if (llcp_lr_ispaused(conn) || !llcp_tx_alloc_peek(conn, ctx)) {
442 ctx->state = LP_CU_STATE_WAIT_TX_CONN_UPDATE_IND;
443 } else {
444 /* ensure alloc of TX node, before possibly waiting for NTF node */
445 ctx->node_ref.tx = llcp_tx_alloc(conn, ctx);
446 if (ctx->node_ref.rx == NULL && !llcp_ntf_alloc_is_available()) {
447 /* No RX node piggy, and no NTF avail, so go wait for one, before TX'ing */
448 ctx->state = LP_CU_STATE_WAIT_NTF_AVAIL;
449 } else {
450 lp_cu_send_conn_update_ind_finalize(conn, ctx, evt, param);
451 }
452 }
453 }
454
455 static void lp_cu_st_wait_ntf_avail(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
456 void *param)
457 {
458 switch (evt) {
459 case LP_CU_EVT_RUN:
460 if (llcp_ntf_alloc_is_available()) {
461 lp_cu_send_conn_update_ind_finalize(conn, ctx, evt, param);
462 }
463 break;
464 default:
465 /* Ignore other evts */
466 break;
467 }
468 }
469 #endif /* CONFIG_BT_CENTRAL */
470
471 static void lp_cu_st_idle(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
472 {
473 switch (evt) {
474 case LP_CU_EVT_RUN:
475 switch (ctx->proc) {
476 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
477 case PROC_CONN_PARAM_REQ:
478 lp_cu_send_conn_param_req(conn, ctx, evt, param);
479 break;
480 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
481 #if defined(CONFIG_BT_CENTRAL)
482 case PROC_CONN_UPDATE:
483 			/* Signal that there is no RX node to piggy-back the notification on */
484 ctx->node_ref.rx = NULL;
485 lp_cu_send_conn_update_ind(conn, ctx, evt, param);
486 break;
487 #endif /* CONFIG_BT_CENTRAL */
488 default:
489 /* Unknown procedure */
490 LL_ASSERT(0);
491 break;
492 }
493 break;
494 default:
495 /* Ignore other evts */
496 break;
497 }
498 }
499
500 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
501 static void lp_cu_st_wait_tx_reject_ext_ind(struct ll_conn *conn, struct proc_ctx *ctx,
502 uint8_t evt, void *param)
503 {
504 switch (evt) {
505 case LP_CU_EVT_RUN:
506 lp_cu_send_reject_ext_ind(conn, ctx, evt, param);
507 break;
508 default:
509 /* Ignore other evts */
510 break;
511 }
512 }
513
514
515 static void lp_cu_st_wait_tx_conn_param_req(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
516 void *param)
517 {
518 switch (evt) {
519 case LP_CU_EVT_RUN:
520 lp_cu_send_conn_param_req(conn, ctx, evt, param);
521 break;
522 default:
523 /* Ignore other evts */
524 break;
525 }
526 }
527 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
528
529 #if defined(CONFIG_BT_CENTRAL)
530 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
531 static void lp_cu_st_wait_rx_conn_param_rsp(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
532 void *param)
533 {
534 struct pdu_data *pdu = (struct pdu_data *)param;
535
536 switch (evt) {
537 case LP_CU_EVT_CONN_PARAM_RSP:
538 llcp_pdu_decode_conn_param_rsp(ctx, param);
539 llcp_rr_set_incompat(conn, INCOMPAT_RESERVED);
540 /* Perform Param check and possibly reject (LL_REJECT_EXT_IND) */
541 if (!cu_check_conn_parameters(conn, ctx)) {
542 ctx->data.cu.rejected_opcode = PDU_DATA_LLCTRL_TYPE_CONN_PARAM_RSP;
543 ctx->data.cu.error = BT_HCI_ERR_INVALID_LL_PARAM;
544 lp_cu_send_reject_ext_ind(conn, ctx, evt, param);
545 break;
546 }
547 /* Keep RX node to use for NTF */
548 llcp_rx_node_retain(ctx);
549 lp_cu_send_conn_update_ind(conn, ctx, evt, param);
550 break;
551 case LP_CU_EVT_UNKNOWN:
552 llcp_rr_set_incompat(conn, INCOMPAT_RESERVED);
553 /* Unsupported in peer, so disable locally for this connection */
554 feature_unmask_features(conn, LL_FEAT_BIT_CONN_PARAM_REQ);
555 /* Keep RX node to use for NTF */
556 llcp_rx_node_retain(ctx);
557 lp_cu_send_conn_update_ind(conn, ctx, evt, param);
558 break;
559 case LP_CU_EVT_REJECT:
560 if (pdu->llctrl.reject_ext_ind.error_code == BT_HCI_ERR_UNSUPP_REMOTE_FEATURE) {
561 /* Remote legacy Host */
562 llcp_rr_set_incompat(conn, INCOMPAT_RESERVED);
563 /* Unsupported in peer, so disable locally for this connection */
564 feature_unmask_features(conn, LL_FEAT_BIT_CONN_PARAM_REQ);
565 /* Keep RX node to use for NTF */
566 llcp_rx_node_retain(ctx);
567 lp_cu_send_conn_update_ind(conn, ctx, evt, param);
568 } else {
569 llcp_rr_set_incompat(conn, INCOMPAT_NO_COLLISION);
570 ctx->data.cu.error = pdu->llctrl.reject_ext_ind.error_code;
571 lp_cu_ntf_complete(conn, ctx, evt, param);
572 }
573 break;
574 default:
575 /* Ignore other evts */
576 break;
577 }
578 }
579 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
580
581 static void lp_cu_st_wait_tx_conn_update_ind(struct ll_conn *conn, struct proc_ctx *ctx,
582 uint8_t evt, void *param)
583 {
584 switch (evt) {
585 case LP_CU_EVT_RUN:
586 lp_cu_send_conn_update_ind(conn, ctx, evt, param);
587 break;
588 default:
589 /* Ignore other evts */
590 break;
591 }
592 }
593 #endif /* CONFIG_BT_CENTRAL */
594
595 #if defined(CONFIG_BT_PERIPHERAL)
596 static void lp_cu_st_wait_rx_conn_update_ind(struct ll_conn *conn, struct proc_ctx *ctx,
597 uint8_t evt, void *param)
598 {
599 struct pdu_data *pdu = (struct pdu_data *)param;
600
601 switch (evt) {
602 case LP_CU_EVT_CONN_UPDATE_IND:
603 llcp_pdu_decode_conn_update_ind(ctx, param);
604
605 /* Invalid PDU, mark the connection for termination */
606 if (!cu_check_conn_ind_parameters(conn, ctx)) {
607 llcp_rr_set_incompat(conn, INCOMPAT_NO_COLLISION);
608 conn->llcp_terminate.reason_final = BT_HCI_ERR_INVALID_LL_PARAM;
609 lp_cu_complete(conn, ctx);
610 break;
611 }
612
613 llcp_rr_set_incompat(conn, INCOMPAT_RESERVED);
614
615 /* Keep RX node to use for NTF */
616 llcp_rx_node_retain(ctx);
617
618 ctx->state = LP_CU_STATE_WAIT_INSTANT;
619 break;
620 case LP_CU_EVT_UNKNOWN:
621 /* Unsupported in peer, so disable locally for this connection */
622 feature_unmask_features(conn, LL_FEAT_BIT_CONN_PARAM_REQ);
623 ctx->data.cu.error = BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
624 lp_cu_ntf_complete(conn, ctx, evt, param);
625 break;
626 case LP_CU_EVT_REJECT:
627 ctx->data.cu.error = pdu->llctrl.reject_ext_ind.error_code;
628 lp_cu_ntf_complete(conn, ctx, evt, param);
629 break;
630 default:
631 /* Ignore other evts */
632 break;
633 }
634 }
635 #endif /* CONFIG_BT_PERIPHERAL */
636
637 static void lp_cu_check_instant(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
638 void *param)
639 {
640 uint16_t event_counter = ull_conn_event_counter(conn);
641
642 if (is_instant_reached_or_passed(ctx->data.cu.instant, event_counter)) {
643 bool notify;
644
645 /* Procedure is complete when the instant has passed, and the
646 * new connection event parameters have been applied.
647 */
648 llcp_rr_set_incompat(conn, INCOMPAT_NO_COLLISION);
649 cu_update_conn_parameters(conn, ctx);
650
651 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
652 if (ctx->proc == PROC_CONN_PARAM_REQ) {
653 /* Stop procedure response timeout timer */
654 llcp_lr_prt_stop(conn);
655 }
656 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
657
658 notify = cu_should_notify_host(ctx);
659 if (notify) {
660 ctx->data.cu.error = BT_HCI_ERR_SUCCESS;
661 lp_cu_ntf_complete(conn, ctx, evt, param);
662 } else {
663 /* Release RX node kept for NTF */
664 llcp_rx_node_release(ctx);
665 ctx->node_ref.rx = NULL;
666
667 lp_cu_complete(conn, ctx);
668 }
669 }
670 }
671
672 static void lp_cu_st_wait_instant(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
673 void *param)
674 {
675 switch (evt) {
676 case LP_CU_EVT_RUN:
677 lp_cu_check_instant(conn, ctx, evt, param);
678 break;
679 default:
680 /* Ignore other evts */
681 break;
682 }
683 }
684
685 static void lp_cu_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
686 {
687 switch (ctx->state) {
688 case LP_CU_STATE_IDLE:
689 lp_cu_st_idle(conn, ctx, evt, param);
690 break;
691 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
692 case LP_CU_STATE_WAIT_TX_CONN_PARAM_REQ:
693 lp_cu_st_wait_tx_conn_param_req(conn, ctx, evt, param);
694 break;
695 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
696 #if defined(CONFIG_BT_CENTRAL)
697 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
698 case LP_CU_STATE_WAIT_RX_CONN_PARAM_RSP:
699 lp_cu_st_wait_rx_conn_param_rsp(conn, ctx, evt, param);
700 break;
701 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
702 case LP_CU_STATE_WAIT_TX_CONN_UPDATE_IND:
703 lp_cu_st_wait_tx_conn_update_ind(conn, ctx, evt, param);
704 break;
705 case LP_CU_STATE_WAIT_NTF_AVAIL:
706 lp_cu_st_wait_ntf_avail(conn, ctx, evt, param);
707 break;
708 #endif /* CONFIG_BT_CENTRAL */
709 #if defined(CONFIG_BT_PERIPHERAL)
710 case LP_CU_STATE_WAIT_RX_CONN_UPDATE_IND:
711 lp_cu_st_wait_rx_conn_update_ind(conn, ctx, evt, param);
712 break;
713 #endif /* CONFIG_BT_PERIPHERAL */
714 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
715 case LP_CU_STATE_WAIT_TX_REJECT_EXT_IND:
716 lp_cu_st_wait_tx_reject_ext_ind(conn, ctx, evt, param);
717 break;
718 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
719 case LP_CU_STATE_WAIT_INSTANT:
720 lp_cu_st_wait_instant(conn, ctx, evt, param);
721 break;
722 default:
723 /* Unknown state */
724 LL_ASSERT(0);
725 break;
726 }
727 }
728
729 void llcp_lp_cu_rx(struct ll_conn *conn, struct proc_ctx *ctx, struct node_rx_pdu *rx)
730 {
731 struct pdu_data *pdu = (struct pdu_data *)rx->pdu;
732
733 switch (pdu->llctrl.opcode) {
734 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
735 case PDU_DATA_LLCTRL_TYPE_CONN_PARAM_RSP:
736 lp_cu_execute_fsm(conn, ctx, LP_CU_EVT_CONN_PARAM_RSP, pdu);
737 break;
738 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
739 case PDU_DATA_LLCTRL_TYPE_CONN_UPDATE_IND:
740 lp_cu_execute_fsm(conn, ctx, LP_CU_EVT_CONN_UPDATE_IND, pdu);
741 break;
742 case PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP:
743 lp_cu_execute_fsm(conn, ctx, LP_CU_EVT_UNKNOWN, pdu);
744 break;
745 case PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND:
746 lp_cu_execute_fsm(conn, ctx, LP_CU_EVT_REJECT, pdu);
747 break;
748 default:
749 /* Invalid behaviour */
750 /* Invalid PDU received so terminate connection */
751 conn->llcp_terminate.reason_final = BT_HCI_ERR_LMP_PDU_NOT_ALLOWED;
752 lp_cu_complete(conn, ctx);
753 break;
754 }
755 }
756
757 void llcp_lp_cu_run(struct ll_conn *conn, struct proc_ctx *ctx, void *param)
758 {
759 lp_cu_execute_fsm(conn, ctx, LP_CU_EVT_RUN, param);
760 }
761
762 /*
763 * LLCP Remote Procedure Connection Update FSM
764 */
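/* The remote procedure handles a peer-initiated LL_CONNECTION_PARAM_REQ
 * (optionally involving the Host through a connection parameter request
 * notification and reply) or a peer-initiated LL_CONNECTION_UPDATE_IND and,
 * in the normal case, completes once the instant has been reached and the
 * new parameters applied.
 */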
765
766 static void rp_cu_tx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t opcode)
767 {
768 struct node_tx *tx;
769 struct pdu_data *pdu;
770
771 /* Get pre-allocated tx node */
772 tx = ctx->node_ref.tx;
773 ctx->node_ref.tx = NULL;
774
775 if (!tx) {
776 		/* Allocate tx node if not pre-allocated */
777 tx = llcp_tx_alloc(conn, ctx);
778 LL_ASSERT(tx);
779 }
780
781 pdu = (struct pdu_data *)tx->pdu;
782
783 /* Encode LL Control PDU */
784 switch (opcode) {
785 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
786 case PDU_DATA_LLCTRL_TYPE_CONN_PARAM_RSP:
787 llcp_pdu_encode_conn_param_rsp(ctx, pdu);
788 break;
789 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
790 case PDU_DATA_LLCTRL_TYPE_CONN_UPDATE_IND:
791 llcp_pdu_encode_conn_update_ind(ctx, pdu);
792 break;
793 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
794 case PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND:
795 llcp_pdu_encode_reject_ext_ind(pdu, ctx->data.cu.rejected_opcode,
796 ctx->data.cu.error);
797 break;
798 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
799 case PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP:
800 llcp_pdu_encode_unknown_rsp(ctx, pdu);
801 break;
802 default:
803 /* Unknown opcode */
804 LL_ASSERT(0);
805 break;
806 }
807
808 ctx->tx_opcode = pdu->llctrl.opcode;
809
810 /* Enqueue LL Control PDU towards LLL */
811 llcp_tx_enqueue(conn, tx);
812
813 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
814 if (ctx->proc == PROC_CONN_PARAM_REQ) {
815 /* Restart procedure response timeout timer */
816 llcp_rr_prt_restart(conn);
817 }
818 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
819 }
820
821 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
822 static void rp_cu_conn_param_req_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
823 {
824 struct node_rx_pdu *ntf;
825 struct pdu_data *pdu;
826 uint8_t piggy_back;
827
828
829 /* Allocate ntf node */
830 ntf = ctx->node_ref.rx;
831 ctx->node_ref.rx = NULL;
832 LL_ASSERT(ntf);
833
834 piggy_back = (ntf->hdr.type != NODE_RX_TYPE_RETAIN);
835
836 ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
837 ntf->hdr.handle = conn->lll.handle;
838 pdu = (struct pdu_data *)ntf->pdu;
839
840 llcp_pdu_encode_conn_param_req(ctx, pdu);
841
842 if (!piggy_back) {
843 /* Enqueue notification towards LL, unless piggy-backing,
844 * in which case this is done on the rx return path
845 */
846 ll_rx_put_sched(ntf->hdr.link, ntf);
847 }
848 }
849 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
850
851 static void rp_cu_complete(struct ll_conn *conn, struct proc_ctx *ctx)
852 {
853 llcp_rr_complete(conn);
854 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
855 if (ctx->proc == PROC_CONN_PARAM_REQ) {
856 cpr_active_check_and_reset(conn);
857 }
858 #endif /* defined(CONFIG_BT_CTLR_CONN_PARAM_REQ) */
859 ctx->state = RP_CU_STATE_IDLE;
860 }
861
862 static void rp_cu_send_conn_update_ind_finalize(struct ll_conn *conn, struct proc_ctx *ctx,
863 uint8_t evt, void *param)
864 {
865 /* Central role path, should not get here with !=NULL rx-node reference */
866 LL_ASSERT(ctx->node_ref.rx == NULL);
867 /* We pre-alloc NTF node */
868 ctx->node_ref.rx = llcp_ntf_alloc();
869
870 	/* Mark as retained: the NTF is put/scheduled explicitly, not piggy-backed on an RX node */
871 ctx->node_ref.rx->hdr.type = NODE_RX_TYPE_RETAIN;
872
873 cu_prepare_update_ind(conn, ctx);
874 rp_cu_tx(conn, ctx, PDU_DATA_LLCTRL_TYPE_CONN_UPDATE_IND);
875 ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
876 ctx->state = RP_CU_STATE_WAIT_INSTANT;
877 }
878
879 static void rp_cu_send_conn_update_ind(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
880 void *param)
881 {
882 if (llcp_rr_ispaused(conn) || !llcp_tx_alloc_peek(conn, ctx)) {
883 ctx->state = RP_CU_STATE_WAIT_TX_CONN_UPDATE_IND;
884 } else {
885 /* ensure alloc of TX node, before possibly waiting for NTF node */
886 ctx->node_ref.tx = llcp_tx_alloc(conn, ctx);
887 if (!llcp_ntf_alloc_is_available()) {
888 /* No RX node piggy, and no NTF avail, so go wait for one, before TX'ing */
889 ctx->state = RP_CU_STATE_WAIT_NTF_AVAIL;
890 } else {
891 rp_cu_send_conn_update_ind_finalize(conn, ctx, evt, param);
892 }
893 }
894 }
895
896 static void rp_cu_st_wait_ntf_avail(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
897 void *param)
898 {
899 switch (evt) {
900 case RP_CU_EVT_RUN:
901 if (llcp_ntf_alloc_is_available()) {
902 			/* NTF node is now avail, so pick it up and continue */
903 rp_cu_send_conn_update_ind_finalize(conn, ctx, evt, param);
904 }
905 break;
906 default:
907 /* Ignore other evts */
908 break;
909 }
910 }
911
912 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
913 static void rp_cu_send_reject_ext_ind(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
914 void *param)
915 {
916 if (llcp_rr_ispaused(conn) || !llcp_tx_alloc_peek(conn, ctx)) {
917 ctx->state = RP_CU_STATE_WAIT_TX_REJECT_EXT_IND;
918 } else {
919 rp_cu_tx(conn, ctx, PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND);
920 rp_cu_complete(conn, ctx);
921 }
922 }
923
924 static void rp_cu_send_conn_param_rsp(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
925 void *param)
926 {
927 if (llcp_rr_ispaused(conn) || !llcp_tx_alloc_peek(conn, ctx)) {
928 ctx->state = RP_CU_STATE_WAIT_TX_CONN_PARAM_RSP;
929 } else {
930 rp_cu_tx(conn, ctx, PDU_DATA_LLCTRL_TYPE_CONN_PARAM_RSP);
931 ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_CONN_UPDATE_IND;
932 ctx->state = RP_CU_STATE_WAIT_RX_CONN_UPDATE_IND;
933 }
934 }
935
936 static void rp_cu_send_conn_param_req_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
937 void *param)
938 {
939 if (!llcp_ntf_alloc_is_available()) {
940 ctx->state = RP_CU_STATE_WAIT_NTF_CONN_PARAM_REQ;
941 } else {
942 rp_cu_conn_param_req_ntf(conn, ctx);
943 ctx->state = RP_CU_STATE_WAIT_CONN_PARAM_REQ_REPLY;
944 }
945 }
946 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
947
948 static void rp_cu_send_unknown_rsp(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
949 void *param)
950 {
951 if (llcp_rr_ispaused(conn) || !llcp_tx_alloc_peek(conn, ctx)) {
952 ctx->state = RP_CU_STATE_WAIT_TX_UNKNOWN_RSP;
953 } else {
954 rp_cu_tx(conn, ctx, PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP);
955 rp_cu_complete(conn, ctx);
956 }
957 }
958
959 static void rp_cu_st_idle(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
960 {
961 switch (evt) {
962 case RP_CU_EVT_RUN:
963 switch (ctx->proc) {
964 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
965 case PROC_CONN_PARAM_REQ:
966 ctx->state = RP_CU_STATE_WAIT_RX_CONN_PARAM_REQ;
967 break;
968 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
969 case PROC_CONN_UPDATE:
970 ctx->state = RP_CU_STATE_WAIT_RX_CONN_UPDATE_IND;
971 break;
972 default:
973 /* Unknown procedure */
974 LL_ASSERT(0);
975 break;
976 }
977 break;
978 default:
979 /* Ignore other evts */
980 break;
981 }
982 }
983
984 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
985 static void rp_cu_st_wait_conn_param_req_available(struct ll_conn *conn, struct proc_ctx *ctx,
986 uint8_t evt, void *param)
987 {
988 	/* Check if CPR is already active on another connection.
989 	 * If so, check whether it is possible to send the reject right away;
990 	 * otherwise stay in the wait state in case CPR becomes
991 	 * available before we can send the reject.
992 	 */
993 switch (evt) {
994 case RP_CU_EVT_CONN_PARAM_REQ:
995 case RP_CU_EVT_RUN:
996 if (cpr_active_is_set(conn)) {
997 ctx->state = RP_CU_STATE_WAIT_CONN_PARAM_REQ_AVAILABLE;
998
999 if (!llcp_rr_ispaused(conn) && llcp_tx_alloc_peek(conn, ctx)) {
1000 /* We're good to reject immediately */
1001 ctx->data.cu.rejected_opcode = PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ;
1002 ctx->data.cu.error = BT_HCI_ERR_UNSUPP_LL_PARAM_VAL;
1003 rp_cu_send_reject_ext_ind(conn, ctx, evt, param);
1004
1005 /* Possibly retained rx node to be released as we won't need it */
1006 llcp_rx_node_release(ctx);
1007 ctx->node_ref.rx = NULL;
1008
1009 break;
1010 }
1011 /* In case we have to defer NTF */
1012 llcp_rx_node_retain(ctx);
1013 } else {
1014 cpr_active_set(conn);
1015 const bool params_changed =
1016 cu_have_params_changed(conn, ctx->data.cu.interval_max,
1017 ctx->data.cu.latency, ctx->data.cu.timeout);
1018
1019 /* notify Host if conn parameters changed, else respond */
1020 if (params_changed) {
1021 rp_cu_conn_param_req_ntf(conn, ctx);
1022 ctx->state = RP_CU_STATE_WAIT_CONN_PARAM_REQ_REPLY;
1023 } else {
1024 /* Possibly retained rx node to be released as we won't need it */
1025 llcp_rx_node_release(ctx);
1026 ctx->node_ref.rx = NULL;
1027 #if defined(CONFIG_BT_CTLR_USER_CPR_ANCHOR_POINT_MOVE)
1028 /* Handle APM as a vendor specific user extension */
1029 if (conn->lll.role == BT_HCI_ROLE_PERIPHERAL &&
1030 DEFER_APM_CHECK(conn, ctx->data.cu.offsets,
1031 &ctx->data.cu.error)) {
1032 /* Wait for user response */
1033 ctx->state = RP_CU_STATE_WAIT_USER_REPLY;
1034 break;
1035 }
1036 #endif /* CONFIG_BT_CTLR_USER_CPR_ANCHOR_POINT_MOVE */
1037 ctx->state = RP_CU_STATE_WAIT_CONN_PARAM_REQ_REPLY_CONTINUE;
1038 }
1039 		}
		break;
1040 default:
1041 /* Ignore other evts */
1042 break;
1043 }
1044 }
1045
1046 static void rp_cu_st_wait_rx_conn_param_req(struct ll_conn *conn, struct proc_ctx *ctx,
1047 uint8_t evt, void *param)
1048 {
1049 switch (evt) {
1050 case RP_CU_EVT_CONN_PARAM_REQ:
1051 llcp_pdu_decode_conn_param_req(ctx, param);
1052
1053 /* Perform Param check and reject if invalid (LL_REJECT_EXT_IND) */
1054 if (!cu_check_conn_parameters(conn, ctx)) {
1055 ctx->data.cu.rejected_opcode = PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ;
1056 ctx->data.cu.error = BT_HCI_ERR_INVALID_LL_PARAM;
1057 rp_cu_send_reject_ext_ind(conn, ctx, evt, param);
1058 break;
1059 }
1060
1061 rp_cu_st_wait_conn_param_req_available(conn, ctx, evt, param);
1062 break;
1063 default:
1064 /* Ignore other evts */
1065 break;
1066 }
1067 }
1068
1069 static void rp_cu_state_wait_ntf_conn_param_req(struct ll_conn *conn, struct proc_ctx *ctx,
1070 uint8_t evt, void *param)
1071 {
1072 switch (evt) {
1073 case RP_CU_EVT_RUN:
1074 rp_cu_send_conn_param_req_ntf(conn, ctx, evt, param);
1075 break;
1076 default:
1077 /* Ignore other evts */
1078 break;
1079 }
1080 }
1081
1082 static void rp_cu_state_wait_conn_param_req_reply(struct ll_conn *conn, struct proc_ctx *ctx,
1083 uint8_t evt, void *param)
1084 {
1085 switch (evt) {
1086 case RP_CU_EVT_CONN_PARAM_REQ_REPLY:
1087 /* Continue procedure in next prepare run */
1088 ctx->state = RP_CU_STATE_WAIT_CONN_PARAM_REQ_REPLY_CONTINUE;
1089 break;
1090 case RP_CU_EVT_CONN_PARAM_REQ_NEG_REPLY:
1091 /* Send reject in next prepare run */
1092 ctx->data.cu.rejected_opcode = PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ;
1093 ctx->state = RP_CU_STATE_WAIT_TX_REJECT_EXT_IND;
1094 break;
1095 default:
1096 /* Ignore other evts */
1097 break;
1098 }
1099 }
1100
1101 static void rp_cu_state_wait_conn_param_req_reply_continue(struct ll_conn *conn,
1102 struct proc_ctx *ctx, uint8_t evt,
1103 void *param)
1104 {
1105 switch (evt) {
1106 case RP_CU_EVT_RUN:
1107 if (conn->lll.role == BT_HCI_ROLE_CENTRAL) {
1108 /* Ensure that node_ref does not indicate RX node for piggyback */
1109 ctx->node_ref.rx = NULL;
1110 rp_cu_send_conn_update_ind(conn, ctx, evt, param);
1111 } else if (conn->lll.role == BT_HCI_ROLE_PERIPHERAL) {
1112 if (!ctx->data.cu.error) {
1113 rp_cu_send_conn_param_rsp(conn, ctx, evt, param);
1114 } else {
1115 ctx->data.cu.rejected_opcode = PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ;
1116 rp_cu_send_reject_ext_ind(conn, ctx, evt, param);
1117
1118 }
1119 } else {
1120 /* Unknown role */
1121 LL_ASSERT(0);
1122 }
1123 break;
1124 default:
1125 /* Ignore other evts */
1126 break;
1127 }
1128 }
1129
1130 static void rp_cu_state_wait_tx_reject_ext_ind(struct ll_conn *conn, struct proc_ctx *ctx,
1131 uint8_t evt, void *param)
1132 {
1133 switch (evt) {
1134 case RP_CU_EVT_RUN:
1135 rp_cu_send_reject_ext_ind(conn, ctx, evt, param);
1136 break;
1137 default:
1138 /* Ignore other evts */
1139 break;
1140 }
1141 }
1142
1143 static void rp_cu_st_wait_tx_conn_param_rsp(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
1144 void *param)
1145 {
1146 switch (evt) {
1147 case RP_CU_EVT_RUN:
1148 rp_cu_send_conn_param_rsp(conn, ctx, evt, param);
1149 break;
1150 default:
1151 /* Ignore other evts */
1152 break;
1153 }
1154 }
1155
1156 #if defined(CONFIG_BT_CTLR_USER_CPR_ANCHOR_POINT_MOVE)
1157 static void rp_cu_st_wait_user_response(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
1158 void *param)
1159 {
1160 switch (evt) {
1161 case RP_CU_EVT_CONN_PARAM_REQ_USER_REPLY:
1162 /* Continue procedure in next prepare run */
1163 ctx->state = RP_CU_STATE_WAIT_CONN_PARAM_REQ_REPLY_CONTINUE;
1164 break;
1165 default:
1166 /* Ignore other evts */
1167 break;
1168 }
1169 }
1170 #endif /* CONFIG_BT_CTLR_USER_CPR_ANCHOR_POINT_MOVE */
1171 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
1172
1173 static void rp_cu_st_wait_tx_conn_update_ind(struct ll_conn *conn, struct proc_ctx *ctx,
1174 uint8_t evt, void *param)
1175 {
1176 switch (evt) {
1177 case RP_CU_EVT_RUN:
1178 rp_cu_send_conn_update_ind(conn, ctx, evt, param);
1179 break;
1180 default:
1181 /* Ignore other evts */
1182 break;
1183 }
1184 }
1185
1186 static void rp_cu_check_instant(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
1187 void *param)
1188 {
1189 uint16_t event_counter = ull_conn_event_counter(conn);
1190
1191 if (is_instant_reached_or_passed(ctx->data.cu.instant, event_counter)) {
1192 bool notify;
1193
1194 /* Procedure is complete when the instant has passed, and the
1195 * new connection event parameters have been applied.
1196 */
1197 cu_update_conn_parameters(conn, ctx);
1198
1199 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
1200 if (ctx->proc == PROC_CONN_PARAM_REQ) {
1201 /* Stop procedure response timeout timer */
1202 llcp_rr_prt_stop(conn);
1203 }
1204 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
1205
1206 notify = cu_should_notify_host(ctx);
1207 if (notify) {
1208 ctx->data.cu.error = BT_HCI_ERR_SUCCESS;
1209 cu_ntf(conn, ctx);
1210 } else {
1211 /* Release RX node kept for NTF */
1212 llcp_rx_node_release(ctx);
1213 ctx->node_ref.rx = NULL;
1214 }
1215 rp_cu_complete(conn, ctx);
1216 }
1217 }
1218
1219 static void rp_cu_st_wait_rx_conn_update_ind(struct ll_conn *conn, struct proc_ctx *ctx,
1220 uint8_t evt, void *param)
1221 {
1222 switch (evt) {
1223 case RP_CU_EVT_CONN_UPDATE_IND:
1224 switch (conn->lll.role) {
1225 case BT_HCI_ROLE_CENTRAL:
1226 ctx->unknown_response.type = PDU_DATA_LLCTRL_TYPE_CONN_UPDATE_IND;
1227 rp_cu_send_unknown_rsp(conn, ctx, evt, param);
1228 break;
1229 case BT_HCI_ROLE_PERIPHERAL:
1230 llcp_pdu_decode_conn_update_ind(ctx, param);
1231
1232 /* Valid PDU */
1233 if (cu_check_conn_ind_parameters(conn, ctx)) {
1234 if (is_instant_not_passed(ctx->data.cu.instant,
1235 ull_conn_event_counter(conn))) {
1236 /* Keep RX node to use for NTF */
1237 llcp_rx_node_retain(ctx);
1238
1239 ctx->state = RP_CU_STATE_WAIT_INSTANT;
1240
1241 /* In case we only just received it in time */
1242 rp_cu_check_instant(conn, ctx, evt, param);
1243 break;
1244 }
1245
1246 conn->llcp_terminate.reason_final = BT_HCI_ERR_INSTANT_PASSED;
1247 } else {
1248 conn->llcp_terminate.reason_final = BT_HCI_ERR_INVALID_LL_PARAM;
1249 }
1250
1251 llcp_rr_complete(conn);
1252 ctx->state = RP_CU_STATE_IDLE;
1253 break;
1254 default:
1255 /* Unknown role */
1256 LL_ASSERT(0);
1257 		}
		break;
1258 default:
1259 /* Ignore other evts */
1260 break;
1261 }
1262 }
1263
1264 static void rp_cu_st_wait_instant(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
1265 void *param)
1266 {
1267 switch (evt) {
1268 case RP_CU_EVT_RUN:
1269 rp_cu_check_instant(conn, ctx, evt, param);
1270 break;
1271 default:
1272 /* Ignore other evts */
1273 break;
1274 }
1275 }
1276
1277 static void rp_cu_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
1278 {
1279 switch (ctx->state) {
1280 case RP_CU_STATE_IDLE:
1281 rp_cu_st_idle(conn, ctx, evt, param);
1282 break;
1283 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
1284 case RP_CU_STATE_WAIT_RX_CONN_PARAM_REQ:
1285 rp_cu_st_wait_rx_conn_param_req(conn, ctx, evt, param);
1286 break;
1287 case RP_CU_STATE_WAIT_CONN_PARAM_REQ_AVAILABLE:
1288 rp_cu_st_wait_conn_param_req_available(conn, ctx, evt, param);
1289 break;
1290 case RP_CU_STATE_WAIT_NTF_CONN_PARAM_REQ:
1291 rp_cu_state_wait_ntf_conn_param_req(conn, ctx, evt, param);
1292 break;
1293 case RP_CU_STATE_WAIT_CONN_PARAM_REQ_REPLY:
1294 rp_cu_state_wait_conn_param_req_reply(conn, ctx, evt, param);
1295 break;
1296 case RP_CU_STATE_WAIT_CONN_PARAM_REQ_REPLY_CONTINUE:
1297 rp_cu_state_wait_conn_param_req_reply_continue(conn, ctx, evt, param);
1298 break;
1299 case RP_CU_STATE_WAIT_TX_REJECT_EXT_IND:
1300 rp_cu_state_wait_tx_reject_ext_ind(conn, ctx, evt, param);
1301 break;
1302 case RP_CU_STATE_WAIT_TX_CONN_PARAM_RSP:
1303 rp_cu_st_wait_tx_conn_param_rsp(conn, ctx, evt, param);
1304 break;
1305 #if defined(CONFIG_BT_CTLR_USER_CPR_ANCHOR_POINT_MOVE)
1306 case RP_CU_STATE_WAIT_USER_REPLY:
1307 rp_cu_st_wait_user_response(conn, ctx, evt, param);
1308 break;
1309 #endif /* CONFIG_BT_CTLR_USER_CPR_ANCHOR_POINT_MOVE */
1310 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
1311 case RP_CU_STATE_WAIT_TX_CONN_UPDATE_IND:
1312 rp_cu_st_wait_tx_conn_update_ind(conn, ctx, evt, param);
1313 break;
1314 case RP_CU_STATE_WAIT_RX_CONN_UPDATE_IND:
1315 rp_cu_st_wait_rx_conn_update_ind(conn, ctx, evt, param);
1316 break;
1317 case RP_CU_STATE_WAIT_INSTANT:
1318 rp_cu_st_wait_instant(conn, ctx, evt, param);
1319 break;
1320 case RP_CU_STATE_WAIT_NTF_AVAIL:
1321 rp_cu_st_wait_ntf_avail(conn, ctx, evt, param);
1322 break;
1323 default:
1324 /* Unknown state */
1325 LL_ASSERT(0);
1326 break;
1327 }
1328 }
1329
1330 void llcp_rp_cu_rx(struct ll_conn *conn, struct proc_ctx *ctx, struct node_rx_pdu *rx)
1331 {
1332 struct pdu_data *pdu = (struct pdu_data *)rx->pdu;
1333
1334 switch (pdu->llctrl.opcode) {
1335 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
1336 case PDU_DATA_LLCTRL_TYPE_CONN_PARAM_REQ:
1337 rp_cu_execute_fsm(conn, ctx, RP_CU_EVT_CONN_PARAM_REQ, pdu);
1338 break;
1339 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
1340 case PDU_DATA_LLCTRL_TYPE_CONN_UPDATE_IND:
1341 rp_cu_execute_fsm(conn, ctx, RP_CU_EVT_CONN_UPDATE_IND, pdu);
1342 break;
1343 default:
1344 /* Invalid behaviour */
1345 /* Invalid PDU received so terminate connection */
1346 conn->llcp_terminate.reason_final = BT_HCI_ERR_LMP_PDU_NOT_ALLOWED;
1347 rp_cu_complete(conn, ctx);
1348 break;
1349 }
1350 }
1351
1352 void llcp_rp_cu_run(struct ll_conn *conn, struct proc_ctx *ctx, void *param)
1353 {
1354 rp_cu_execute_fsm(conn, ctx, RP_CU_EVT_RUN, param);
1355 }
1356
1357 bool llcp_rp_cu_awaiting_instant(struct proc_ctx *ctx)
1358 {
1359 return (ctx->state == RP_CU_STATE_WAIT_INSTANT);
1360 }
1361
1362 bool llcp_lp_cu_awaiting_instant(struct proc_ctx *ctx)
1363 {
1364 return (ctx->state == LP_CU_STATE_WAIT_INSTANT);
1365 }
1366
1367 #if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
1368 void llcp_rp_conn_param_req_reply(struct ll_conn *conn, struct proc_ctx *ctx)
1369 {
1370 rp_cu_execute_fsm(conn, ctx, RP_CU_EVT_CONN_PARAM_REQ_REPLY, NULL);
1371 }
1372
1373 void llcp_rp_conn_param_req_neg_reply(struct ll_conn *conn, struct proc_ctx *ctx)
1374 {
1375 rp_cu_execute_fsm(conn, ctx, RP_CU_EVT_CONN_PARAM_REQ_NEG_REPLY, NULL);
1376 }
1377
1378 #if defined(CONFIG_BT_CTLR_USER_CPR_ANCHOR_POINT_MOVE)
1379 bool llcp_rp_conn_param_req_apm_awaiting_reply(struct proc_ctx *ctx)
1380 {
1381 return (ctx->state == RP_CU_STATE_WAIT_USER_REPLY);
1382 }
1383
1384 void llcp_rp_conn_param_req_apm_reply(struct ll_conn *conn, struct proc_ctx *ctx)
1385 {
1386 rp_cu_execute_fsm(conn, ctx, RP_CU_EVT_CONN_PARAM_REQ_USER_REPLY, NULL);
1387 }
1388 #endif /* CONFIG_BT_CTLR_USER_CPR_ANCHOR_POINT_MOVE */
1389 #endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
1390