1 /*
2 * Copyright (c) 2020 Demant
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/kernel.h>
8
9 #include <zephyr/sys/byteorder.h>
10 #include <zephyr/sys/slist.h>
11 #include <zephyr/sys/util.h>
12
13 #include <zephyr/bluetooth/hci_types.h>
14
15 #include "hal/ccm.h"
16
17 #include "util/util.h"
18 #include "util/mem.h"
19 #include "util/memq.h"
20 #include "util/dbuf.h"
21
22 #include "pdu_df.h"
23 #include "lll/pdu_vendor.h"
24 #include "pdu.h"
25
26 #include "ll.h"
27 #include "ll_settings.h"
28
29 #include "lll.h"
30 #include "ll_feat.h"
31 #include "lll/lll_df_types.h"
32 #include "lll_conn.h"
33 #include "lll_conn_iso.h"
34
35 #include "ull_tx_queue.h"
36
37 #include "isoal.h"
38 #include "ull_iso_types.h"
39 #include "ull_conn_iso_types.h"
40 #include "ull_conn_iso_internal.h"
41
42 #include "ull_conn_types.h"
43 #include "ull_internal.h"
44 #include "ull_llcp.h"
45 #include "ull_llcp_features.h"
46 #include "ull_llcp_internal.h"
47 #include "ull_conn_internal.h"
48
49 #include <soc.h>
50 #include "hal/debug.h"
51
52 /* LLCP Local Procedure PHY Update FSM states */
enum {
	/* Idle; shared value with the generic LLCP idle state */
	LP_PU_STATE_IDLE = LLCP_STATE_IDLE,
	/* Waiting to (re)attempt transmission of LL_PHY_REQ */
	LP_PU_STATE_WAIT_TX_PHY_REQ,
	/* Waiting for ack of the transmitted LL_PHY_REQ */
	LP_PU_STATE_WAIT_TX_ACK_PHY_REQ,
	/* Central only: waiting for the peer's LL_PHY_RSP */
	LP_PU_STATE_WAIT_RX_PHY_RSP,
	/* Central only: waiting to transmit LL_PHY_UPDATE_IND */
	LP_PU_STATE_WAIT_TX_PHY_UPDATE_IND,
	/* Central only: waiting for ack of LL_PHY_UPDATE_IND */
	LP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND,
	/* Peripheral only: waiting for the central's LL_PHY_UPDATE_IND */
	LP_PU_STATE_WAIT_RX_PHY_UPDATE_IND,
	/* Waiting for a notification node to become available before TX */
	LP_PU_STATE_WAIT_NTF_AVAIL,
	/* Waiting for the PHY update instant to be reached */
	LP_PU_STATE_WAIT_INSTANT,
	/* Instant reached; completion/NTF deferred to on-air RX handling */
	LP_PU_STATE_WAIT_INSTANT_ON_AIR,
};
65
66 /* LLCP Local Procedure PHY Update FSM events */
enum {
	/* Procedure run */
	LP_PU_EVT_RUN,

	/* Response received (LL_PHY_RSP) */
	LP_PU_EVT_PHY_RSP,

	/* Indication received (LL_PHY_UPDATE_IND) */
	LP_PU_EVT_PHY_UPDATE_IND,

	/* Ack received for our transmitted PDU */
	LP_PU_EVT_ACK,

	/* Ready to notify host */
	LP_PU_EVT_NTF,

	/* Reject response received (LL_REJECT_EXT_IND) */
	LP_PU_EVT_REJECT,

	/* Unknown response received (LL_UNKNOWN_RSP) */
	LP_PU_EVT_UNKNOWN,
};
89
90 /* LLCP Remote Procedure PHY Update FSM states */
enum {
	/* Idle; shared value with the generic LLCP idle state */
	RP_PU_STATE_IDLE = LLCP_STATE_IDLE,
	/* Waiting for the peer's LL_PHY_REQ */
	RP_PU_STATE_WAIT_RX_PHY_REQ,
	/* Peripheral only: waiting to transmit LL_PHY_RSP */
	RP_PU_STATE_WAIT_TX_PHY_RSP,
	/* Peripheral only: waiting for ack of LL_PHY_RSP */
	RP_PU_STATE_WAIT_TX_ACK_PHY_RSP,
	/* Central only: waiting to transmit LL_PHY_UPDATE_IND */
	RP_PU_STATE_WAIT_TX_PHY_UPDATE_IND,
	/* Central only: waiting for ack of LL_PHY_UPDATE_IND */
	RP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND,
	/* Peripheral only: waiting for the central's LL_PHY_UPDATE_IND */
	RP_PU_STATE_WAIT_RX_PHY_UPDATE_IND,
	/* Waiting for a notification node to become available before TX */
	RP_PU_STATE_WAIT_NTF_AVAIL,
	/* Waiting for the PHY update instant to be reached */
	RP_PU_STATE_WAIT_INSTANT,
	/* Instant reached; completion/NTF deferred to on-air RX handling */
	RP_PU_STATE_WAIT_INSTANT_ON_AIR,
};
103
104 /* LLCP Remote Procedure PHY Update FSM events */
enum {
	/* Procedure run */
	RP_PU_EVT_RUN,

	/* Request received (LL_PHY_REQ) */
	RP_PU_EVT_PHY_REQ,

	/* Ack received for our transmitted PDU */
	RP_PU_EVT_ACK,

	/* Indication received (LL_PHY_UPDATE_IND) */
	RP_PU_EVT_PHY_UPDATE_IND,

	/* Ready to notify host */
	RP_PU_EVT_NTF,
};
121
/* Hardcoded instant delta of +6 connection events */
123 #define PHY_UPDATE_INSTANT_DELTA 6
124
125 #if defined(CONFIG_BT_CENTRAL)
/* PHY preference order */
127 #define PHY_PREF_1 PHY_2M
128 #define PHY_PREF_2 PHY_1M
129 #define PHY_PREF_3 PHY_CODED
130
/* Reduce a PHY bitmask to a single PHY, chosen in local preference
 * order (PHY_PREF_1 first). Returns 0 when no preferred PHY is present
 * in the mask.
 */
static inline uint8_t pu_select_phy(uint8_t phys)
{
	static const uint8_t prefs[] = { PHY_PREF_1, PHY_PREF_2, PHY_PREF_3 };

	for (size_t idx = 0U; idx < ARRAY_SIZE(prefs); idx++) {
		if (phys & prefs[idx]) {
			return prefs[idx];
		}
	}

	return 0U;
}
144
pu_prep_update_ind(struct ll_conn * conn,struct proc_ctx * ctx)145 static void pu_prep_update_ind(struct ll_conn *conn, struct proc_ctx *ctx)
146 {
147 ctx->data.pu.tx = pu_select_phy(ctx->data.pu.tx);
148 ctx->data.pu.rx = pu_select_phy(ctx->data.pu.rx);
149
150 if (ctx->data.pu.tx != conn->lll.phy_tx) {
151 ctx->data.pu.c_to_p_phy = ctx->data.pu.tx;
152 } else {
153 ctx->data.pu.c_to_p_phy = 0U;
154 }
155 if (ctx->data.pu.rx != conn->lll.phy_rx) {
156 ctx->data.pu.p_to_c_phy = ctx->data.pu.rx;
157 } else {
158 ctx->data.pu.p_to_c_phy = 0U;
159 }
160 }
161 #endif /* CONFIG_BT_CENTRAL */
162
163 #if defined(CONFIG_BT_PERIPHERAL)
/* Select the PHY with the longest TX time among the requested TX PHYs
 * and the PHY currently in use, to be used as a timing restriction that
 * fits within the current connEffectiveMaxTxTime.
 *
 * The table is indexed by the combined PHY bitmask (3 bits), so any
 * combination containing the Coded PHY bit maps to PHY_CODED, and 2M is
 * only selected when it is the sole candidate.
 */
static uint8_t pu_select_phy_timing_restrict(struct ll_conn *conn, uint8_t phy_tx)
{
	/* select the probable PHY with longest Tx time, which
	 * will be restricted to fit current
	 * connEffectiveMaxTxTime.
	 */
	/* Note - entry 0 in table is unused, so 0 on purpose */
	uint8_t phy_tx_time[8] = { 0, PHY_1M, PHY_2M, PHY_1M,
				   PHY_CODED, PHY_CODED, PHY_CODED, PHY_CODED };
	struct lll_conn *lll = &conn->lll;
	const uint8_t phys = phy_tx | lll->phy_tx;

	return phy_tx_time[phys];
}
178 #endif /* CONFIG_BT_PERIPHERAL */
179
/* Restrict TX packet timing to the timing of the given PHY */
static void pu_set_timing_restrict(struct ll_conn *conn, uint8_t phy_tx)
{
	conn->lll.phy_tx_time = phy_tx;
}
186
pu_reset_timing_restrict(struct ll_conn * conn)187 static void pu_reset_timing_restrict(struct ll_conn *conn)
188 {
189 pu_set_timing_restrict(conn, conn->lll.phy_tx);
190 }
191
192 #if defined(CONFIG_BT_PERIPHERAL)
/* Validate a PHY field from a PHY_UPDATE_IND.
 *
 * Valid values are 0 (no change) or exactly one of the three defined
 * PHY bits (0x01, 0x02, 0x04) — i.e. at most one bit set and no RFU
 * bits set.
 */
static inline bool phy_validation_check_phy_ind(uint8_t phy)
{
	/* (phy & (phy - 1)) == 0 accepts zero or a single set bit;
	 * phy <= 4 rejects RFU bits above 0x04.
	 */
	return (((phy & (phy - 1U)) == 0U) && (phy <= 4U));
}
200
/* Validate a received PHY_UPDATE_IND (peripheral side).
 *
 * Returns non-zero when the procedure must end, with ctx->data.pu.error
 * set to the result to report:
 *  - BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL: invalid PHY field(s)
 *  - BT_HCI_ERR_SUCCESS: both directions unchanged (no-op update)
 *  - BT_HCI_ERR_INSTANT_PASSED: update instant already reached/passed
 * Returns 0 when the update is valid and pending a future instant.
 */
static uint8_t pu_check_update_ind(struct ll_conn *conn, struct proc_ctx *ctx)
{
	uint8_t ret = 0;

	/* Check if either phy selected is invalid */
	if (!phy_validation_check_phy_ind(ctx->data.pu.c_to_p_phy) ||
	    !phy_validation_check_phy_ind(ctx->data.pu.p_to_c_phy)) {
		/* more than one or any rfu bit selected in either phy */
		ctx->data.pu.error = BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
		ret = 1;
	}

	/* Both tx and rx PHY unchanged */
	/* NOTE(review): if an invalid PHY above also has no bits within the
	 * 0x07 mask (e.g. only RFU bits set), this branch overwrites the
	 * error with SUCCESS — confirm this precedence is intended.
	 */
	if (!((ctx->data.pu.c_to_p_phy | ctx->data.pu.p_to_c_phy) & 0x07)) {
		/* if no phy changes, quit procedure, and possibly signal host */
		ctx->data.pu.error = BT_HCI_ERR_SUCCESS;
		ret = 1;
	} else {
		/* if instant already passed, quit procedure with error */
		if (is_instant_reached_or_passed(ctx->data.pu.instant,
						 ull_conn_event_counter(conn))) {
			ctx->data.pu.error = BT_HCI_ERR_INSTANT_PASSED;
			ret = 1;
		}
	}
	return ret;
}
228 #endif /* CONFIG_BT_PERIPHERAL */
229
/* Apply the negotiated PHY change to the connection's LLL context at the
 * instant.
 *
 * Each direction is first masked by the PHYs this controller build
 * actually supports; a zero (no change / unsupported) leaves that
 * direction untouched. The c_to_p/p_to_c directions map onto local
 * tx/rx depending on role.
 *
 * Returns non-zero when either the local TX or RX PHY actually changed.
 */
static uint8_t pu_apply_phy_update(struct ll_conn *conn, struct proc_ctx *ctx)
{
	struct lll_conn *lll = &conn->lll;
	uint8_t phy_bitmask = PHY_1M;
	const uint8_t old_tx = lll->phy_tx;
	const uint8_t old_rx = lll->phy_rx;

#if defined(CONFIG_BT_CTLR_PHY_2M)
	phy_bitmask |= PHY_2M;
#endif
#if defined(CONFIG_BT_CTLR_PHY_CODED)
	phy_bitmask |= PHY_CODED;
#endif
	/* Mask out PHYs not supported by this build */
	const uint8_t p_to_c_phy = ctx->data.pu.p_to_c_phy & phy_bitmask;
	const uint8_t c_to_p_phy = ctx->data.pu.c_to_p_phy & phy_bitmask;

	if (0) {
#if defined(CONFIG_BT_PERIPHERAL)
	} else if (lll->role == BT_HCI_ROLE_PERIPHERAL) {
		/* As peripheral: p_to_c is our TX direction, c_to_p our RX */
		if (p_to_c_phy) {
			lll->phy_tx = p_to_c_phy;
		}
		if (c_to_p_phy) {
			lll->phy_rx = c_to_p_phy;
		}
#endif /* CONFIG_BT_PERIPHERAL */
#if defined(CONFIG_BT_CENTRAL)
	} else if (lll->role == BT_HCI_ROLE_CENTRAL) {
		/* As central: p_to_c is our RX direction, c_to_p our TX */
		if (p_to_c_phy) {
			lll->phy_rx = p_to_c_phy;
		}
		if (c_to_p_phy) {
			lll->phy_tx = c_to_p_phy;
		}
#endif /* CONFIG_BT_CENTRAL */
	}

	return ((old_tx != lll->phy_tx) || (old_rx != lll->phy_rx));
}
269
270 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
/* Compute the effective max TX/RX time for a direction after a PHY
 * change: the airtime of max_octets on the given PHY, clamped below by
 * the minimum payload time and above by the supplied default/maximum.
 * With the Coded PHY enabled, never report less than the airtime of a
 * minimum-size PDU on that PHY.
 */
static uint16_t pu_calc_eff_time(uint8_t max_octets, uint8_t phy, uint16_t default_time)
{
	const uint16_t payload_time = PDU_DC_MAX_US(max_octets, phy);
	uint16_t eff_time;

	eff_time = MIN(MAX(PDU_DC_PAYLOAD_TIME_MIN, payload_time), default_time);
#if defined(CONFIG_BT_CTLR_PHY_CODED)
	eff_time = MAX(eff_time, PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, phy));
#endif

	return eff_time;
}
284
/* Recalculate the effective data length times after a PHY change.
 *
 * Only the direction(s) whose PHY actually changed for this role are
 * recalculated. If either effective time grew, or the stored effective
 * time exceeds the current maximum, the new values are committed and
 * non-zero is returned to signal that a DLE notification is due.
 */
static uint8_t pu_update_eff_times(struct ll_conn *conn, struct proc_ctx *ctx)
{
	struct lll_conn *lll = &conn->lll;
	uint16_t eff_tx_time = lll->dle.eff.max_tx_time;
	uint16_t eff_rx_time = lll->dle.eff.max_rx_time;
	uint16_t max_rx_time, max_tx_time;

	ull_dle_max_time_get(conn, &max_rx_time, &max_tx_time);

	/* Our TX direction changed: p_to_c for peripheral, c_to_p for central */
	if ((ctx->data.pu.p_to_c_phy && (lll->role == BT_HCI_ROLE_PERIPHERAL)) ||
	    (ctx->data.pu.c_to_p_phy && (lll->role == BT_HCI_ROLE_CENTRAL))) {
		eff_tx_time =
			pu_calc_eff_time(lll->dle.eff.max_tx_octets, lll->phy_tx, max_tx_time);
	}
	/* Our RX direction changed: p_to_c for central, c_to_p for peripheral */
	if ((ctx->data.pu.p_to_c_phy && (lll->role == BT_HCI_ROLE_CENTRAL)) ||
	    (ctx->data.pu.c_to_p_phy && (lll->role == BT_HCI_ROLE_PERIPHERAL))) {
		eff_rx_time =
			pu_calc_eff_time(lll->dle.eff.max_rx_octets, lll->phy_rx, max_rx_time);
	}

	if ((eff_tx_time > lll->dle.eff.max_tx_time) ||
	    (lll->dle.eff.max_tx_time > max_tx_time) ||
	    (eff_rx_time > lll->dle.eff.max_rx_time) ||
	    (lll->dle.eff.max_rx_time > max_rx_time)) {
		lll->dle.eff.max_tx_time = eff_tx_time;
		lll->dle.eff.max_rx_time = eff_rx_time;
#if defined(CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE)
		/* Request an event length update to match the new times */
		lll->evt_len_upd = 1U;
#endif /* CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE */
		return 1U;
	}

	return 0U;
}
319 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
320
pu_set_preferred_phys(struct ll_conn * conn,struct proc_ctx * ctx)321 static inline void pu_set_preferred_phys(struct ll_conn *conn, struct proc_ctx *ctx)
322 {
323 conn->phy_pref_rx = ctx->data.pu.rx;
324 conn->phy_pref_tx = ctx->data.pu.tx;
325
326 /*
327 * Note: Since 'flags' indicate local coded phy preference (S2 or S8) and
328 * this is not negotiated with the peer, it is simply reconfigured in conn->lll when
329 * the update is initiated, and takes effect whenever the coded phy is in use.
330 */
331 conn->lll.phy_flags = ctx->data.pu.flags;
332 }
333
/* Intersect the peer-requested PHYs with the locally preferred PHYs.
 *
 * As central, if the intersection leaves either direction empty ('no
 * change'), both directions are forced to 'no change' to comply with
 * the spec (see quoted text below).
 */
static inline void pu_combine_phys(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t tx,
				   uint8_t rx)
{
	/* Combine requested phys with locally preferred phys */
	ctx->data.pu.rx &= rx;
	ctx->data.pu.tx &= tx;
	/* If either tx or rx is 'no change' at this point we force both to no change to
	 * comply with the spec
	 * Spec. BT5.2 Vol6, Part B, section 5.1.10:
	 * The remainder of this section shall apply irrespective of which device initiated
	 * the procedure.
	 *
	 * Irrespective of the above rules, the central may leave both directions
	 * unchanged. If the periph specified a single PHY in both the TX_PHYS and
	 * RX_PHYS fields and both fields are the same, the central shall either select
	 * the PHY specified by the periph for both directions or shall leave both directions
	 * unchanged.
	 */
	if (conn->lll.role == BT_HCI_ROLE_CENTRAL && (!ctx->data.pu.rx || !ctx->data.pu.tx)) {
		ctx->data.pu.tx = 0;
		ctx->data.pu.rx = 0;
	}
}
357
358 #if defined(CONFIG_BT_CENTRAL)
pu_prepare_instant(struct ll_conn * conn,struct proc_ctx * ctx)359 static void pu_prepare_instant(struct ll_conn *conn, struct proc_ctx *ctx)
360 {
361 /* Set instance only in case there is actual PHY change. Otherwise the instant should be
362 * set to 0.
363 */
364 if (ctx->data.pu.c_to_p_phy != 0 || ctx->data.pu.p_to_c_phy != 0) {
365 ctx->data.pu.instant = ull_conn_event_counter(conn) + conn->lll.latency +
366 PHY_UPDATE_INSTANT_DELTA;
367 } else {
368 ctx->data.pu.instant = 0;
369 }
370 }
371 #endif /* CONFIG_BT_CENTRAL */
372
373 /*
374 * LLCP Local Procedure PHY Update FSM
375 */
376
/* Encode and transmit the next LL Control PDU of the local PHY Update
 * procedure (LL_PHY_REQ, or LL_PHY_UPDATE_IND as central), using the TX
 * node pre-allocated in ctx->node_ref.tx, then restart the procedure
 * response timeout.
 */
static void lp_pu_tx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	struct node_tx *tx;
	struct pdu_data *pdu;

	LL_ASSERT(ctx->node_ref.tx);

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	/* A PHY change can also change the effective data lengths, which
	 * needs a DLE notification node. Pre-allocate it before TX, except
	 * for the central's initial PHY_REQ. If no NTF node is available,
	 * hold off the TX and retry from WAIT_NTF_AVAIL.
	 */
	if (!((ctx->tx_opcode == PDU_DATA_LLCTRL_TYPE_PHY_REQ) &&
	      (conn->lll.role == BT_HCI_ROLE_CENTRAL))) {
		if (!llcp_ntf_alloc_is_available()) {
			/* No NTF nodes avail, so we need to hold off TX */
			ctx->state = LP_PU_STATE_WAIT_NTF_AVAIL;
			return;
		}
		ctx->data.pu.ntf_dle_node = llcp_ntf_alloc();
		LL_ASSERT(ctx->data.pu.ntf_dle_node);
	}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

	/* Take ownership of the TX node; keep a reference for ack handling */
	tx = ctx->node_ref.tx;
	ctx->node_ref.tx = NULL;
	ctx->node_ref.tx_ack = tx;
	pdu = (struct pdu_data *)tx->pdu;

	/* Encode LL Control PDU */
	switch (ctx->tx_opcode) {
	case PDU_DATA_LLCTRL_TYPE_PHY_REQ:
		pu_set_preferred_phys(conn, ctx);
		llcp_pdu_encode_phy_req(ctx, pdu);
		/* Pause data TX while the PHY update is negotiated */
		llcp_tx_pause_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);
		ctx->state = LP_PU_STATE_WAIT_TX_ACK_PHY_REQ;
		break;
#if defined(CONFIG_BT_CENTRAL)
	case PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND:
		pu_prep_update_ind(conn, ctx);
		pu_prepare_instant(conn, ctx);
		llcp_pdu_encode_phy_update_ind(ctx, pdu);
		/* No further response PDU expected */
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
		ctx->state = LP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND;
		break;
#endif /* CONFIG_BT_CENTRAL */
	default:
		/* Unsupported opcode for this procedure */
		LL_ASSERT(0);
	}

	/* Enqueue LL Control PDU towards LLL */
	llcp_tx_enqueue(conn, tx);

	/* Restart procedure response timeout timer */
	llcp_lr_prt_restart(conn);
}
429
/* Generate the PHY update host notification (or release the retained RX
 * node when no notification is due), re-using the RX node stored in
 * ctx->node_ref.rx.
 */
static void pu_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
{
	struct node_rx_pdu *ntf;
	struct node_rx_pu *pdu;

	/* Piggy-back on stored RX node */
	ntf = ctx->node_ref.rx;
	ctx->node_ref.rx = NULL;
	LL_ASSERT(ntf);

	if (ctx->data.pu.ntf_pu) {
		/* Node must have been marked retained earlier in the procedure */
		LL_ASSERT(ntf->hdr.type == NODE_RX_TYPE_RETAIN);
		ntf->hdr.type = NODE_RX_TYPE_PHY_UPDATE;
		ntf->hdr.handle = conn->lll.handle;
		pdu = (struct node_rx_pu *)ntf->pdu;

		/* Report procedure result and the PHYs now in effect */
		pdu->status = ctx->data.pu.error;
		pdu->rx = conn->lll.phy_rx;
		pdu->tx = conn->lll.phy_tx;
	} else {
		/* No notification due; release the node instead */
		ntf->hdr.type = NODE_RX_TYPE_RELEASE;
	}

	/* Enqueue notification towards LL */
	ll_rx_put_sched(ntf->hdr.link, ntf);

	ctx->data.pu.ntf_pu = 0;
}
458
459 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
/* Generate the data length change host notification, or release the
 * pre-allocated NTF node when no DLE change resulted from the PHY update.
 */
static void pu_dle_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
{
	struct node_rx_pdu *ntf;
	struct pdu_data *pdu;

	/* Retrieve DLE ntf node */
	ntf = ctx->data.pu.ntf_dle_node;

	if (!ctx->data.pu.ntf_dle) {
		if (!ntf) {
			/* If no DLE ntf was pre-allocated there is nothing more to do */
			/* This will happen in case of a completion on UNKNOWN_RSP to PHY_REQ
			 * in Central case.
			 */
			return;
		}
		/* Signal to release pre-allocated node in case there is no DLE ntf */
		ntf->hdr.type = NODE_RX_TYPE_RELEASE;
	} else {
		/* A DLE notification is due; the node must exist */
		LL_ASSERT(ntf);

		ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
		ntf->hdr.handle = conn->lll.handle;
		pdu = (struct pdu_data *)ntf->pdu;

		llcp_ntf_encode_length_change(conn, pdu);
	}

	/* Enqueue notification towards LL */
	ll_rx_put_sched(ntf->hdr.link, ntf);

	ctx->data.pu.ntf_dle = 0;
	ctx->data.pu.ntf_dle_node = NULL;
}
494 #endif
495
/* Finalize the local procedure: mark it complete towards the local
 * request machine, clear the paused-command restriction and return the
 * FSM to idle.
 */
static void lp_pu_complete_finalize(struct ll_conn *conn, struct proc_ctx *ctx)
{
	llcp_lr_complete(conn);
	llcp_rr_set_paused_cmd(conn, PROC_NONE);
	ctx->state = LP_PU_STATE_IDLE;
}
502
/* Emit pending host notifications (PHY update and, when enabled, data
 * length change) and finalize the local procedure.
 */
static void lp_pu_tx_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	pu_ntf(conn, ctx);
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	pu_dle_ntf(conn, ctx);
#endif
	lp_pu_complete_finalize(conn, ctx);
}
511
/* Conclude the local procedure logic and defer actual completion/NTF to
 * the on-air instant.
 */
static void lp_pu_complete(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	pu_reset_timing_restrict(conn);

	/* Postpone procedure completion (and possible NTF generation) to actual 'air instant'
	 * Since LLCP STM is driven from LLL prepare this actually happens BEFORE instant
	 * and thus NTFs are generated and propagated up prior to actual instant on air.
	 * Instead postpone completion/NTF to the beginning of RX handling
	 */
	ctx->state = LP_PU_STATE_WAIT_INSTANT_ON_AIR;
}
523
/* Attempt to send LL_PHY_REQ; wait in WAIT_TX_PHY_REQ if the procedure
 * is paused, colliding with a remote procedure, lacking a TX buffer, or
 * the PHY update command is itself paused.
 */
static void lp_pu_send_phy_req(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	if (llcp_lr_ispaused(conn) || llcp_rr_get_collision(conn) ||
	    !llcp_tx_alloc_peek(conn, ctx) ||
	    (llcp_rr_get_paused_cmd(conn) == PROC_PHY_UPDATE)) {
		ctx->state = LP_PU_STATE_WAIT_TX_PHY_REQ;
	} else {
		/* Mark as a procedure that resolves remote collisions */
		llcp_rr_set_incompat(conn, INCOMPAT_RESOLVABLE);
		/* Pause the CTE request procedure while the PHY update runs */
		llcp_rr_set_paused_cmd(conn, PROC_CTE_REQ);
		ctx->tx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_REQ;

		/* Allocate TX node */
		ctx->node_ref.tx = llcp_tx_alloc(conn, ctx);
		lp_pu_tx(conn, ctx, evt, param);
	}
}
540
541 #if defined(CONFIG_BT_CENTRAL)
/* Central: attempt to send LL_PHY_UPDATE_IND; wait in
 * WAIT_TX_PHY_UPDATE_IND when paused or no TX buffer is available.
 */
static void lp_pu_send_phy_update_ind(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				      void *param)
{
	if (llcp_lr_ispaused(conn) || !llcp_tx_alloc_peek(conn, ctx)) {
		ctx->state = LP_PU_STATE_WAIT_TX_PHY_UPDATE_IND;
	} else {
		ctx->tx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND;

		/* Allocate TX node */
		ctx->node_ref.tx = llcp_tx_alloc(conn, ctx);
		lp_pu_tx(conn, ctx, evt, param);
	}
}
555 #endif /* CONFIG_BT_CENTRAL */
556
/* IDLE state: only the RUN event starts the procedure; all other events
 * are silently ignored.
 */
static void lp_pu_st_idle(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	if (evt == LP_PU_EVT_RUN) {
		lp_pu_send_phy_req(conn, ctx, evt, param);
	}
}
568
/* WAIT_TX_PHY_REQ state: retry sending LL_PHY_REQ on each RUN event;
 * all other events are silently ignored.
 */
static void lp_pu_st_wait_tx_phy_req(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				     void *param)
{
	if (evt == LP_PU_EVT_RUN) {
		lp_pu_send_phy_req(conn, ctx, evt, param);
	}
}
581
582 #if defined(CONFIG_BT_CENTRAL)
/* Central: WAIT_RX_PHY_RSP state.
 *
 * On LL_PHY_RSP: combine the peer's PHYs with our original request and
 * proceed to sending LL_PHY_UPDATE_IND. On LL_UNKNOWN_RSP: the peer
 * does not support PHY update — disable 2M/Coded features for this
 * connection and complete with UNSUPP_REMOTE_FEATURE.
 */
static void lp_pu_st_wait_rx_phy_rsp(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				     void *param)
{
	switch (evt) {
	case LP_PU_EVT_PHY_RSP:
		llcp_rr_set_incompat(conn, INCOMPAT_RESERVED);
		/* 'Prefer' the phys from the REQ */
		uint8_t tx_pref = ctx->data.pu.tx;
		uint8_t rx_pref = ctx->data.pu.rx;

		llcp_pdu_decode_phy_rsp(ctx, (struct pdu_data *)param);
		/* Pause data tx */
		llcp_tx_pause_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);
		/* Combine with the 'Preferred' phys */
		pu_combine_phys(conn, ctx, tx_pref, rx_pref);

		/* Mark RX node to NOT release */
		llcp_rx_node_retain(ctx);

		lp_pu_send_phy_update_ind(conn, ctx, evt, param);
		break;
	case LP_PU_EVT_UNKNOWN:
		llcp_rr_set_incompat(conn, INCOMPAT_NO_COLLISION);
		/* Unsupported in peer, so disable locally for this connection
		 * Peer does not accept PHY UPDATE, so disable non 1M phys on current connection
		 */
		feature_unmask_features(conn, LL_FEAT_BIT_PHY_2M | LL_FEAT_BIT_PHY_CODED);

		/* Mark RX node to NOT release */
		llcp_rx_node_retain(ctx);

		ctx->data.pu.error = BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
		ctx->data.pu.ntf_pu = 1;
		lp_pu_complete(conn, ctx, evt, param);
		/* Negotiation over; resume the paused data TX queue */
		llcp_tx_resume_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);
		break;
	default:
		/* Ignore other evts */
		break;
	}
}
624 #endif /* CONFIG_BT_CENTRAL */
625
/* WAIT_TX_ACK_PHY_REQ state.
 *
 * Once our LL_PHY_REQ is acked, the next step depends on role: as
 * central we wait for LL_PHY_RSP; as peripheral we apply the TX timing
 * restriction, resume data TX and wait for LL_PHY_UPDATE_IND.
 */
static void lp_pu_st_wait_tx_ack_phy_req(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
					 void *param)
{
	switch (evt) {
	case LP_PU_EVT_ACK:
		switch (conn->lll.role) {
#if defined(CONFIG_BT_CENTRAL)
		case BT_HCI_ROLE_CENTRAL:
			ctx->state = LP_PU_STATE_WAIT_RX_PHY_RSP;
			ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_RSP;
			break;
#endif /* CONFIG_BT_CENTRAL */
#if defined(CONFIG_BT_PERIPHERAL)
		case BT_HCI_ROLE_PERIPHERAL:
			/* If we act as peripheral apply timing restriction */
			pu_set_timing_restrict(
				conn, pu_select_phy_timing_restrict(conn, ctx->data.pu.tx));
			ctx->state = LP_PU_STATE_WAIT_RX_PHY_UPDATE_IND;
			ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND;
			llcp_tx_resume_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);
			break;
#endif /* CONFIG_BT_PERIPHERAL */
		default:
			/* Unknown role */
			LL_ASSERT(0);
		}

		break;
	default:
		/* Ignore other evts */
		break;
	}
}
659
660 #if defined(CONFIG_BT_CENTRAL)
/* Central: WAIT_TX_PHY_UPDATE_IND state — retry sending
 * LL_PHY_UPDATE_IND on each RUN event; all other events are ignored.
 */
static void lp_pu_st_wait_tx_phy_update_ind(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
					    void *param)
{
	if (evt == LP_PU_EVT_RUN) {
		lp_pu_send_phy_update_ind(conn, ctx, evt, param);
	}
}
673
/* Central: WAIT_TX_ACK_PHY_UPDATE_IND state.
 *
 * When the LL_PHY_UPDATE_IND is acked: if any direction changes, apply
 * the TX timing restriction (when our TX PHY changes), stop the
 * response timeout and wait for the instant; otherwise complete
 * immediately with success. Data TX is resumed in both cases.
 */
static void lp_pu_st_wait_tx_ack_phy_update_ind(struct ll_conn *conn, struct proc_ctx *ctx,
						uint8_t evt, void *param)
{
	switch (evt) {
	case LP_PU_EVT_ACK:
		LL_ASSERT(conn->lll.role == BT_HCI_ROLE_CENTRAL);
		if (ctx->data.pu.p_to_c_phy || ctx->data.pu.c_to_p_phy) {
			/* Either phys should change */
			if (ctx->data.pu.c_to_p_phy) {
				/* central to periph tx phy changes so, apply timing restriction */
				pu_set_timing_restrict(conn, ctx->data.pu.c_to_p_phy);
			}

			/* Since at least one phy will change,
			 * stop the procedure response timeout
			 */
			llcp_lr_prt_stop(conn);

			/* Now we should wait for instant */
			ctx->state = LP_PU_STATE_WAIT_INSTANT;
		} else {
			llcp_rr_set_incompat(conn, INCOMPAT_NO_COLLISION);
			ctx->data.pu.error = BT_HCI_ERR_SUCCESS;
			/* Only notify the host if it initiated the procedure */
			ctx->data.pu.ntf_pu = ctx->data.pu.host_initiated;
			lp_pu_complete(conn, ctx, evt, param);
		}
		llcp_tx_resume_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);
		break;
	default:
		/* Ignore other evts */
		break;
	}
}
707 #endif /* CONFIG_BT_CENTRAL */
708
709 #if defined(CONFIG_BT_PERIPHERAL)
/* Peripheral: WAIT_RX_PHY_UPDATE_IND state.
 *
 * On LL_PHY_UPDATE_IND: validate it; either wait for the instant (with
 * a TX timing restriction if our TX PHY changes) or end the procedure —
 * terminating the connection on a validation error. On reject/unknown
 * responses the procedure completes with the corresponding error.
 */
static void lp_pu_st_wait_rx_phy_update_ind(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
					    void *param)
{
	switch (evt) {
	case LP_PU_EVT_PHY_UPDATE_IND:
		LL_ASSERT(conn->lll.role == BT_HCI_ROLE_PERIPHERAL);
		llcp_rr_set_incompat(conn, INCOMPAT_RESERVED);
		llcp_pdu_decode_phy_update_ind(ctx, (struct pdu_data *)param);
		const uint8_t end_procedure = pu_check_update_ind(conn, ctx);

		/* Mark RX node to NOT release */
		llcp_rx_node_retain(ctx);

		if (!end_procedure) {
			if (ctx->data.pu.p_to_c_phy) {
				/* If periph to central phy changes apply tx timing restriction */
				pu_set_timing_restrict(conn, ctx->data.pu.p_to_c_phy);
			}

			/* Since at least one phy will change,
			 * stop the procedure response timeout
			 */
			llcp_lr_prt_stop(conn);

			ctx->state = LP_PU_STATE_WAIT_INSTANT;
		} else {
			llcp_rr_set_incompat(conn, INCOMPAT_NO_COLLISION);
			if (ctx->data.pu.error != BT_HCI_ERR_SUCCESS) {
				/* Mark the connection for termination */
				conn->llcp_terminate.reason_final = ctx->data.pu.error;
			}
			/* Only notify the host if it initiated the procedure */
			ctx->data.pu.ntf_pu = ctx->data.pu.host_initiated;
			lp_pu_complete(conn, ctx, evt, param);
		}
		llcp_tx_resume_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);
		break;
	case LP_PU_EVT_REJECT:
		llcp_pdu_decode_reject_ext_ind(ctx, (struct pdu_data *)param);
		ctx->data.pu.error = ctx->reject_ext_ind.error_code;
		/* Fallthrough */
	case LP_PU_EVT_UNKNOWN:
		llcp_rr_set_incompat(conn, INCOMPAT_NO_COLLISION);
		if (evt == LP_PU_EVT_UNKNOWN) {
			/* Peer does not support PHY update; disable non-1M PHY features */
			feature_unmask_features(conn, LL_FEAT_BIT_PHY_2M | LL_FEAT_BIT_PHY_CODED);
			ctx->data.pu.error = BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
		}
		/* Mark RX node to NOT release */
		llcp_rx_node_retain(ctx);

		ctx->data.pu.ntf_pu = 1;
		lp_pu_complete(conn, ctx, evt, param);
		llcp_tx_resume_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);
		break;
	default:
		/* Ignore other evts */
		break;
	}
}
768 #endif /* CONFIG_BT_PERIPHERAL */
769
/* Check whether the PHY update instant has been reached; if so, apply
 * the PHY change, recompute effective data length times (when enabled)
 * and conclude the procedure with success.
 */
static void lp_pu_check_instant(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				void *param)
{
	if (is_instant_reached_or_passed(ctx->data.pu.instant, ull_conn_event_counter(conn))) {
		const uint8_t phy_changed = pu_apply_phy_update(conn, ctx);
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
		/* A PHY change may also change the effective data times */
		if (phy_changed) {
			ctx->data.pu.ntf_dle = pu_update_eff_times(conn, ctx);
		}
#endif
		llcp_rr_set_incompat(conn, INCOMPAT_NO_COLLISION);
		ctx->data.pu.error = BT_HCI_ERR_SUCCESS;
		/* Notify if anything changed, or if the host asked for the update */
		ctx->data.pu.ntf_pu = (phy_changed || ctx->data.pu.host_initiated);
		lp_pu_complete(conn, ctx, evt, param);
	}
}
786
/* WAIT_INSTANT state: on every RUN event check whether the instant has
 * been reached; all other events are ignored.
 */
static void lp_pu_st_wait_instant(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				  void *param)
{
	if (evt == LP_PU_EVT_RUN) {
		lp_pu_check_instant(conn, ctx, evt, param);
	}
}
799
/* WAIT_INSTANT_ON_AIR state: wait for the NTF event to emit host
 * notifications and finalize; all other events are ignored.
 */
static void lp_pu_st_wait_instant_on_air(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
					 void *param)
{
	if (evt == LP_PU_EVT_NTF) {
		lp_pu_tx_ntf(conn, ctx, evt, param);
	}
}
812
813 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
/* WAIT_NTF_AVAIL state: retry the held-off TX on each RUN event (the TX
 * path re-checks NTF node availability); all other events are ignored.
 */
static void lp_pu_st_wait_ntf_avail(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				    void *param)
{
	if (evt == LP_PU_EVT_RUN) {
		lp_pu_tx(conn, ctx, evt, param);
	}
}
826 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
827
/* Dispatch an event to the handler of the local procedure's current
 * FSM state.
 */
static void lp_pu_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	switch (ctx->state) {
	case LP_PU_STATE_IDLE:
		lp_pu_st_idle(conn, ctx, evt, param);
		break;
	case LP_PU_STATE_WAIT_TX_PHY_REQ:
		lp_pu_st_wait_tx_phy_req(conn, ctx, evt, param);
		break;
	case LP_PU_STATE_WAIT_TX_ACK_PHY_REQ:
		lp_pu_st_wait_tx_ack_phy_req(conn, ctx, evt, param);
		break;
#if defined(CONFIG_BT_CENTRAL)
	case LP_PU_STATE_WAIT_RX_PHY_RSP:
		lp_pu_st_wait_rx_phy_rsp(conn, ctx, evt, param);
		break;
	case LP_PU_STATE_WAIT_TX_PHY_UPDATE_IND:
		lp_pu_st_wait_tx_phy_update_ind(conn, ctx, evt, param);
		break;
	case LP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND:
		lp_pu_st_wait_tx_ack_phy_update_ind(conn, ctx, evt, param);
		break;
#endif /* CONFIG_BT_CENTRAL */
#if defined(CONFIG_BT_PERIPHERAL)
	case LP_PU_STATE_WAIT_RX_PHY_UPDATE_IND:
		lp_pu_st_wait_rx_phy_update_ind(conn, ctx, evt, param);
		break;
#endif /* CONFIG_BT_PERIPHERAL */
	case LP_PU_STATE_WAIT_INSTANT:
		lp_pu_st_wait_instant(conn, ctx, evt, param);
		break;
	case LP_PU_STATE_WAIT_INSTANT_ON_AIR:
		lp_pu_st_wait_instant_on_air(conn, ctx, evt, param);
		break;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case LP_PU_STATE_WAIT_NTF_AVAIL:
		lp_pu_st_wait_ntf_avail(conn, ctx, evt, param);
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
	default:
		/* Unknown state */
		LL_ASSERT(0);
	}
}
872
/* Entry point for LL Control PDUs received while the local PHY Update
 * procedure is active. Maps the PDU opcode to an FSM event; any other
 * opcode is invalid here and terminates the connection.
 */
void llcp_lp_pu_rx(struct ll_conn *conn, struct proc_ctx *ctx, struct node_rx_pdu *rx)
{
	struct pdu_data *pdu = (struct pdu_data *)rx->pdu;

	switch (pdu->llctrl.opcode) {
#if defined(CONFIG_BT_CENTRAL)
	case PDU_DATA_LLCTRL_TYPE_PHY_RSP:
		lp_pu_execute_fsm(conn, ctx, LP_PU_EVT_PHY_RSP, pdu);
		break;
#endif /* CONFIG_BT_CENTRAL */
#if defined(CONFIG_BT_PERIPHERAL)
	case PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND:
		lp_pu_execute_fsm(conn, ctx, LP_PU_EVT_PHY_UPDATE_IND, pdu);
		break;
#endif /* CONFIG_BT_PERIPHERAL */
	case PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP:
		lp_pu_execute_fsm(conn, ctx, LP_PU_EVT_UNKNOWN, pdu);
		break;
	case PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND:
		lp_pu_execute_fsm(conn, ctx, LP_PU_EVT_REJECT, pdu);
		break;
	default:
		/* Invalid behaviour */
		/* Invalid PDU received so terminate connection */
		conn->llcp_terminate.reason_final = BT_HCI_ERR_LMP_PDU_NOT_ALLOWED;
		llcp_lr_complete(conn);
		ctx->state = LP_PU_STATE_IDLE;
		break;
	}
}
903
/* Drive the local PHY Update procedure with a RUN event */
void llcp_lp_pu_run(struct ll_conn *conn, struct proc_ctx *ctx, void *param)
{
	lp_pu_execute_fsm(conn, ctx, LP_PU_EVT_RUN, param);
}
908
/* Signal the local PHY Update procedure that our PDU was acked */
void llcp_lp_pu_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, void *param)
{
	lp_pu_execute_fsm(conn, ctx, LP_PU_EVT_ACK, param);
}
913
/* Signal the local PHY Update procedure that host notification(s) can
 * now be generated.
 */
void llcp_lp_pu_tx_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
{
	lp_pu_execute_fsm(conn, ctx, LP_PU_EVT_NTF, NULL);
}
918
/* Return true while the local procedure is waiting for the PHY update
 * instant to be reached.
 */
bool llcp_lp_pu_awaiting_instant(struct proc_ctx *ctx)
{
	return (ctx->state == LP_PU_STATE_WAIT_INSTANT);
}
923
924 /*
925 * LLCP Remote Procedure PHY Update FSM
926 */
/* Encode and transmit the next LL Control PDU of the remote PHY Update
 * procedure (LL_PHY_RSP as peripheral, or LL_PHY_UPDATE_IND as central),
 * using the TX node pre-allocated in ctx->node_ref.tx, then restart the
 * procedure response timeout.
 */
static void rp_pu_tx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	struct node_tx *tx;
	struct pdu_data *pdu;

	LL_ASSERT(ctx->node_ref.tx);

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	/* Pre-allocate a DLE notification node before TX; if none is
	 * available, hold off the TX and retry from WAIT_NTF_AVAIL.
	 */
	if (!llcp_ntf_alloc_is_available()) {
		/* No NTF nodes avail, so we need to hold off TX */
		ctx->state = RP_PU_STATE_WAIT_NTF_AVAIL;
		return;
	}

	ctx->data.pu.ntf_dle_node = llcp_ntf_alloc();
	LL_ASSERT(ctx->data.pu.ntf_dle_node);
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

	/* Take ownership of the TX node; keep a reference for ack handling */
	tx = ctx->node_ref.tx;
	ctx->node_ref.tx = NULL;
	pdu = (struct pdu_data *)tx->pdu;
	ctx->node_ref.tx_ack = tx;

	/* Encode LL Control PDU */
	switch (ctx->tx_opcode) {
#if defined(CONFIG_BT_PERIPHERAL)
	case PDU_DATA_LLCTRL_TYPE_PHY_RSP:
		llcp_pdu_encode_phy_rsp(conn, pdu);
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND;
		ctx->state = RP_PU_STATE_WAIT_TX_ACK_PHY_RSP;
		break;
#endif /* CONFIG_BT_PERIPHERAL */
#if defined(CONFIG_BT_CENTRAL)
	case PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND:
		pu_prep_update_ind(conn, ctx);
		pu_prepare_instant(conn, ctx);
		llcp_pdu_encode_phy_update_ind(ctx, pdu);
		/* No further response PDU expected */
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
		ctx->state = RP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND;
		break;
#endif /* CONFIG_BT_CENTRAL */
	default:
		/* Unsupported opcode for this procedure */
		LL_ASSERT(0);
	}

	/* Enqueue LL Control PDU towards LLL */
	llcp_tx_enqueue(conn, tx);

	/* Restart procedure response timeout timer */
	llcp_rr_prt_restart(conn);
}
978
/* Finalize the remote PHY update procedure: mark the remote request
 * complete, clear the paused-command marker (CTE REQ was paused at TX
 * time) and return the FSM to idle.
 */
static void rp_pu_complete_finalize(struct ll_conn *conn, struct proc_ctx *ctx)
{
	llcp_rr_complete(conn);
	llcp_rr_set_paused_cmd(conn, PROC_NONE);
	ctx->state = RP_PU_STATE_IDLE;
}
985
/* Lift timing restrictions and defer the actual completion (and any host
 * notification) until the instant has occurred on air.
 */
static void rp_pu_complete(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	pu_reset_timing_restrict(conn);
	/* Postpone procedure completion (and possible NTF generation) to actual 'air instant'
	 * Since LLCP STM is driven from LLL prepare this actually happens BEFORE instant
	 * and thus NTFs are generated and propagated up prior to actual instant on air.
	 * Instead postpone completion/NTF to the beginning of RX handling
	 */
	ctx->state = RP_PU_STATE_WAIT_INSTANT_ON_AIR;
}
996
/* Emit the host notification(s) for the PHY update — plus the DLE change
 * notification when data length extension is enabled — then finalize the
 * procedure.
 */
static void rp_pu_tx_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	pu_ntf(conn, ctx);
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	pu_dle_ntf(conn, ctx);
#endif
	rp_pu_complete_finalize(conn, ctx);
}
1005
#if defined(CONFIG_BT_CENTRAL)
/* Central: try to send PHY_UPDATE_IND. The send is deferred (state set to
 * RP_PU_STATE_WAIT_TX_PHY_UPDATE_IND and retried on a later run event)
 * while the remote-request machinery is paused, no TX node is available,
 * a colliding PHY update holds the paused-command slot, or the LLL TX
 * queue still holds pending PDUs.
 */
static void rp_pu_send_phy_update_ind(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				      void *param)
{
	const bool defer = llcp_rr_ispaused(conn) || !llcp_tx_alloc_peek(conn, ctx) ||
			   (llcp_rr_get_paused_cmd(conn) == PROC_PHY_UPDATE) ||
			   !ull_is_lll_tx_queue_empty(conn);

	if (defer) {
		ctx->state = RP_PU_STATE_WAIT_TX_PHY_UPDATE_IND;
		return;
	}

	llcp_rr_set_paused_cmd(conn, PROC_CTE_REQ);
	ctx->tx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND;
	ctx->node_ref.tx = llcp_tx_alloc(conn, ctx);
	rp_pu_tx(conn, ctx, evt, param);
}
#endif /* CONFIG_BT_CENTRAL */
1023
#if defined(CONFIG_BT_PERIPHERAL)
/* Peripheral: try to send PHY_RSP. The send is deferred (state set to
 * RP_PU_STATE_WAIT_TX_PHY_RSP and retried on a later run event) while the
 * remote-request machinery is paused, no TX node is available, or a
 * colliding PHY update holds the paused-command slot.
 */
static void rp_pu_send_phy_rsp(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	if (llcp_rr_ispaused(conn) || !llcp_tx_alloc_peek(conn, ctx) ||
	    (llcp_rr_get_paused_cmd(conn) == PROC_PHY_UPDATE)) {
		ctx->state = RP_PU_STATE_WAIT_TX_PHY_RSP;
	} else {
		llcp_rr_set_paused_cmd(conn, PROC_CTE_REQ);
		ctx->tx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_RSP;
		ctx->node_ref.tx = llcp_tx_alloc(conn, ctx);
		rp_pu_tx(conn, ctx, evt, param);
	}
}
#endif /* CONFIG_BT_PERIPHERAL */
1038
/* Idle state: only the procedure-run event advances the FSM to waiting
 * for the peer's PHY_REQ; all other events are ignored here.
 */
static void rp_pu_st_idle(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	if (evt == RP_PU_EVT_RUN) {
		ctx->state = RP_PU_STATE_WAIT_RX_PHY_REQ;
	}
}
1050
/* Handle reception of the peer's PHY_REQ: decode it, combine the peer's
 * phys with our preferred phys, pause data TX for the duration of the
 * update, then continue per role (central sends PHY_UPDATE_IND,
 * peripheral sends PHY_RSP).
 *
 * Bug fix: the PDU decode/combine/pause must happen only for
 * RP_PU_EVT_PHY_REQ. They were previously executed before the event
 * switch, so any other event delivered in this state (e.g. RP_PU_EVT_RUN,
 * whose param is not a PDU and may be NULL) would decode a bogus PDU and
 * spuriously pause the TX queue.
 */
static void rp_pu_st_wait_rx_phy_req(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				     void *param)
{
	switch (evt) {
	case RP_PU_EVT_PHY_REQ:
		llcp_pdu_decode_phy_req(ctx, (struct pdu_data *)param);
		/* Combine with the 'Preferred' the phys in conn->phy_pref_?x */
		pu_combine_phys(conn, ctx, conn->phy_pref_tx, conn->phy_pref_rx);
		llcp_tx_pause_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);

		switch (conn->lll.role) {
#if defined(CONFIG_BT_CENTRAL)
		case BT_HCI_ROLE_CENTRAL:
			/* Mark RX node to NOT release */
			llcp_rx_node_retain(ctx);
			rp_pu_send_phy_update_ind(conn, ctx, evt, param);
			break;
#endif /* CONFIG_BT_CENTRAL */
#if defined(CONFIG_BT_PERIPHERAL)
		case BT_HCI_ROLE_PERIPHERAL:
			rp_pu_send_phy_rsp(conn, ctx, evt, param);
			break;
#endif /* CONFIG_BT_PERIPHERAL */
		default:
			/* Unknown role */
			LL_ASSERT(0);
		}
		break;
	default:
		/* Ignore other evts */
		break;
	}
}
1084
#if defined(CONFIG_BT_PERIPHERAL)
/* Waiting to (re)send PHY_RSP: each run event retries the send, which
 * re-evaluates whether TX resources are now available.
 */
static void rp_pu_st_wait_tx_phy_rsp(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				     void *param)
{
	if (evt == RP_PU_EVT_RUN) {
		rp_pu_send_phy_rsp(conn, ctx, evt, param);
	}
}
#endif /* CONFIG_BT_PERIPHERAL */
1099
/* Shared TX-ack handler for both the peripheral's PHY_RSP ack and the
 * central's PHY_UPDATE_IND ack; ctx->state selects which branch applies.
 * Data TX is resumed once the ack has been processed.
 */
static void rp_pu_st_wait_tx_ack_phy(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				     void *param)
{
	switch (evt) {
	case RP_PU_EVT_ACK:
		/* The if (0) head lets each branch below be compiled out
		 * independently while keeping one else-if chain.
		 */
		if (0) {
#if defined(CONFIG_BT_PERIPHERAL)
		} else if (ctx->state == RP_PU_STATE_WAIT_TX_ACK_PHY_RSP) {
			LL_ASSERT(conn->lll.role == BT_HCI_ROLE_PERIPHERAL);
			/* When we act as peripheral apply timing restriction */
			pu_set_timing_restrict(
				conn, pu_select_phy_timing_restrict(conn, ctx->data.pu.tx));
			/* RSP acked, now await update ind from central */
			ctx->state = RP_PU_STATE_WAIT_RX_PHY_UPDATE_IND;
#endif /* CONFIG_BT_PERIPHERAL */
#if defined(CONFIG_BT_CENTRAL)
		} else if (ctx->state == RP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND) {
			LL_ASSERT(conn->lll.role == BT_HCI_ROLE_CENTRAL);
			if (ctx->data.pu.c_to_p_phy || ctx->data.pu.p_to_c_phy) {
				/* UPDATE_IND acked, so lets await instant */
				if (ctx->data.pu.c_to_p_phy) {
					/*
					 * And if central to periph phys changes
					 * apply timing restrictions
					 */
					pu_set_timing_restrict(conn, ctx->data.pu.c_to_p_phy);
				}
				ctx->state = RP_PU_STATE_WAIT_INSTANT;
			} else {
				/* No phy changed; complete without an instant */
				rp_pu_complete(conn, ctx, evt, param);
			}
#endif /* CONFIG_BT_CENTRAL */
		} else {
			/* empty clause */
		}
		llcp_tx_resume_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);
		break;
	default:
		/* Ignore other evts */
		break;
	}
}
1142
#if defined(CONFIG_BT_CENTRAL)
/* Waiting to (re)send PHY_UPDATE_IND: each run event retries the send,
 * which re-evaluates whether TX resources are now available.
 */
static void rp_pu_st_wait_tx_phy_update_ind(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
					    void *param)
{
	if (evt == RP_PU_EVT_RUN) {
		rp_pu_send_phy_update_ind(conn, ctx, evt, param);
	}
}
#endif /* CONFIG_BT_CENTRAL */
1157
#if defined(CONFIG_BT_PERIPHERAL)
/* Peripheral: handle the PHY_UPDATE_IND received from the central. On a
 * valid update at least one phy changes and the FSM waits for the
 * instant; otherwise the procedure completes immediately, and if the
 * indicated instant already passed the connection is marked for
 * termination per the spec.
 */
static void rp_pu_st_wait_rx_phy_update_ind(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
					    void *param)
{
	switch (evt) {
	case RP_PU_EVT_PHY_UPDATE_IND:
		llcp_pdu_decode_phy_update_ind(ctx, (struct pdu_data *)param);
		const uint8_t end_procedure = pu_check_update_ind(conn, ctx);

		/* Mark RX node to NOT release */
		llcp_rx_node_retain(ctx);

		if (!end_procedure) {
			/* Since at least one phy will change,
			 * stop the procedure response timeout
			 */
			llcp_rr_prt_stop(conn);
			ctx->state = RP_PU_STATE_WAIT_INSTANT;
		} else {
			if (ctx->data.pu.error == BT_HCI_ERR_INSTANT_PASSED) {
				/* Mark the connection for termination */
				conn->llcp_terminate.reason_final = BT_HCI_ERR_INSTANT_PASSED;
			}
			rp_pu_complete(conn, ctx, evt, param);
		}
		break;
	default:
		/* Ignore other evts */
		break;
	}
}
#endif /* CONFIG_BT_PERIPHERAL */
1190
/* If the update instant has been reached or passed, apply the new PHY
 * settings, flag which notifications (PHY / DLE) need to be generated,
 * and complete the procedure. No-op while the instant is still ahead.
 */
static void rp_pu_check_instant(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				void *param)
{
	if (is_instant_reached_or_passed(ctx->data.pu.instant, ull_conn_event_counter(conn))) {
		ctx->data.pu.error = BT_HCI_ERR_SUCCESS;
		const uint8_t phy_changed = pu_apply_phy_update(conn, ctx);
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
		/* A phy change may alter effective times; flag a DLE NTF if so */
		if (phy_changed) {
			ctx->data.pu.ntf_dle = pu_update_eff_times(conn, ctx);
		}
#endif
		/* if PHY settings changed we should generate NTF */
		ctx->data.pu.ntf_pu = phy_changed;
		rp_pu_complete(conn, ctx, evt, param);
	}
}
1207
/* Waiting for the update instant: re-check the connection event counter
 * against the instant on every procedure run; ignore everything else.
 */
static void rp_pu_st_wait_instant(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				  void *param)
{
	if (evt == RP_PU_EVT_RUN) {
		rp_pu_check_instant(conn, ctx, evt, param);
	}
}
1220
/* Instant has occurred on air: on the notification event, emit the host
 * notification(s) and finalize; ignore everything else.
 */
static void rp_pu_st_wait_instant_on_air(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
					 void *param)
{
	if (evt == RP_PU_EVT_NTF) {
		rp_pu_tx_ntf(conn, ctx, evt, param);
	}
}
1233
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
/* Waiting for a free notification node: each run event retries the TX,
 * which re-checks NTF node availability.
 */
static void rp_pu_st_wait_ntf_avail(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				    void *param)
{
	if (evt == RP_PU_EVT_RUN) {
		rp_pu_tx(conn, ctx, evt, param);
	}
}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1248
/* Dispatch an event to the handler for the current remote PHY update
 * procedure state. States only reachable in a given role or config are
 * compiled out together with their handlers; an unknown state asserts.
 */
static void rp_pu_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	switch (ctx->state) {
	case RP_PU_STATE_IDLE:
		rp_pu_st_idle(conn, ctx, evt, param);
		break;
	case RP_PU_STATE_WAIT_RX_PHY_REQ:
		rp_pu_st_wait_rx_phy_req(conn, ctx, evt, param);
		break;
#if defined(CONFIG_BT_PERIPHERAL)
	case RP_PU_STATE_WAIT_TX_PHY_RSP:
		rp_pu_st_wait_tx_phy_rsp(conn, ctx, evt, param);
		break;
	case RP_PU_STATE_WAIT_TX_ACK_PHY_RSP:
		rp_pu_st_wait_tx_ack_phy(conn, ctx, evt, param);
		break;
	case RP_PU_STATE_WAIT_RX_PHY_UPDATE_IND:
		rp_pu_st_wait_rx_phy_update_ind(conn, ctx, evt, param);
		break;
#endif /* CONFIG_BT_PERIPHERAL */
#if defined(CONFIG_BT_CENTRAL)
	case RP_PU_STATE_WAIT_TX_PHY_UPDATE_IND:
		rp_pu_st_wait_tx_phy_update_ind(conn, ctx, evt, param);
		break;
	case RP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND:
		rp_pu_st_wait_tx_ack_phy(conn, ctx, evt, param);
		break;
#endif /* CONFIG_BT_CENTRAL */
	case RP_PU_STATE_WAIT_INSTANT:
		rp_pu_st_wait_instant(conn, ctx, evt, param);
		break;
	case RP_PU_STATE_WAIT_INSTANT_ON_AIR:
		rp_pu_st_wait_instant_on_air(conn, ctx, evt, param);
		break;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case RP_PU_STATE_WAIT_NTF_AVAIL:
		rp_pu_st_wait_ntf_avail(conn, ctx, evt, param);
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
	default:
		/* Unknown state */
		LL_ASSERT(0);
	}
}
1293
/* Entry point for received LL Control PDUs belonging to the remote PHY
 * update procedure. Translates the PDU opcode into an FSM event; any
 * other opcode is a protocol violation, terminating the connection with
 * BT_HCI_ERR_LMP_PDU_NOT_ALLOWED and resetting the procedure.
 */
void llcp_rp_pu_rx(struct ll_conn *conn, struct proc_ctx *ctx, struct node_rx_pdu *rx)
{
	struct pdu_data *pdu = (struct pdu_data *)rx->pdu;

	switch (pdu->llctrl.opcode) {
	case PDU_DATA_LLCTRL_TYPE_PHY_REQ:
		rp_pu_execute_fsm(conn, ctx, RP_PU_EVT_PHY_REQ, pdu);
		break;
#if defined(CONFIG_BT_PERIPHERAL)
	case PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND:
		rp_pu_execute_fsm(conn, ctx, RP_PU_EVT_PHY_UPDATE_IND, pdu);
		break;
#endif /* CONFIG_BT_PERIPHERAL */
	default:
		/* Invalid behaviour */
		/* Invalid PDU received so terminate connection */
		conn->llcp_terminate.reason_final = BT_HCI_ERR_LMP_PDU_NOT_ALLOWED;
		llcp_rr_complete(conn);
		ctx->state = RP_PU_STATE_IDLE;
		break;
	}
}
1316
/* Drive the remote PHY update procedure FSM with a run event; param is
 * forwarded unchanged to the state handlers.
 */
void llcp_rp_pu_run(struct ll_conn *conn, struct proc_ctx *ctx, void *param)
{
	rp_pu_execute_fsm(conn, ctx, RP_PU_EVT_RUN, param);
}
1321
/* Feed a TX-acknowledgment event into the remote PHY update procedure
 * FSM. param carries the acknowledged TX node.
 */
void llcp_rp_pu_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, void *param)
{
	rp_pu_execute_fsm(conn, ctx, RP_PU_EVT_ACK, param);
}
1326
/* Signal the remote PHY update procedure FSM that it may now emit its
 * host notification(s); no payload accompanies this event.
 */
void llcp_rp_pu_tx_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
{
	rp_pu_execute_fsm(conn, ctx, RP_PU_EVT_NTF, NULL);
}
1331
llcp_rp_pu_awaiting_instant(struct proc_ctx * ctx)1332 bool llcp_rp_pu_awaiting_instant(struct proc_ctx *ctx)
1333 {
1334 return (ctx->state == RP_PU_STATE_WAIT_INSTANT);
1335 }
1336