1 /*
2  * Copyright (c) 2020 Demant
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/kernel.h>
8 
9 #include <zephyr/sys/byteorder.h>
10 #include <zephyr/sys/slist.h>
11 #include <zephyr/sys/util.h>
12 
13 #include <zephyr/bluetooth/hci_types.h>
14 
15 #include "hal/ccm.h"
16 
17 #include "util/util.h"
18 #include "util/mem.h"
19 #include "util/memq.h"
20 #include "util/dbuf.h"
21 
22 #include "pdu_df.h"
23 #include "lll/pdu_vendor.h"
24 #include "pdu.h"
25 
26 #include "ll.h"
27 #include "ll_settings.h"
28 
29 #include "lll.h"
30 #include "ll_feat.h"
31 #include "lll/lll_df_types.h"
32 #include "lll_conn.h"
33 #include "lll_conn_iso.h"
34 
35 #include "ull_tx_queue.h"
36 
37 #include "isoal.h"
38 #include "ull_iso_types.h"
39 #include "ull_conn_iso_types.h"
40 #include "ull_conn_iso_internal.h"
41 
42 #include "ull_conn_types.h"
43 #include "ull_internal.h"
44 #include "ull_llcp.h"
45 #include "ull_llcp_features.h"
46 #include "ull_llcp_internal.h"
47 #include "ull_conn_internal.h"
48 
49 #include <soc.h>
50 #include "hal/debug.h"
51 
/* LLCP Local Procedure PHY Update FSM states */
enum {
	/* No local procedure ongoing */
	LP_PU_STATE_IDLE,
	/* Waiting for a TX node to send the PHY_REQ */
	LP_PU_STATE_WAIT_TX_PHY_REQ,
	/* PHY_REQ sent, waiting for its ack from LLL */
	LP_PU_STATE_WAIT_TX_ACK_PHY_REQ,
	/* Central only: waiting for the peer's PHY_RSP */
	LP_PU_STATE_WAIT_RX_PHY_RSP,
	/* Central only: waiting for a TX node to send the PHY_UPDATE_IND */
	LP_PU_STATE_WAIT_TX_PHY_UPDATE_IND,
	/* Central only: PHY_UPDATE_IND sent, waiting for its ack from LLL */
	LP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND,
	/* Peripheral only: waiting for the central's PHY_UPDATE_IND */
	LP_PU_STATE_WAIT_RX_PHY_UPDATE_IND,
	/* Waiting for a notification node to become available */
	LP_PU_STATE_WAIT_NTF_AVAIL,
	/* Waiting for the PHY update instant to be reached */
	LP_PU_STATE_WAIT_INSTANT,
	/* Instant reached; completion/NTF deferred to the on-air instant */
	LP_PU_STATE_WAIT_INSTANT_ON_AIR,
};
65 
/* LLCP Local Procedure PHY Update FSM events */
enum {
	/* Procedure run (dispatched via llcp_lp_pu_run()) */
	LP_PU_EVT_RUN,

	/* Response received (PHY_RSP) */
	LP_PU_EVT_PHY_RSP,

	/* Indication received (PHY_UPDATE_IND) */
	LP_PU_EVT_PHY_UPDATE_IND,

	/* Ack received for a transmitted PDU */
	LP_PU_EVT_ACK,

	/* Ready to notify host (dispatched via llcp_lp_pu_tx_ntf()) */
	LP_PU_EVT_NTF,

	/* Reject response received (REJECT_EXT_IND) */
	LP_PU_EVT_REJECT,

	/* Unknown response received (UNKNOWN_RSP) */
	LP_PU_EVT_UNKNOWN,
};
89 
/* LLCP Remote Procedure PHY Update FSM states */
enum {
	/* No remote procedure ongoing */
	RP_PU_STATE_IDLE,
	/* Waiting for the peer's PHY_REQ */
	RP_PU_STATE_WAIT_RX_PHY_REQ,
	/* Waiting for a TX node to send the PHY_RSP (see rp_pu_tx()) */
	RP_PU_STATE_WAIT_TX_PHY_RSP,
	/* PHY_RSP sent, waiting for its ack from LLL */
	RP_PU_STATE_WAIT_TX_ACK_PHY_RSP,
	/* Waiting for a TX node to send the PHY_UPDATE_IND (see rp_pu_tx()) */
	RP_PU_STATE_WAIT_TX_PHY_UPDATE_IND,
	/* PHY_UPDATE_IND sent, waiting for its ack from LLL */
	RP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND,
	/* Waiting for the central's PHY_UPDATE_IND */
	RP_PU_STATE_WAIT_RX_PHY_UPDATE_IND,
	/* Waiting for a notification node to become available */
	RP_PU_STATE_WAIT_NTF_AVAIL,
	/* Waiting for the PHY update instant to be reached */
	RP_PU_STATE_WAIT_INSTANT,
	/* Instant reached; completion/NTF deferred to the on-air instant */
	RP_PU_STATE_WAIT_INSTANT_ON_AIR,
};
103 
/* LLCP Remote Procedure PHY Update FSM events */
enum {
	/* Procedure run */
	RP_PU_EVT_RUN,

	/* Request received (PHY_REQ) */
	RP_PU_EVT_PHY_REQ,

	/* Ack received for a transmitted PDU */
	RP_PU_EVT_ACK,

	/* Indication received (PHY_UPDATE_IND) */
	RP_PU_EVT_PHY_UPDATE_IND,

	/* Ready to notify host */
	RP_PU_EVT_NTF,
};
121 
/* Hardcoded instant delta +6; added on top of the current event counter
 * plus peripheral latency when selecting the instant
 * (see pu_prepare_instant()).
 */
#define PHY_UPDATE_INSTANT_DELTA 6
124 
125 #if defined(CONFIG_BT_CENTRAL)
/* PHY preference order used by pu_select_phy(): 2M first, then 1M,
 * then Coded.
 */
#define PHY_PREF_1 PHY_2M
#define PHY_PREF_2 PHY_1M
#define PHY_PREF_3 PHY_CODED
130 
pu_select_phy(uint8_t phys)131 static inline uint8_t pu_select_phy(uint8_t phys)
132 {
133 	/* select only one phy, select preferred */
134 	if (phys & PHY_PREF_1) {
135 		return PHY_PREF_1;
136 	} else if (phys & PHY_PREF_2) {
137 		return PHY_PREF_2;
138 	} else if (phys & PHY_PREF_3) {
139 		return PHY_PREF_3;
140 	} else {
141 		return 0U;
142 	}
143 }
144 
pu_prep_update_ind(struct ll_conn * conn,struct proc_ctx * ctx)145 static void pu_prep_update_ind(struct ll_conn *conn, struct proc_ctx *ctx)
146 {
147 	ctx->data.pu.tx = pu_select_phy(ctx->data.pu.tx);
148 	ctx->data.pu.rx = pu_select_phy(ctx->data.pu.rx);
149 
150 	if (ctx->data.pu.tx != conn->lll.phy_tx) {
151 		ctx->data.pu.c_to_p_phy = ctx->data.pu.tx;
152 	} else {
153 		ctx->data.pu.c_to_p_phy = 0U;
154 	}
155 	if (ctx->data.pu.rx != conn->lll.phy_rx) {
156 		ctx->data.pu.p_to_c_phy = ctx->data.pu.rx;
157 	} else {
158 		ctx->data.pu.p_to_c_phy = 0U;
159 	}
160 }
161 #endif /* CONFIG_BT_CENTRAL */
162 
163 #if defined(CONFIG_BT_PERIPHERAL)
pu_select_phy_timing_restrict(struct ll_conn * conn,uint8_t phy_tx)164 static uint8_t pu_select_phy_timing_restrict(struct ll_conn *conn, uint8_t phy_tx)
165 {
166 	/* select the probable PHY with longest Tx time, which
167 	 * will be restricted to fit current
168 	 * connEffectiveMaxTxTime.
169 	 */
170 	/* Note - entry 0 in table is unused, so 0 on purpose */
171 	uint8_t phy_tx_time[8] = { 0,	      PHY_1M,	 PHY_2M,    PHY_1M,
172 				   PHY_CODED, PHY_CODED, PHY_CODED, PHY_CODED };
173 	struct lll_conn *lll = &conn->lll;
174 	const uint8_t phys = phy_tx | lll->phy_tx;
175 
176 	return phy_tx_time[phys];
177 }
178 #endif /* CONFIG_BT_PERIPHERAL */
179 
pu_set_timing_restrict(struct ll_conn * conn,uint8_t phy_tx)180 static void pu_set_timing_restrict(struct ll_conn *conn, uint8_t phy_tx)
181 {
182 	struct lll_conn *lll = &conn->lll;
183 
184 	lll->phy_tx_time = phy_tx;
185 }
186 
pu_reset_timing_restrict(struct ll_conn * conn)187 static void pu_reset_timing_restrict(struct ll_conn *conn)
188 {
189 	pu_set_timing_restrict(conn, conn->lll.phy_tx);
190 }
191 
192 #if defined(CONFIG_BT_PERIPHERAL)
/* A PHY field is valid when at most one bit is set and no RFU bits are
 * set, i.e. the value is 0 (no change), PHY_1M (1), PHY_2M (2) or
 * PHY_CODED (4).
 */
static inline bool phy_valid(uint8_t phy)
{
	switch (phy) {
	case 0:
	case 1:
	case 2:
	case 4:
		return true;
	default:
		return false;
	}
}
200 
/* Validate a received PHY_UPDATE_IND on the peripheral side.
 *
 * Returns nonzero when the procedure must end early, with
 * ctx->data.pu.error set to the reason (SUCCESS for a benign
 * 'no change' indication).
 *
 * NOTE(review): when an invalid PHY is detected, the later instant
 * check may still overwrite .error with INSTANT_PASSED - confirm this
 * precedence is intended.
 */
static uint8_t pu_check_update_ind(struct ll_conn *conn, struct proc_ctx *ctx)
{
	uint8_t ret = 0;

	/* Check if either phy selected is invalid */
	if (!phy_valid(ctx->data.pu.c_to_p_phy) || !phy_valid(ctx->data.pu.p_to_c_phy)) {
		/* more than one or any rfu bit selected in either phy */
		ctx->data.pu.error = BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
		ret = 1;
	}

	/* Both tx and rx PHY unchanged */
	if (!((ctx->data.pu.c_to_p_phy | ctx->data.pu.p_to_c_phy) & 0x07)) {
		/* if no phy changes, quit procedure, and possibly signal host */
		ctx->data.pu.error = BT_HCI_ERR_SUCCESS;
		ret = 1;
	} else {
		/* if instant already passed, quit procedure with error */
		if (is_instant_reached_or_passed(ctx->data.pu.instant,
						 ull_conn_event_counter(conn))) {
			ctx->data.pu.error = BT_HCI_ERR_INSTANT_PASSED;
			ret = 1;
		}
	}
	return ret;
}
227 #endif /* CONFIG_BT_PERIPHERAL */
228 
/* Apply the negotiated PHY update to the LLL connection state.
 *
 * PHYs not enabled in the build are masked out first. c_to_p is the
 * central's Tx direction, so the field-to-phy_tx/phy_rx mapping
 * depends on our role. A value of 0 means 'no change' for that
 * direction.
 *
 * Returns nonzero when either phy_tx or phy_rx actually changed.
 */
static uint8_t pu_apply_phy_update(struct ll_conn *conn, struct proc_ctx *ctx)
{
	struct lll_conn *lll = &conn->lll;
	uint8_t phy_bitmask = PHY_1M;
	const uint8_t old_tx = lll->phy_tx;
	const uint8_t old_rx = lll->phy_rx;

#if defined(CONFIG_BT_CTLR_PHY_2M)
	phy_bitmask |= PHY_2M;
#endif
#if defined(CONFIG_BT_CTLR_PHY_CODED)
	phy_bitmask |= PHY_CODED;
#endif
	const uint8_t p_to_c_phy = ctx->data.pu.p_to_c_phy & phy_bitmask;
	const uint8_t c_to_p_phy = ctx->data.pu.c_to_p_phy & phy_bitmask;

	/* 'if (0)' lets each role branch be compiled out independently */
	if (0) {
#if defined(CONFIG_BT_PERIPHERAL)
	} else if (lll->role == BT_HCI_ROLE_PERIPHERAL) {
		if (p_to_c_phy) {
			lll->phy_tx = p_to_c_phy;
		}
		if (c_to_p_phy) {
			lll->phy_rx = c_to_p_phy;
		}
#endif /* CONFIG_BT_PERIPHERAL */
#if defined(CONFIG_BT_CENTRAL)
	} else if (lll->role == BT_HCI_ROLE_CENTRAL) {
		if (p_to_c_phy) {
			lll->phy_rx = p_to_c_phy;
		}
		if (c_to_p_phy) {
			lll->phy_tx = c_to_p_phy;
		}
#endif /* CONFIG_BT_CENTRAL */
	}

	return ((old_tx != lll->phy_tx) || (old_rx != lll->phy_rx));
}
268 
269 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
/* Calculate an effective max Tx/Rx time for the given payload size and
 * PHY, clamped into [PDU_DC_PAYLOAD_TIME_MIN, default_time].
 */
static uint16_t pu_calc_eff_time(uint8_t max_octets, uint8_t phy, uint16_t default_time)
{
	uint16_t payload_time = PDU_DC_MAX_US(max_octets, phy);
	uint16_t eff_time;

	eff_time = MAX(PDU_DC_PAYLOAD_TIME_MIN, payload_time);
	eff_time = MIN(eff_time, default_time);
#if defined(CONFIG_BT_CTLR_PHY_CODED)
	/* Applied after the MIN clamp on purpose: the minimum payload on
	 * this PHY is a hard floor, and on the coded PHY it may exceed
	 * default_time.
	 */
	eff_time = MAX(eff_time, PDU_DC_MAX_US(PDU_DC_PAYLOAD_SIZE_MIN, phy));
#endif

	return eff_time;
}
283 
/* Recalculate the effective data length times after a PHY change.
 *
 * Returns nonzero (and updates lll->dle.eff) when the effective times
 * changed, i.e. when a DLE notification should be generated.
 */
static uint8_t pu_update_eff_times(struct ll_conn *conn, struct proc_ctx *ctx)
{
	struct lll_conn *lll = &conn->lll;
	uint16_t eff_tx_time = lll->dle.eff.max_tx_time;
	uint16_t eff_rx_time = lll->dle.eff.max_rx_time;
	uint16_t max_rx_time, max_tx_time;

	ull_dle_max_time_get(conn, &max_rx_time, &max_tx_time);

	/* Our Tx direction changed: p_to_c is the peripheral's Tx
	 * direction, c_to_p the central's (see pu_apply_phy_update())
	 */
	if ((ctx->data.pu.p_to_c_phy && (lll->role == BT_HCI_ROLE_PERIPHERAL)) ||
	    (ctx->data.pu.c_to_p_phy && (lll->role == BT_HCI_ROLE_CENTRAL))) {
		eff_tx_time =
			pu_calc_eff_time(lll->dle.eff.max_tx_octets, lll->phy_tx, max_tx_time);
	}
	/* Our Rx direction changed */
	if ((ctx->data.pu.p_to_c_phy && (lll->role == BT_HCI_ROLE_CENTRAL)) ||
	    (ctx->data.pu.c_to_p_phy && (lll->role == BT_HCI_ROLE_PERIPHERAL))) {
		eff_rx_time =
			pu_calc_eff_time(lll->dle.eff.max_rx_octets, lll->phy_rx, max_rx_time);
	}

	/* Commit only when a time grew or the stored value exceeds the
	 * local maximum
	 */
	if ((eff_tx_time > lll->dle.eff.max_tx_time) ||
	    (lll->dle.eff.max_tx_time > max_tx_time) ||
	    (eff_rx_time > lll->dle.eff.max_rx_time) ||
	    (lll->dle.eff.max_rx_time > max_rx_time)) {
		lll->dle.eff.max_tx_time = eff_tx_time;
		lll->dle.eff.max_rx_time = eff_rx_time;
#if defined(CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE)
		/* Signal that the event length needs updating */
		lll->evt_len_upd = 1U;
#endif /* CONFIG_BT_CTLR_SLOT_RESERVATION_UPDATE */
		return 1U;
	}

	return 0U;
}
318 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
319 
pu_set_preferred_phys(struct ll_conn * conn,struct proc_ctx * ctx)320 static inline void pu_set_preferred_phys(struct ll_conn *conn, struct proc_ctx *ctx)
321 {
322 	conn->phy_pref_rx = ctx->data.pu.rx;
323 	conn->phy_pref_tx = ctx->data.pu.tx;
324 
325 	/*
326 	 * Note: Since 'flags' indicate local coded phy preference (S2 or S8) and
327 	 * this is not negotiated with the peer, it is simply reconfigured in conn->lll when
328 	 * the update is initiated, and takes effect whenever the coded phy is in use.
329 	 */
330 	conn->lll.phy_flags = ctx->data.pu.flags;
331 }
332 
pu_combine_phys(struct ll_conn * conn,struct proc_ctx * ctx,uint8_t tx,uint8_t rx)333 static inline void pu_combine_phys(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t tx,
334 				   uint8_t rx)
335 {
336 	/* Combine requested phys with locally preferred phys */
337 	ctx->data.pu.rx &= rx;
338 	ctx->data.pu.tx &= tx;
339 	/* If either tx or rx is 'no change' at this point we force both to no change to
340 	 * comply with the spec
341 	 *	Spec. BT5.2 Vol6, Part B, section 5.1.10:
342 	 *	The remainder of this section shall apply irrespective of which device initiated
343 	 *	the procedure.
344 	 *
345 	 *	Irrespective of the above rules, the central may leave both directions
346 	 *	unchanged. If the periph specified a single PHY in both the TX_PHYS and
347 	 *	RX_PHYS fields and both fields are the same, the central shall either select
348 	 *	the PHY specified by the periph for both directions or shall leave both directions
349 	 *	unchanged.
350 	 */
351 	if (conn->lll.role == BT_HCI_ROLE_CENTRAL && (!ctx->data.pu.rx || !ctx->data.pu.tx)) {
352 		ctx->data.pu.tx = 0;
353 		ctx->data.pu.rx = 0;
354 	}
355 }
356 
357 #if defined(CONFIG_BT_CENTRAL)
pu_prepare_instant(struct ll_conn * conn,struct proc_ctx * ctx)358 static void pu_prepare_instant(struct ll_conn *conn, struct proc_ctx *ctx)
359 {
360 	/* Set instance only in case there is actual PHY change. Otherwise the instant should be
361 	 * set to 0.
362 	 */
363 	if (ctx->data.pu.c_to_p_phy != 0 || ctx->data.pu.p_to_c_phy != 0) {
364 		ctx->data.pu.instant = ull_conn_event_counter(conn) + conn->lll.latency +
365 			PHY_UPDATE_INSTANT_DELTA;
366 	} else {
367 		ctx->data.pu.instant = 0;
368 	}
369 }
370 #endif /* CONFIG_BT_CENTRAL */
371 
372 /*
373  * LLCP Local Procedure PHY Update FSM
374  */
375 
/* Encode and enqueue the local procedure's pending LL Control PDU
 * (PHY_REQ, or PHY_UPDATE_IND when acting as central) towards LLL,
 * then advance the FSM to the matching wait-for-ack state.
 */
static void lp_pu_tx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	struct node_tx *tx;
	struct pdu_data *pdu;

	LL_ASSERT(ctx->node_ref.tx);

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	/* Pre-allocate the DLE notification node, except for the
	 * central's PHY_REQ - the central allocates it later when it
	 * sends the PHY_UPDATE_IND.
	 */
	if (!((ctx->tx_opcode == PDU_DATA_LLCTRL_TYPE_PHY_REQ) &&
	    (conn->lll.role == BT_HCI_ROLE_CENTRAL))) {
		if (!llcp_ntf_alloc_is_available()) {
			/* No NTF nodes avail, so we need to hold off TX */
			ctx->state = LP_PU_STATE_WAIT_NTF_AVAIL;
			return;
		}
		ctx->data.pu.ntf_dle_node = llcp_ntf_alloc();
		LL_ASSERT(ctx->data.pu.ntf_dle_node);
	}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

	/* Take ownership of the TX node and track it for ack handling */
	tx = ctx->node_ref.tx;
	ctx->node_ref.tx = NULL;
	ctx->node_ref.tx_ack = tx;
	pdu = (struct pdu_data *)tx->pdu;

	/* Encode LL Control PDU */
	switch (ctx->tx_opcode) {
	case PDU_DATA_LLCTRL_TYPE_PHY_REQ:
		pu_set_preferred_phys(conn, ctx);
		llcp_pdu_encode_phy_req(ctx, pdu);
		/* Pause data TX while the PHY update is being negotiated */
		llcp_tx_pause_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);
		ctx->state = LP_PU_STATE_WAIT_TX_ACK_PHY_REQ;
		break;
#if defined(CONFIG_BT_CENTRAL)
	case PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND:
		pu_prep_update_ind(conn, ctx);
		pu_prepare_instant(conn, ctx);
		llcp_pdu_encode_phy_update_ind(ctx, pdu);
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
		ctx->state = LP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND;
		break;
#endif /* CONFIG_BT_CENTRAL */
	default:
		/* Unexpected opcode for this procedure */
		LL_ASSERT(0);
	}

	/* Enqueue LL Control PDU towards LLL */
	llcp_tx_enqueue(conn, tx);

	/* Restart procedure response timeout timer */
	llcp_lr_prt_restart(conn);
}
428 
/* Generate (or suppress) the PHY update host notification by reusing
 * the RX node retained earlier in the procedure; afterwards the node
 * reference and the ntf_pu flag are cleared.
 */
static void pu_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
{
	struct node_rx_pdu *ntf;
	struct node_rx_pu *pdu;

	/* Piggy-back on stored RX node */
	ntf = ctx->node_ref.rx;
	LL_ASSERT(ntf);

	if (ctx->data.pu.ntf_pu) {
		/* Repurpose the retained node as a PHY update notification
		 * carrying the final status and the PHYs now in use
		 */
		LL_ASSERT(ntf->hdr.type == NODE_RX_TYPE_RETAIN);
		ntf->hdr.type = NODE_RX_TYPE_PHY_UPDATE;
		ntf->hdr.handle = conn->lll.handle;
		pdu = (struct node_rx_pu *)ntf->pdu;

		pdu->status = ctx->data.pu.error;
		pdu->rx = conn->lll.phy_rx;
		pdu->tx = conn->lll.phy_tx;
	} else {
		/* No notification wanted; just release the node */
		ntf->hdr.type = NODE_RX_TYPE_RELEASE;
	}

	/* Enqueue notification towards LL */
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	/* only 'put' as the 'sched' is handled when handling DLE ntf */
	ll_rx_put(ntf->hdr.link, ntf);
#else
	ll_rx_put_sched(ntf->hdr.link, ntf);
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

	ctx->data.pu.ntf_pu = 0;
	ctx->node_ref.rx = NULL;
}
462 
463 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
/* Generate (or release) the data length change host notification using
 * the node pre-allocated in lp_pu_tx()/rp_pu_tx(); afterwards the node
 * reference and the ntf_dle flag are cleared.
 */
static void pu_dle_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
{
	struct node_rx_pdu *ntf;
	struct pdu_data *pdu;

	/* Retrieve DLE ntf node */
	ntf = ctx->data.pu.ntf_dle_node;

	if (!ctx->data.pu.ntf_dle) {
		if (!ntf) {
			/* If no DLE ntf was pre-allocated there is nothing more to do */
			/* This will happen in case of a completion on UNKNOWN_RSP to PHY_REQ
			 * in Central case.
			 */
			return;
		}
		/* Signal to release pre-allocated node in case there is no DLE ntf */
		ntf->hdr.type = NODE_RX_TYPE_RELEASE;
	} else {
		LL_ASSERT(ntf);

		/* Fill in the length change notification */
		ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
		ntf->hdr.handle = conn->lll.handle;
		pdu = (struct pdu_data *)ntf->pdu;

		llcp_ntf_encode_length_change(conn, pdu);
	}

	/* Enqueue notification towards LL */
	ll_rx_put_sched(ntf->hdr.link, ntf);

	ctx->data.pu.ntf_dle = 0;
	ctx->data.pu.ntf_dle_node = NULL;
}
498 #endif
499 
/* Finalize the local procedure: report completion to the LLCP
 * framework, clear the paused remote command and return to idle.
 */
static void lp_pu_complete_finalize(struct ll_conn *conn, struct proc_ctx *ctx)
{
	llcp_lr_complete(conn);
	llcp_rr_set_paused_cmd(conn, PROC_NONE);
	ctx->state = LP_PU_STATE_IDLE;
}
506 
/* Emit the PHY update notification (and, when DLE is enabled, the data
 * length change notification), then finalize the procedure.
 */
static void lp_pu_tx_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	pu_ntf(conn, ctx);
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	pu_dle_ntf(conn, ctx);
#endif
	lp_pu_complete_finalize(conn, ctx);
}
515 
/* LLCP-level completion of the local procedure: lift the Tx timing
 * restriction and defer the actual completion/NTF to the on-air
 * instant.
 */
static void lp_pu_complete(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	pu_reset_timing_restrict(conn);

	/* Postpone procedure completion (and possible NTF generation) to actual 'air instant'
	 * Since LLCP STM is driven from LLL prepare this actually happens BEFORE instant
	 * and thus NTFs are generated and propagated up prior to actual instant on air.
	 * Instead postpone completion/NTF to the beginning of RX handling
	 */
	ctx->state = LP_PU_STATE_WAIT_INSTANT_ON_AIR;
}
527 
/* Try to send the PHY_REQ; when TX is not currently possible (paused,
 * colliding, no TX node, or the procedure itself is paused) park in
 * WAIT_TX_PHY_REQ and retry on the next RUN event.
 */
static void lp_pu_send_phy_req(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	if (llcp_lr_ispaused(conn) || llcp_rr_get_collision(conn) ||
	    !llcp_tx_alloc_peek(conn, ctx) ||
	    (llcp_rr_get_paused_cmd(conn) == PROC_PHY_UPDATE)) {
		ctx->state = LP_PU_STATE_WAIT_TX_PHY_REQ;
	} else {
		/* Flag a potential remote-procedure collision as resolvable */
		llcp_rr_set_incompat(conn, INCOMPAT_RESOLVABLE);
		/* NOTE(review): marks the CTE REQ procedure as paused while
		 * the PHY update runs - confirm against users of
		 * llcp_rr_get_paused_cmd()
		 */
		llcp_rr_set_paused_cmd(conn, PROC_CTE_REQ);
		ctx->tx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_REQ;

		/* Allocate TX node */
		ctx->node_ref.tx = llcp_tx_alloc(conn, ctx);
		lp_pu_tx(conn, ctx, evt, param);
	}
}
544 
545 #if defined(CONFIG_BT_CENTRAL)
/* Central: try to send the PHY_UPDATE_IND; when TX is not currently
 * possible park in WAIT_TX_PHY_UPDATE_IND and retry on the next RUN
 * event.
 */
static void lp_pu_send_phy_update_ind(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				      void *param)
{
	if (llcp_lr_ispaused(conn) || !llcp_tx_alloc_peek(conn, ctx)) {
		ctx->state = LP_PU_STATE_WAIT_TX_PHY_UPDATE_IND;
	} else {
		ctx->tx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND;

		/* Allocate TX node */
		ctx->node_ref.tx = llcp_tx_alloc(conn, ctx);
		lp_pu_tx(conn, ctx, evt, param);
	}
}
559 #endif /* CONFIG_BT_CENTRAL */
560 
lp_pu_st_idle(struct ll_conn * conn,struct proc_ctx * ctx,uint8_t evt,void * param)561 static void lp_pu_st_idle(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
562 {
563 	switch (evt) {
564 	case LP_PU_EVT_RUN:
565 		lp_pu_send_phy_req(conn, ctx, evt, param);
566 		break;
567 	default:
568 		/* Ignore other evts */
569 		break;
570 	}
571 }
572 
lp_pu_st_wait_tx_phy_req(struct ll_conn * conn,struct proc_ctx * ctx,uint8_t evt,void * param)573 static void lp_pu_st_wait_tx_phy_req(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
574 				     void *param)
575 {
576 	switch (evt) {
577 	case LP_PU_EVT_RUN:
578 		lp_pu_send_phy_req(conn, ctx, evt, param);
579 		break;
580 	default:
581 		/* Ignore other evts */
582 		break;
583 	}
584 }
585 
586 #if defined(CONFIG_BT_CENTRAL)
/* Central: waiting for the peripheral's PHY_RSP. An UNKNOWN_RSP means
 * the peer does not support the procedure; the 2M/Coded feature bits
 * are then cleared for this connection and the procedure completes
 * with an error notification.
 */
static void lp_pu_st_wait_rx_phy_rsp(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				     void *param)
{
	switch (evt) {
	case LP_PU_EVT_PHY_RSP:
		llcp_rr_set_incompat(conn, INCOMPAT_RESERVED);
		/* 'Prefer' the phys from the REQ */
		uint8_t tx_pref = ctx->data.pu.tx;
		uint8_t rx_pref = ctx->data.pu.rx;

		llcp_pdu_decode_phy_rsp(ctx, (struct pdu_data *)param);
		/* Pause data tx */
		llcp_tx_pause_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);
		/* Combine with the 'Preferred' phys */
		pu_combine_phys(conn, ctx, tx_pref, rx_pref);

		/* Mark RX node to NOT release */
		llcp_rx_node_retain(ctx);

		lp_pu_send_phy_update_ind(conn, ctx, evt, param);
		break;
	case LP_PU_EVT_UNKNOWN:
		llcp_rr_set_incompat(conn, INCOMPAT_NO_COLLISION);
		/* Unsupported in peer, so disable locally for this connection
		 * Peer does not accept PHY UPDATE, so disable non 1M phys on current connection
		 */
		feature_unmask_features(conn, LL_FEAT_BIT_PHY_2M | LL_FEAT_BIT_PHY_CODED);

		/* Mark RX node to NOT release */
		llcp_rx_node_retain(ctx);

		ctx->data.pu.error = BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
		ctx->data.pu.ntf_pu = 1;
		lp_pu_complete(conn, ctx, evt, param);
		break;
	default:
		/* Ignore other evts */
		break;
	}
}
627 #endif /* CONFIG_BT_CENTRAL */
628 
/* PHY_REQ has been transmitted; on its ack, advance according to role:
 * central waits for the peer's PHY_RSP, peripheral waits for the
 * central's PHY_UPDATE_IND.
 */
static void lp_pu_st_wait_tx_ack_phy_req(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
					 void *param)
{
	switch (evt) {
	case LP_PU_EVT_ACK:
		switch (conn->lll.role) {
#if defined(CONFIG_BT_CENTRAL)
		case BT_HCI_ROLE_CENTRAL:
			ctx->state = LP_PU_STATE_WAIT_RX_PHY_RSP;
			ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_RSP;
			break;
#endif /* CONFIG_BT_CENTRAL */
#if defined(CONFIG_BT_PERIPHERAL)
		case BT_HCI_ROLE_PERIPHERAL:
			/* If we act as peripheral apply timing restriction */
			pu_set_timing_restrict(
				conn, pu_select_phy_timing_restrict(conn, ctx->data.pu.tx));
			ctx->state = LP_PU_STATE_WAIT_RX_PHY_UPDATE_IND;
			ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND;
			/* Data TX may flow again while awaiting the IND */
			llcp_tx_resume_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);
			break;
#endif /* CONFIG_BT_PERIPHERAL */
		default:
			/* Unknown role */
			LL_ASSERT(0);
		}

		break;
	default:
		/* Ignore other evts */
		break;
	}
}
662 
663 #if defined(CONFIG_BT_CENTRAL)
lp_pu_st_wait_tx_phy_update_ind(struct ll_conn * conn,struct proc_ctx * ctx,uint8_t evt,void * param)664 static void lp_pu_st_wait_tx_phy_update_ind(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
665 					    void *param)
666 {
667 	switch (evt) {
668 	case LP_PU_EVT_RUN:
669 		lp_pu_send_phy_update_ind(conn, ctx, evt, param);
670 		break;
671 	default:
672 		/* Ignore other evts */
673 		break;
674 	}
675 }
676 
/* Central: PHY_UPDATE_IND has been transmitted; on its ack, either
 * wait for the instant (when a PHY actually changes) or complete
 * immediately (pure 'no change' indication).
 */
static void lp_pu_st_wait_tx_ack_phy_update_ind(struct ll_conn *conn, struct proc_ctx *ctx,
						uint8_t evt, void *param)
{
	switch (evt) {
	case LP_PU_EVT_ACK:
		LL_ASSERT(conn->lll.role == BT_HCI_ROLE_CENTRAL);
		if (ctx->data.pu.p_to_c_phy || ctx->data.pu.c_to_p_phy) {
			/* Either phys should change */
			if (ctx->data.pu.c_to_p_phy) {
				/* central to periph tx phy changes so, apply timing restriction */
				pu_set_timing_restrict(conn, ctx->data.pu.c_to_p_phy);
			}

			/* Since at least one phy will change,
			 * stop the procedure response timeout
			 */
			llcp_lr_prt_stop(conn);

			/* Now we should wait for instant */
			ctx->state = LP_PU_STATE_WAIT_INSTANT;
		} else {
			llcp_rr_set_incompat(conn, INCOMPAT_NO_COLLISION);
			ctx->data.pu.error = BT_HCI_ERR_SUCCESS;
			/* Only notify the host when it asked for the update */
			ctx->data.pu.ntf_pu = ctx->data.pu.host_initiated;
			lp_pu_complete(conn, ctx, evt, param);
		}
		llcp_tx_resume_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);
		break;
	default:
		/* Ignore other evts */
		break;
	}
}
710 #endif /* CONFIG_BT_CENTRAL */
711 
712 #if defined(CONFIG_BT_PERIPHERAL)
/* Peripheral: waiting for the central's PHY_UPDATE_IND. Validates the
 * IND and either waits for the instant, completes early ('no change'
 * or instant passed), or handles a REJECT_EXT_IND/UNKNOWN_RSP from the
 * peer.
 */
static void lp_pu_st_wait_rx_phy_update_ind(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
					    void *param)
{
	switch (evt) {
	case LP_PU_EVT_PHY_UPDATE_IND:
		LL_ASSERT(conn->lll.role == BT_HCI_ROLE_PERIPHERAL);
		llcp_pdu_decode_phy_update_ind(ctx, (struct pdu_data *)param);
		const uint8_t end_procedure = pu_check_update_ind(conn, ctx);

		/* Mark RX node to NOT release */
		llcp_rx_node_retain(ctx);

		if (!end_procedure) {
			if (ctx->data.pu.p_to_c_phy) {
				/* If periph to central phy changes apply tx timing restriction */
				pu_set_timing_restrict(conn, ctx->data.pu.p_to_c_phy);
			}

			/* Since at least one phy will change,
			 * stop the procedure response timeout
			 */
			llcp_lr_prt_stop(conn);

			ctx->state = LP_PU_STATE_WAIT_INSTANT;
		} else {
			llcp_rr_set_incompat(conn, INCOMPAT_NO_COLLISION);
			if (ctx->data.pu.error != BT_HCI_ERR_SUCCESS) {
				/* Mark the connection for termination */
				conn->llcp_terminate.reason_final = ctx->data.pu.error;
			}
			ctx->data.pu.ntf_pu = ctx->data.pu.host_initiated;
			lp_pu_complete(conn, ctx, evt, param);
		}
		llcp_tx_resume_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);
		break;
	case LP_PU_EVT_REJECT:
		llcp_pdu_decode_reject_ext_ind(ctx, (struct pdu_data *)param);
		ctx->data.pu.error = ctx->reject_ext_ind.error_code;
		/* Fallthrough */
	case LP_PU_EVT_UNKNOWN:
		llcp_rr_set_incompat(conn, INCOMPAT_NO_COLLISION);
		if (evt == LP_PU_EVT_UNKNOWN) {
			/* Peer lacks the feature; disable non-1M phys locally */
			feature_unmask_features(conn, LL_FEAT_BIT_PHY_2M | LL_FEAT_BIT_PHY_CODED);
			ctx->data.pu.error = BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
		}
		/* Mark RX node to NOT release */
		llcp_rx_node_retain(ctx);

		ctx->data.pu.ntf_pu = 1;
		lp_pu_complete(conn, ctx, evt, param);
		llcp_tx_resume_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);
		break;
	default:
		/* Ignore other evts */
		break;
	}
}
770 #endif /* CONFIG_BT_PERIPHERAL */
771 
/* Check whether the PHY update instant has been reached; if so, apply
 * the new PHYs, recompute the effective data length times (DLE) and
 * complete the procedure with a success notification when anything
 * changed or the host initiated the update.
 */
static void lp_pu_check_instant(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				void *param)
{
	if (is_instant_reached_or_passed(ctx->data.pu.instant, ull_conn_event_counter(conn))) {
		const uint8_t phy_changed = pu_apply_phy_update(conn, ctx);
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
		if (phy_changed) {
			ctx->data.pu.ntf_dle = pu_update_eff_times(conn, ctx);
		}
#endif
		llcp_rr_set_incompat(conn, INCOMPAT_NO_COLLISION);
		ctx->data.pu.error = BT_HCI_ERR_SUCCESS;
		ctx->data.pu.ntf_pu = (phy_changed || ctx->data.pu.host_initiated);
		lp_pu_complete(conn, ctx, evt, param);
	}
}
788 
lp_pu_st_wait_instant(struct ll_conn * conn,struct proc_ctx * ctx,uint8_t evt,void * param)789 static void lp_pu_st_wait_instant(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
790 				  void *param)
791 {
792 	switch (evt) {
793 	case LP_PU_EVT_RUN:
794 		lp_pu_check_instant(conn, ctx, evt, param);
795 		break;
796 	default:
797 		/* Ignore other evts */
798 		break;
799 	}
800 }
801 
lp_pu_st_wait_instant_on_air(struct ll_conn * conn,struct proc_ctx * ctx,uint8_t evt,void * param)802 static void lp_pu_st_wait_instant_on_air(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
803 					 void *param)
804 {
805 	switch (evt) {
806 	case LP_PU_EVT_NTF:
807 		lp_pu_tx_ntf(conn, ctx, evt, param);
808 		break;
809 	default:
810 		/* Ignore other evts */
811 		break;
812 	}
813 }
814 
815 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
lp_pu_st_wait_ntf_avail(struct ll_conn * conn,struct proc_ctx * ctx,uint8_t evt,void * param)816 static void lp_pu_st_wait_ntf_avail(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
817 				    void *param)
818 {
819 	switch (evt) {
820 	case LP_PU_EVT_RUN:
821 		lp_pu_tx(conn, ctx, evt, param);
822 		break;
823 	default:
824 		/* Ignore other evts */
825 		break;
826 	}
827 }
828 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
829 
/* Dispatch an event to the handler of the current local FSM state;
 * role-specific states are compiled out with their handlers.
 */
static void lp_pu_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	switch (ctx->state) {
	case LP_PU_STATE_IDLE:
		lp_pu_st_idle(conn, ctx, evt, param);
		break;
	case LP_PU_STATE_WAIT_TX_PHY_REQ:
		lp_pu_st_wait_tx_phy_req(conn, ctx, evt, param);
		break;
	case LP_PU_STATE_WAIT_TX_ACK_PHY_REQ:
		lp_pu_st_wait_tx_ack_phy_req(conn, ctx, evt, param);
		break;
#if defined(CONFIG_BT_CENTRAL)
	case LP_PU_STATE_WAIT_RX_PHY_RSP:
		lp_pu_st_wait_rx_phy_rsp(conn, ctx, evt, param);
		break;
	case LP_PU_STATE_WAIT_TX_PHY_UPDATE_IND:
		lp_pu_st_wait_tx_phy_update_ind(conn, ctx, evt, param);
		break;
	case LP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND:
		lp_pu_st_wait_tx_ack_phy_update_ind(conn, ctx, evt, param);
		break;
#endif /* CONFIG_BT_CENTRAL */
#if defined(CONFIG_BT_PERIPHERAL)
	case LP_PU_STATE_WAIT_RX_PHY_UPDATE_IND:
		lp_pu_st_wait_rx_phy_update_ind(conn, ctx, evt, param);
		break;
#endif /* CONFIG_BT_PERIPHERAL */
	case LP_PU_STATE_WAIT_INSTANT:
		lp_pu_st_wait_instant(conn, ctx, evt, param);
		break;
	case LP_PU_STATE_WAIT_INSTANT_ON_AIR:
		lp_pu_st_wait_instant_on_air(conn, ctx, evt, param);
		break;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case LP_PU_STATE_WAIT_NTF_AVAIL:
		lp_pu_st_wait_ntf_avail(conn, ctx, evt, param);
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
	default:
		/* Unknown state */
		LL_ASSERT(0);
	}
}
874 
/* Entry point for a received LL Control PDU belonging to the local
 * procedure: map the opcode to an FSM event. Any other opcode is
 * invalid here and marks the connection for termination.
 */
void llcp_lp_pu_rx(struct ll_conn *conn, struct proc_ctx *ctx, struct node_rx_pdu *rx)
{
	struct pdu_data *pdu = (struct pdu_data *)rx->pdu;

	switch (pdu->llctrl.opcode) {
#if defined(CONFIG_BT_CENTRAL)
	case PDU_DATA_LLCTRL_TYPE_PHY_RSP:
		lp_pu_execute_fsm(conn, ctx, LP_PU_EVT_PHY_RSP, pdu);
		break;
#endif /* CONFIG_BT_CENTRAL */
#if defined(CONFIG_BT_PERIPHERAL)
	case PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND:
		lp_pu_execute_fsm(conn, ctx, LP_PU_EVT_PHY_UPDATE_IND, pdu);
		break;
#endif /* CONFIG_BT_PERIPHERAL */
	case PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP:
		lp_pu_execute_fsm(conn, ctx, LP_PU_EVT_UNKNOWN, pdu);
		break;
	case PDU_DATA_LLCTRL_TYPE_REJECT_EXT_IND:
		lp_pu_execute_fsm(conn, ctx, LP_PU_EVT_REJECT, pdu);
		break;
	default:
		/* Invalid behaviour */
		/* Invalid PDU received so terminate connection */
		conn->llcp_terminate.reason_final = BT_HCI_ERR_LMP_PDU_NOT_ALLOWED;
		llcp_lr_complete(conn);
		ctx->state = LP_PU_STATE_IDLE;
		break;
	}
}
905 
/* Initialize the local PHY Update procedure context */
void llcp_lp_pu_init_proc(struct proc_ctx *ctx)
{
	ctx->state = LP_PU_STATE_IDLE;
}
910 
/* Drive the local procedure with a RUN event */
void llcp_lp_pu_run(struct ll_conn *conn, struct proc_ctx *ctx, void *param)
{
	lp_pu_execute_fsm(conn, ctx, LP_PU_EVT_RUN, param);
}
915 
/* Feed a TX ack for this procedure's PDU into the local FSM */
void llcp_lp_pu_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, void *param)
{
	lp_pu_execute_fsm(conn, ctx, LP_PU_EVT_ACK, param);
}
920 
/* Signal the local FSM that host notification may now be generated */
void llcp_lp_pu_tx_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
{
	lp_pu_execute_fsm(conn, ctx, LP_PU_EVT_NTF, NULL);
}
925 
/* True while the local procedure is waiting for the update instant */
bool llcp_lp_pu_awaiting_instant(struct proc_ctx *ctx)
{
	return (ctx->state == LP_PU_STATE_WAIT_INSTANT);
}
930 
931 /*
932  * LLCP Remote Procedure PHY Update FSM
933  */
/*
 * Encode and enqueue the remote procedure's LL Control PDU (PHY_RSP or
 * PHY_UPDATE_IND, selected by ctx->tx_opcode) towards LLL, then restart
 * the procedure response timeout.
 *
 * With CONFIG_BT_CTLR_DATA_LENGTH a DLE notification node is pre-allocated
 * up front; if none is available the TX is postponed by entering
 * RP_PU_STATE_WAIT_NTF_AVAIL, to be retried on a later RUN event.
 */
static void rp_pu_tx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	struct node_tx *tx;
	struct pdu_data *pdu;

	/* Caller must have allocated the TX node already */
	LL_ASSERT(ctx->node_ref.tx);

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	if (!llcp_ntf_alloc_is_available()) {
		/* No NTF nodes avail, so we need to hold off TX */
		ctx->state = RP_PU_STATE_WAIT_NTF_AVAIL;
		return;
	}

	ctx->data.pu.ntf_dle_node = llcp_ntf_alloc();
	LL_ASSERT(ctx->data.pu.ntf_dle_node);
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

	tx = ctx->node_ref.tx;
	ctx->node_ref.tx = NULL;
	pdu = (struct pdu_data *)tx->pdu;
	/* Remember the node so the later ACK can be matched to this procedure */
	ctx->node_ref.tx_ack = tx;

	/* Encode LL Control PDU */
	switch (ctx->tx_opcode) {
#if defined(CONFIG_BT_PERIPHERAL)
	case PDU_DATA_LLCTRL_TYPE_PHY_RSP:
		llcp_pdu_encode_phy_rsp(conn, pdu);
		/* As peripheral, next we expect the central's update indication */
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND;
		ctx->state = RP_PU_STATE_WAIT_TX_ACK_PHY_RSP;
		break;
#endif /* CONFIG_BT_PERIPHERAL */
#if defined(CONFIG_BT_CENTRAL)
	case PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND:
		/* Select resulting PHYs and the update instant before encoding */
		pu_prep_update_ind(conn, ctx);
		pu_prepare_instant(conn, ctx);
		llcp_pdu_encode_phy_update_ind(ctx, pdu);
		ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
		ctx->state = RP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND;
		break;
#endif /* CONFIG_BT_CENTRAL */
	default:
		/* Opcode not valid for this procedure */
		LL_ASSERT(0);
	}

	/* Enqueue LL Control PDU towards LLL */
	llcp_tx_enqueue(conn, tx);

	/* Restart procedure response timeout timer */
	llcp_rr_prt_restart(conn);
}
985 
/*
 * Finalize the remote procedure: mark it complete towards the remote request
 * machinery, lift the procedure-pause set while the update was in flight,
 * and return the FSM to idle.
 */
static void rp_pu_complete_finalize(struct ll_conn *conn, struct proc_ctx *ctx)
{
	llcp_rr_complete(conn);
	llcp_rr_set_paused_cmd(conn, PROC_NONE);
	ctx->state = RP_PU_STATE_IDLE;
}
992 
/*
 * Begin completion of the remote procedure: lift timing restrictions and
 * defer the final completion/NTF to the on-air instant state.
 */
static void rp_pu_complete(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	pu_reset_timing_restrict(conn);
	/* Postpone procedure completion (and possible NTF generation) to actual 'air instant'
	 * Since LLCP STM is driven from LLL prepare this actually happens BEFORE instant
	 * and thus NTFs are generated and propagated up prior to actual instant on air.
	 * Instead postpone completion/NTF to the beginning of RX handling
	 */
	ctx->state = RP_PU_STATE_WAIT_INSTANT_ON_AIR;
}
1003 
/*
 * Emit the PHY update host notification (and, when data length extension is
 * enabled, the DLE notification), then finalize the remote procedure.
 */
static void rp_pu_tx_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	pu_ntf(conn, ctx);
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	pu_dle_ntf(conn, ctx);
#endif
	rp_pu_complete_finalize(conn, ctx);
}
1012 
1013 #if defined(CONFIG_BT_CENTRAL)
/*
 * As central, try to allocate a TX node and send the PHY_UPDATE_IND.
 *
 * TX is deferred (state RP_PU_STATE_WAIT_TX_PHY_UPDATE_IND, retried on the
 * next RUN event) while the remote request machinery is paused, no TX node
 * can be allocated, a PHY update already pauses procedures, or the LLL TX
 * queue is not yet empty.
 */
static void rp_pu_send_phy_update_ind(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				      void *param)
{
	if (llcp_rr_ispaused(conn) || !llcp_tx_alloc_peek(conn, ctx) ||
	    (llcp_rr_get_paused_cmd(conn) == PROC_PHY_UPDATE) ||
	    !ull_is_lll_tx_queue_empty(conn)) {
		ctx->state = RP_PU_STATE_WAIT_TX_PHY_UPDATE_IND;
	} else {
		/* Pause CTE request procedure while the PHY update completes */
		llcp_rr_set_paused_cmd(conn, PROC_CTE_REQ);
		ctx->tx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND;
		ctx->node_ref.tx = llcp_tx_alloc(conn, ctx);
		rp_pu_tx(conn, ctx, evt, param);
	}
}
1029 #endif /* CONFIG_BT_CENTRAL */
1030 
#if defined(CONFIG_BT_PERIPHERAL)
/*
 * As peripheral, try to allocate a TX node and send the PHY_RSP.
 *
 * TX is deferred (state RP_PU_STATE_WAIT_TX_PHY_RSP, retried on the next
 * RUN event) while the remote request machinery is paused, no TX node can
 * be allocated, or a PHY update already pauses procedures.
 */
static void rp_pu_send_phy_rsp(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	if (llcp_rr_ispaused(conn) || !llcp_tx_alloc_peek(conn, ctx) ||
	    (llcp_rr_get_paused_cmd(conn) == PROC_PHY_UPDATE)) {
		ctx->state = RP_PU_STATE_WAIT_TX_PHY_RSP;
	} else {
		/* Pause CTE request procedure while the PHY update completes */
		llcp_rr_set_paused_cmd(conn, PROC_CTE_REQ);
		ctx->tx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_RSP;
		ctx->node_ref.tx = llcp_tx_alloc(conn, ctx);
		rp_pu_tx(conn, ctx, evt, param);
	}
}
#endif /* CONFIG_BT_PERIPHERAL */
1045 
/*
 * Idle state: a RUN event arms the procedure and moves it to wait for the
 * peer's PHY_REQ. Every other event is ignored.
 */
static void rp_pu_st_idle(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	if (evt == RP_PU_EVT_RUN) {
		ctx->state = RP_PU_STATE_WAIT_RX_PHY_REQ;
	}
}
1057 
/*
 * Wait-for-PHY_REQ state handler of the remote PHY Update procedure.
 *
 * Only the RP_PU_EVT_PHY_REQ event carries a PDU in 'param'; decoding must
 * therefore happen inside that case. Previously the decode/combine/pause
 * calls ran unconditionally before the switch, so any other event delivered
 * in this state (e.g. RP_PU_EVT_RUN, whose 'param' may be NULL) would
 * dereference an invalid PDU pointer and spuriously pause the TX queue.
 */
static void rp_pu_st_wait_rx_phy_req(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				     void *param)
{
	switch (evt) {
	case RP_PU_EVT_PHY_REQ:
		llcp_pdu_decode_phy_req(ctx, (struct pdu_data *)param);
		/* Combine with the 'Preferred' the phys in conn->phy_pref_?x */
		pu_combine_phys(conn, ctx, conn->phy_pref_tx, conn->phy_pref_rx);
		/* Pause data TX while the PHY update is in progress */
		llcp_tx_pause_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);

		switch (conn->lll.role) {
#if defined(CONFIG_BT_CENTRAL)
		case BT_HCI_ROLE_CENTRAL:
			/* Mark RX node to NOT release */
			llcp_rx_node_retain(ctx);
			rp_pu_send_phy_update_ind(conn, ctx, evt, param);
			break;
#endif /* CONFIG_BT_CENTRAL */
#if defined(CONFIG_BT_PERIPHERAL)
		case BT_HCI_ROLE_PERIPHERAL:
			rp_pu_send_phy_rsp(conn, ctx, evt, param);
			break;
#endif /* CONFIG_BT_PERIPHERAL */
		default:
			/* Unknown role */
			LL_ASSERT(0);
		}
		break;
	default:
		/* Ignore other evts */
		break;
	}
}
1091 
1092 #if defined(CONFIG_BT_PERIPHERAL)
/*
 * PHY_RSP transmission was deferred; keep retrying it on each RUN event
 * until a TX node becomes available and no pause is in effect.
 */
static void rp_pu_st_wait_tx_phy_rsp(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				     void *param)
{
	if (evt != RP_PU_EVT_RUN) {
		/* Ignore anything but the periodic run */
		return;
	}

	rp_pu_send_phy_rsp(conn, ctx, evt, param);
}
1105 #endif /* CONFIG_BT_PERIPHERAL */
1106 
/*
 * Shared TX-ack handler for both ack-pending states:
 *  - RP_PU_STATE_WAIT_TX_ACK_PHY_RSP (peripheral): apply timing restriction
 *    and await the central's PHY_UPDATE_IND.
 *  - RP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND (central): await the instant if
 *    any PHY changes, otherwise complete immediately.
 * In both cases the data TX pause taken at PHY_REQ reception is lifted.
 */
static void rp_pu_st_wait_tx_ack_phy(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				     void *param)
{
	switch (evt) {
	case RP_PU_EVT_ACK:
		/* if (0) keeps the chain well-formed under all config combos */
		if (0) {
#if defined(CONFIG_BT_PERIPHERAL)
		} else if (ctx->state == RP_PU_STATE_WAIT_TX_ACK_PHY_RSP) {
			LL_ASSERT(conn->lll.role == BT_HCI_ROLE_PERIPHERAL);
			/* When we act as peripheral apply timing restriction */
			pu_set_timing_restrict(
				conn, pu_select_phy_timing_restrict(conn, ctx->data.pu.tx));
			/* RSP acked, now await update ind from central */
			ctx->state = RP_PU_STATE_WAIT_RX_PHY_UPDATE_IND;
#endif /* CONFIG_BT_PERIPHERAL */
#if defined(CONFIG_BT_CENTRAL)
		} else if (ctx->state == RP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND) {
			LL_ASSERT(conn->lll.role == BT_HCI_ROLE_CENTRAL);
			if (ctx->data.pu.c_to_p_phy || ctx->data.pu.p_to_c_phy) {
				/* UPDATE_IND acked, so lets await instant */
				if (ctx->data.pu.c_to_p_phy) {
					/*
					 * And if central to periph phys changes
					 * apply timing restrictions
					 */
					pu_set_timing_restrict(conn, ctx->data.pu.c_to_p_phy);
				}
				ctx->state = RP_PU_STATE_WAIT_INSTANT;
			} else {
				/* No PHY changed; complete without an instant */
				rp_pu_complete(conn, ctx, evt, param);
			}
#endif /* CONFIG_BT_CENTRAL */
		} else {
			/* empty clause */
		}
		llcp_tx_resume_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);
		break;
	default:
		/* Ignore other evts */
		break;
	}
}
1149 
1150 #if defined(CONFIG_BT_CENTRAL)
/*
 * PHY_UPDATE_IND transmission was deferred; keep retrying it on each RUN
 * event until the TX preconditions are satisfied.
 */
static void rp_pu_st_wait_tx_phy_update_ind(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
					    void *param)
{
	if (evt != RP_PU_EVT_RUN) {
		/* Ignore anything but the periodic run */
		return;
	}

	rp_pu_send_phy_update_ind(conn, ctx, evt, param);
}
1163 #endif /* CONFIG_BT_CENTRAL */
1164 
1165 #if defined(CONFIG_BT_PERIPHERAL)
/*
 * Peripheral: handle the central's PHY_UPDATE_IND.
 *
 * If at least one PHY changes, stop the procedure response timeout and wait
 * for the instant. Otherwise complete now; if pu_check_update_ind() detected
 * a passed instant, the connection is additionally marked for termination.
 */
static void rp_pu_st_wait_rx_phy_update_ind(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
					    void *param)
{
	switch (evt) {
	case RP_PU_EVT_PHY_UPDATE_IND:
		llcp_pdu_decode_phy_update_ind(ctx, (struct pdu_data *)param);
		/* Nonzero means no PHY change (or error) -> end procedure now */
		const uint8_t end_procedure = pu_check_update_ind(conn, ctx);

		/* Mark RX node to NOT release */
		llcp_rx_node_retain(ctx);

		if (!end_procedure) {
			/* Since at least one phy will change,
			 * stop the procedure response timeout
			 */
			llcp_rr_prt_stop(conn);
			ctx->state = RP_PU_STATE_WAIT_INSTANT;
		} else {
			if (ctx->data.pu.error == BT_HCI_ERR_INSTANT_PASSED) {
				/* Mark the connection for termination */
				conn->llcp_terminate.reason_final = BT_HCI_ERR_INSTANT_PASSED;
			}
			rp_pu_complete(conn, ctx, evt, param);
		}
		break;
	default:
		/* Ignore other evts */
		break;
	}
}
1196 #endif /* CONFIG_BT_PERIPHERAL */
1197 
/*
 * If the PHY update instant has been reached (or passed), apply the new PHY
 * settings, record whether host notifications (PHY and, when enabled, DLE)
 * are needed, and move towards procedure completion. Otherwise do nothing
 * and wait for the next RUN event.
 */
static void rp_pu_check_instant(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				void *param)
{
	if (is_instant_reached_or_passed(ctx->data.pu.instant, ull_conn_event_counter(conn))) {
		ctx->data.pu.error = BT_HCI_ERR_SUCCESS;
		const uint8_t phy_changed = pu_apply_phy_update(conn, ctx);
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
		if (phy_changed) {
			/* PHY change may alter effective max TX/RX times */
			ctx->data.pu.ntf_dle = pu_update_eff_times(conn, ctx);
		}
#endif
		/* if PHY settings changed we should generate NTF */
		ctx->data.pu.ntf_pu = phy_changed;
		rp_pu_complete(conn, ctx, evt, param);
	}
}
1214 
/*
 * Waiting for the PHY update instant: each RUN event re-checks the
 * connection event counter against the negotiated instant.
 */
static void rp_pu_st_wait_instant(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				  void *param)
{
	if (evt != RP_PU_EVT_RUN) {
		/* Only the periodic run can advance past the instant */
		return;
	}

	rp_pu_check_instant(conn, ctx, evt, param);
}
1227 
/*
 * Instant has been applied; wait for the NTF event before generating host
 * notifications and finalizing, so notifications line up with the on-air
 * instant.
 */
static void rp_pu_st_wait_instant_on_air(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
					 void *param)
{
	if (evt != RP_PU_EVT_NTF) {
		/* Only the NTF trigger is acted upon here */
		return;
	}

	rp_pu_tx_ntf(conn, ctx, evt, param);
}
1240 
1241 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
/*
 * TX was held off because no NTF node was available; retry the deferred
 * transmission whenever the procedure is run again.
 */
static void rp_pu_st_wait_ntf_avail(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				    void *param)
{
	if (evt == RP_PU_EVT_RUN) {
		rp_pu_tx(conn, ctx, evt, param);
	}
}
1254 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
1255 
/*
 * Remote PHY Update procedure FSM dispatcher: route the event to the
 * handler for the context's current state. An unknown state is a fatal
 * controller error.
 */
static void rp_pu_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	switch (ctx->state) {
	case RP_PU_STATE_IDLE:
		rp_pu_st_idle(conn, ctx, evt, param);
		break;
	case RP_PU_STATE_WAIT_RX_PHY_REQ:
		rp_pu_st_wait_rx_phy_req(conn, ctx, evt, param);
		break;
#if defined(CONFIG_BT_PERIPHERAL)
	case RP_PU_STATE_WAIT_TX_PHY_RSP:
		rp_pu_st_wait_tx_phy_rsp(conn, ctx, evt, param);
		break;
	case RP_PU_STATE_WAIT_TX_ACK_PHY_RSP:
		rp_pu_st_wait_tx_ack_phy(conn, ctx, evt, param);
		break;
	case RP_PU_STATE_WAIT_RX_PHY_UPDATE_IND:
		rp_pu_st_wait_rx_phy_update_ind(conn, ctx, evt, param);
		break;
#endif /* CONFIG_BT_PERIPHERAL */
#if defined(CONFIG_BT_CENTRAL)
	case RP_PU_STATE_WAIT_TX_PHY_UPDATE_IND:
		rp_pu_st_wait_tx_phy_update_ind(conn, ctx, evt, param);
		break;
	case RP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND:
		rp_pu_st_wait_tx_ack_phy(conn, ctx, evt, param);
		break;
#endif /* CONFIG_BT_CENTRAL */
	case RP_PU_STATE_WAIT_INSTANT:
		rp_pu_st_wait_instant(conn, ctx, evt, param);
		break;
	case RP_PU_STATE_WAIT_INSTANT_ON_AIR:
		rp_pu_st_wait_instant_on_air(conn, ctx, evt, param);
		break;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case RP_PU_STATE_WAIT_NTF_AVAIL:
		rp_pu_st_wait_ntf_avail(conn, ctx, evt, param);
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
	default:
		/* Unknown state */
		LL_ASSERT(0);
	}
}
1300 
/*
 * Dispatch a received LL Control PDU to the Remote PHY Update procedure FSM.
 *
 * Valid opcodes are PHY_REQ (either role) and PHY_UPD_IND (peripheral only).
 * Any other opcode terminates the connection with
 * BT_HCI_ERR_LMP_PDU_NOT_ALLOWED and resets the procedure to idle.
 */
void llcp_rp_pu_rx(struct ll_conn *conn, struct proc_ctx *ctx, struct node_rx_pdu *rx)
{
	struct pdu_data *pdu = (struct pdu_data *)rx->pdu;

	switch (pdu->llctrl.opcode) {
	case PDU_DATA_LLCTRL_TYPE_PHY_REQ:
		rp_pu_execute_fsm(conn, ctx, RP_PU_EVT_PHY_REQ, pdu);
		break;
#if defined(CONFIG_BT_PERIPHERAL)
	case PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND:
		rp_pu_execute_fsm(conn, ctx, RP_PU_EVT_PHY_UPDATE_IND, pdu);
		break;
#endif /* CONFIG_BT_PERIPHERAL */
	default:
		/* Invalid behaviour */
		/* Invalid PDU received so terminate connection */
		conn->llcp_terminate.reason_final = BT_HCI_ERR_LMP_PDU_NOT_ALLOWED;
		llcp_rr_complete(conn);
		ctx->state = RP_PU_STATE_IDLE;
		break;
	}
}
1323 
/* Initialize the Remote PHY Update procedure context to its idle state. */
void llcp_rp_pu_init_proc(struct proc_ctx *ctx)
{
	ctx->state = RP_PU_STATE_IDLE;
}
1328 
/* Drive the Remote PHY Update FSM with a RUN event; param is forwarded as-is. */
void llcp_rp_pu_run(struct ll_conn *conn, struct proc_ctx *ctx, void *param)
{
	rp_pu_execute_fsm(conn, ctx, RP_PU_EVT_RUN, param);
}
1333 
/* Feed a TX-acknowledgment into the Remote PHY Update FSM. */
void llcp_rp_pu_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, void *param)
{
	rp_pu_execute_fsm(conn, ctx, RP_PU_EVT_ACK, param);
}
1338 
/* Signal the Remote PHY Update FSM that a host notification can be generated. */
void llcp_rp_pu_tx_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
{
	rp_pu_execute_fsm(conn, ctx, RP_PU_EVT_NTF, NULL);
}
1343 
/* Return true while the remote procedure is waiting for the PHY update instant. */
bool llcp_rp_pu_awaiting_instant(struct proc_ctx *ctx)
{
	return (ctx->state == RP_PU_STATE_WAIT_INSTANT);
}
1348