1 /*
2 * Copyright (c) 2024 Demant A/S
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/types.h>
8 #include <zephyr/sys/slist.h>
9
10 #include <zephyr/bluetooth/hci_types.h>
11
12 #include "hal/ccm.h"
13 #include "hal/debug.h"
14
15 #include "util/util.h"
16 #include "util/memq.h"
17 #include "util/dbuf.h"
18
19 #include "pdu_df.h"
20 #include "lll/pdu_vendor.h"
21 #include "pdu.h"
22
23 #include "lll.h"
24 #include "lll/lll_df_types.h"
25 #include "lll_filter.h"
26 #include "lll_scan.h"
27 #include "lll_sync.h"
28 #include "lll_sync_iso.h"
29 #include "lll_conn.h"
30 #include "lll_conn_iso.h"
31
32 #include "ull_tx_queue.h"
33
34 #include "isoal.h"
35 #include "ull_scan_types.h"
36 #include "ull_sync_types.h"
37 #include "ull_iso_types.h"
38 #include "ull_conn_types.h"
39 #include "ull_conn_iso_types.h"
40
41 #include "ll_settings.h"
42 #include "ll_feat.h"
43
44 #include "ull_llcp.h"
45 #include "ull_llcp_features.h"
46
47 #include "ull_internal.h"
48 #include "ull_sync_internal.h"
49 #include "ull_conn_internal.h"
50 #include "ull_llcp_internal.h"
51
52 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER)
/* LLCP Remote Procedure FSM states */
enum {
	/* No remote PAST procedure in progress */
	RP_PAST_STATE_IDLE,
	/* Waiting for the LL_PERIODIC_SYNC_IND PDU from the peer */
	RP_PAST_STATE_WAIT_RX,
	/* Waiting for the next connection event before handing sync info to ULL */
	RP_PAST_STATE_WAIT_NEXT_EVT,
#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
	/* Waiting for the shared deferred-resolve interface to become free */
	RP_PAST_STATE_WAIT_RESOLVE_INTERFACE_AVAIL,
	/* Waiting for the deferred RPA resolve callback to fire */
	RP_PAST_STATE_WAIT_RESOLVE_COMPLETE,
#endif /* CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY */
};
63
/* LLCP Remote Procedure PAST (receiver) FSM events */
enum {
	/* Procedure run */
	RP_PAST_EVT_RUN,

	/* IND received */
	RP_PAST_EVT_RX,

	/* RPA resolve completed */
	RP_PAST_EVT_RESOLVED,
};
75
#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
/* Active connection for RPA resolve.
 *
 * Acts as a single-owner lock on the deferred resolve interface: set when a
 * resolve is initiated, cleared in the resolve callback (or immediately on
 * failure to start). NULL means the interface is available.
 */
static struct ll_conn *rp_past_resolve_conn;
#endif /* CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY */
80
rp_check_phy(struct ll_conn * conn,struct proc_ctx * ctx,struct pdu_data * pdu)81 static uint8_t rp_check_phy(struct ll_conn *conn, struct proc_ctx *ctx,
82 struct pdu_data *pdu)
83 {
84 if (!phy_valid(pdu->llctrl.periodic_sync_ind.phy)) {
85 /* zero, more than one or any rfu bit selected in either phy */
86 return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
87 }
88
89 #if defined(CONFIG_BT_CTLR_PHY)
90 const uint8_t phy = pdu->llctrl.periodic_sync_ind.phy;
91
92 if (((phy & PHY_2M) && !IS_ENABLED(CONFIG_BT_CTLR_PHY_2M)) ||
93 ((phy & PHY_CODED) && !IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED))) {
94 /* Unsupported phy selected */
95 return BT_HCI_ERR_UNSUPP_FEATURE_PARAM_VAL;
96 }
97 #endif /* CONFIG_BT_CTLR_PHY */
98
99 return BT_HCI_ERR_SUCCESS;
100 }
101
/* Complete the remote PAST procedure: notify the remote-request machinery
 * and return the FSM to idle.
 */
static void rp_past_complete(struct ll_conn *conn, struct proc_ctx *ctx)
{
	llcp_rr_complete(conn);
	ctx->state = RP_PAST_STATE_IDLE;
}
107
/* Idle state: a RUN event arms the FSM to wait for the IND PDU;
 * all other events are ignored.
 */
static void rp_past_st_idle(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	if (evt != RP_PAST_EVT_RUN) {
		/* Ignore other evts */
		return;
	}

	ctx->state = RP_PAST_STATE_WAIT_RX;
}
119
120 #if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
rp_past_resolve_cb(void * param)121 static void rp_past_resolve_cb(void *param)
122 {
123 uint8_t rl_idx = (uint8_t)(uint32_t)param;
124 struct ll_conn *conn = rp_past_resolve_conn;
125 struct proc_ctx *ctx;
126 uint8_t id_addr_type;
127
128 /* Release resolving interface */
129 rp_past_resolve_conn = NULL;
130
131 ctx = llcp_rr_peek(conn);
132 if (!ctx) {
133 /* No context - possibly due to connection termination or
134 * other cause of procedure completion.
135 */
136 return;
137 }
138
139 if (ctx->state != RP_PAST_STATE_WAIT_RESOLVE_COMPLETE) {
140 /* Wrong state - possibly due to connection termination or
141 * other cause of procedure completion.
142 */
143 return;
144 }
145
146 /* If resolve failed then just continue in next event using the RPA */
147 if (rl_idx != FILTER_IDX_NONE) {
148 const bt_addr_t *id_addr;
149
150 id_addr = ull_filter_lll_id_addr_get(rl_idx, &id_addr_type);
151 if (id_addr) {
152 memcpy(&ctx->data.periodic_sync.adv_addr, &id_addr->val, sizeof(bt_addr_t));
153
154 ctx->data.periodic_sync.addr_type = id_addr_type;
155 ctx->data.periodic_sync.addr_resolved = 1U;
156 }
157 }
158
159 /* Let sync creation continue in next event */
160 ctx->state = RP_PAST_STATE_WAIT_NEXT_EVT;
161 }
162
rp_past_addr_resolve(struct ll_conn * conn,struct proc_ctx * ctx)163 static bool rp_past_addr_resolve(struct ll_conn *conn, struct proc_ctx *ctx)
164 {
165 bt_addr_t adv_addr;
166
167 if (rp_past_resolve_conn) {
168 /* Resolve interface busy */
169 return false;
170 }
171
172 (void)memcpy(&adv_addr.val, ctx->data.periodic_sync.adv_addr, sizeof(bt_addr_t));
173 rp_past_resolve_conn = conn;
174
175 if (ull_filter_deferred_resolve(&adv_addr, rp_past_resolve_cb)) {
176 /* Resolve initiated - wait for callback */
177 return true;
178 }
179
180 /* Resolve interface busy (in ull_filter) */
181 rp_past_resolve_conn = NULL;
182 return false;
183 }
184
/* Wait-for-resolve-interface state: on each procedure run, retry claiming
 * the deferred resolve interface; advance when the resolve starts.
 */
static void rp_past_st_wait_resolve_if(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				       void *param)
{
	if (evt != RP_PAST_EVT_RUN) {
		/* Ignore other evts */
		return;
	}

	if (rp_past_addr_resolve(conn, ctx)) {
		ctx->state = RP_PAST_STATE_WAIT_RESOLVE_COMPLETE;
	}
}
199 #endif /* CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY */
200
/* Wait-RX state: decode and validate a received LL_PERIODIC_SYNC_IND.
 *
 * Depending on configuration and content the procedure either hands the
 * sync info straight to ULL, defers to the next connection event (drift
 * correction), or first starts RPA resolution of the advertiser address.
 */
static void rp_past_st_wait_rx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
			       void *param)
{
	struct pdu_data *pdu = (struct pdu_data *)param;

	switch (evt) {
	case RP_PAST_EVT_RX:
		llcp_pdu_decode_periodic_sync_ind(ctx, pdu);

		/* Check PHY */
		if (rp_check_phy(conn, ctx, pdu) != BT_HCI_ERR_SUCCESS) {
			/* Invalid PHY - ignore and silently complete */
			rp_past_complete(conn, ctx);
			return;
		}

		/* Assume unresolved until the optional resolve step succeeds */
		ctx->data.periodic_sync.addr_resolved = 0U;

		if (ctx->data.periodic_sync.addr_type == BT_ADDR_LE_RANDOM) {
			/* TODO: For efficiency, check if address resolve is needed */
#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
			if (ull_filter_lll_rl_enabled()) {
				if (rp_past_addr_resolve(conn, ctx)) {
					/* Resolve started - wait for callback */
					ctx->state = RP_PAST_STATE_WAIT_RESOLVE_COMPLETE;
					return;
				}
				/* Resolve interface busy - retry on next run */
				ctx->state = RP_PAST_STATE_WAIT_RESOLVE_INTERFACE_AVAIL;
				return;
			}
#else
			/* TODO: Not implemented - use RPA */
#endif /* CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY */
		}

		if (IS_ENABLED(CONFIG_BT_PERIPHERAL)) {
			/* If sync_conn_event_count is this connection event,
			 * we have to wait for drift correction for this event to be applied -
			 * continue processing in the next conn event
			 */
			if (conn->lll.role == BT_HCI_ROLE_PERIPHERAL &&
			    ctx->data.periodic_sync.sync_conn_event_count ==
			    ull_conn_event_counter(conn)) {
				ctx->state = RP_PAST_STATE_WAIT_NEXT_EVT;
				return;
			}
		}

		/* Hand over to ULL */
		ull_sync_transfer_received(conn,
					   ctx->data.periodic_sync.id,
					   &ctx->data.periodic_sync.sync_info,
					   ctx->data.periodic_sync.conn_event_count,
					   ctx->data.periodic_sync.last_pa_event_counter,
					   ctx->data.periodic_sync.sid,
					   ctx->data.periodic_sync.addr_type,
					   ctx->data.periodic_sync.sca,
					   ctx->data.periodic_sync.phy,
					   ctx->data.periodic_sync.adv_addr,
					   ctx->data.periodic_sync.sync_conn_event_count,
					   ctx->data.periodic_sync.addr_resolved);
		rp_past_complete(conn, ctx);
		break;
	default:
		/* Ignore other evts */
		break;
	}
}
268
/* Wait-next-event state: deferred hand-over of the received sync info to
 * ULL, entered when drift correction or RPA resolution forced a delay.
 */
static void rp_past_st_wait_next_evt(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				     void *param)
{
	switch (evt) {
	case RP_PAST_EVT_RUN:
#if defined(CONFIG_BT_PERIPHERAL)
		/* If sync_conn_event_count is this connection event,
		 * we have to wait for drift correction for this event to be applied -
		 * continue processing in the next conn event
		 */
		if (conn->lll.role == BT_HCI_ROLE_PERIPHERAL &&
		    ctx->data.periodic_sync.sync_conn_event_count == ull_conn_event_counter(conn)) {
			return;
		}
#endif /* CONFIG_BT_PERIPHERAL */

		/* Hand over to ULL */
		ull_sync_transfer_received(conn,
					   ctx->data.periodic_sync.id,
					   &ctx->data.periodic_sync.sync_info,
					   ctx->data.periodic_sync.conn_event_count,
					   ctx->data.periodic_sync.last_pa_event_counter,
					   ctx->data.periodic_sync.sid,
					   ctx->data.periodic_sync.addr_type,
					   ctx->data.periodic_sync.sca,
					   ctx->data.periodic_sync.phy,
					   ctx->data.periodic_sync.adv_addr,
					   ctx->data.periodic_sync.sync_conn_event_count,
					   ctx->data.periodic_sync.addr_resolved);
		rp_past_complete(conn, ctx);
		break;
	default:
		/* Ignore other evts */
		break;
	}
}
305
/* Dispatch an event to the handler for the current remote PAST FSM state */
static void rp_past_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				void *param)
{
	switch (ctx->state) {
	case RP_PAST_STATE_IDLE:
		rp_past_st_idle(conn, ctx, evt, param);
		break;
	case RP_PAST_STATE_WAIT_RX:
		rp_past_st_wait_rx(conn, ctx, evt, param);
		break;
	case RP_PAST_STATE_WAIT_NEXT_EVT:
		rp_past_st_wait_next_evt(conn, ctx, evt, param);
		break;
#if defined(CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY)
	case RP_PAST_STATE_WAIT_RESOLVE_INTERFACE_AVAIL:
		rp_past_st_wait_resolve_if(conn, ctx, evt, param);
		break;
	case RP_PAST_STATE_WAIT_RESOLVE_COMPLETE:
		/* Waiting for callback - do nothing */
		break;
#endif /* CONFIG_BT_CTLR_SW_DEFERRED_PRIVACY */
	default:
		/* Unknown state */
		LL_ASSERT(0);
		break;
	}
}
333
llcp_rp_past_rx(struct ll_conn * conn,struct proc_ctx * ctx,struct node_rx_pdu * rx)334 void llcp_rp_past_rx(struct ll_conn *conn, struct proc_ctx *ctx, struct node_rx_pdu *rx)
335 {
336 rp_past_execute_fsm(conn, ctx, RP_PAST_EVT_RX, rx->pdu);
337 }
338
llcp_rp_past_run(struct ll_conn * conn,struct proc_ctx * ctx,void * param)339 void llcp_rp_past_run(struct ll_conn *conn, struct proc_ctx *ctx, void *param)
340 {
341 rp_past_execute_fsm(conn, ctx, RP_PAST_EVT_RUN, param);
342 }
343
344 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_RECEIVER */
345
346 #if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER)
/* LLCP Local Procedure FSM states */
enum {
	/* No local PAST procedure in progress */
	LP_PAST_STATE_IDLE,
	/* Waiting for a free tx buffer / unpaused procedures before requesting offset */
	LP_PAST_STATE_WAIT_TX_REQ,
	/* Waiting for the ULL offset-calculation reply */
	LP_PAST_STATE_WAIT_OFFSET_CALC,
	/* LL_PERIODIC_SYNC_IND enqueued, waiting for its tx ack */
	LP_PAST_STATE_WAIT_TX_ACK,
	/* Waiting for the ongoing connection event to end */
	LP_PAST_STATE_WAIT_EVT_DONE,
};
355
/* LLCP Local Procedure PAST (sender) FSM events */
enum {
	/* Procedure run */
	LP_PAST_EVT_RUN,

	/* Offset calculation reply received */
	LP_PAST_EVT_OFFSET_CALC_REPLY,

	/* RX received in connection event */
	LP_PAST_EVT_RX_RECEIVED,

	/* RX not received in connection event */
	LP_PAST_EVT_NO_RX_RECEIVED,

	/* Ack received */
	LP_PAST_EVT_ACK,
};
373
374 static void lp_past_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
375 void *param);
376
/* Complete the local PAST procedure: notify the local-request machinery
 * and return the FSM to idle.
 */
static void lp_past_complete(struct ll_conn *conn, struct proc_ctx *ctx)
{
	llcp_lr_complete(conn);
	ctx->state = LP_PAST_STATE_IDLE;
}
382
/* Encode and enqueue the LL_PERIODIC_SYNC_IND PDU towards LLL.
 *
 * On the Peripheral, the sync-info offset is first adjusted by
 * conn_start_to_actual_us before the PDU is encoded. The FSM then waits
 * for the tx ack; the tx node is remembered in ctx->node_ref.tx_ack.
 */
static void lp_past_tx(struct ll_conn *conn, struct proc_ctx *ctx)
{
	struct node_tx *tx;
	struct pdu_data *pdu;

	if (IS_ENABLED(CONFIG_BT_PERIPHERAL)) {
		if (conn->lll.role == BT_HCI_ROLE_PERIPHERAL) {
			uint32_t offset_us;

			/* Correct offset can be calculated now that we know the event start time */
			offset_us = ctx->data.periodic_sync.offset_us -
				    ctx->data.periodic_sync.conn_start_to_actual_us;

			llcp_pdu_fill_sync_info_offset(&ctx->data.periodic_sync.sync_info,
						       offset_us);
		}
	}

	/* Allocate tx node */
	tx = llcp_tx_alloc(conn, ctx);
	LL_ASSERT(tx);

	pdu = (struct pdu_data *)tx->pdu;

	/* Encode LL Control PDU */
	llcp_pdu_encode_periodic_sync_ind(ctx, pdu);

	/* Enqueue LL Control PDU towards LLL */
	llcp_tx_enqueue(conn, tx);

	/* Wait for TX Ack */
	ctx->state = LP_PAST_STATE_WAIT_TX_ACK;
	ctx->node_ref.tx_ack = tx;
}
417
llcp_lp_past_offset_calc_reply(struct ll_conn * conn,struct proc_ctx * ctx)418 void llcp_lp_past_offset_calc_reply(struct ll_conn *conn, struct proc_ctx *ctx)
419 {
420 lp_past_execute_fsm(conn, ctx, LP_PAST_EVT_OFFSET_CALC_REPLY, NULL);
421 }
422
/* Request the sync offset calculation from ULL, provided a tx buffer is
 * available and procedures are not paused; otherwise park in WAIT_TX_REQ
 * and retry on a later run event.
 */
static void lp_past_offset_calc_req(struct ll_conn *conn, struct proc_ctx *ctx,
				    uint8_t evt, void *param)
{
	const bool can_req = !llcp_lr_ispaused(conn) && llcp_tx_alloc_peek(conn, ctx);

	if (!can_req) {
		/* Paused or no tx buffer - try again later */
		ctx->state = LP_PAST_STATE_WAIT_TX_REQ;
		return;
	}

	/* Call ULL and wait for reply */
	ctx->state = LP_PAST_STATE_WAIT_OFFSET_CALC;
	ull_conn_past_sender_offset_request(conn);
}
434
/* Wait-TX-request state: on each run, retry the offset calculation request */
static void lp_past_st_wait_tx_req(struct ll_conn *conn, struct proc_ctx *ctx,
				   uint8_t evt, void *param)
{
	if (evt != LP_PAST_EVT_RUN) {
		/* Ignore other evts */
		return;
	}

	lp_past_offset_calc_req(conn, ctx, evt, param);
}
447
/* Wait-offset-calc state: once ULL replies with the calculated offset,
 * decide whether to transmit now, wait for the ongoing connection event
 * to finish, or restart in the next connection event.
 */
static void lp_past_st_wait_offset_calc(struct ll_conn *conn, struct proc_ctx *ctx,
					uint8_t evt, void *param)
{
	if (evt != LP_PAST_EVT_OFFSET_CALC_REPLY) {
		/* Ignore other evts */
		return;
	}

	if (ull_ref_get(&conn->ull)) {
		/* Connection event still ongoing, wait for done */
		ctx->state = LP_PAST_STATE_WAIT_EVT_DONE;
	} else if (ctx->data.periodic_sync.conn_evt_trx) {
		/* Connection event done with successful rx from peer */
		lp_past_tx(conn, ctx);
	} else {
		/* Reset state and try again in next connection event */
		ctx->state = LP_PAST_STATE_WAIT_TX_REQ;
	}
}
469
llcp_lp_past_tx_ack(struct ll_conn * conn,struct proc_ctx * ctx,struct node_tx * tx)470 void llcp_lp_past_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, struct node_tx *tx)
471 {
472 lp_past_execute_fsm(conn, ctx, LP_PAST_EVT_ACK, tx->pdu);
473 }
474
llcp_lp_past_conn_evt_done(struct ll_conn * conn,struct proc_ctx * ctx)475 void llcp_lp_past_conn_evt_done(struct ll_conn *conn, struct proc_ctx *ctx)
476 {
477 if (ctx->data.periodic_sync.conn_evt_trx != 0) {
478 lp_past_execute_fsm(conn, ctx, LP_PAST_EVT_RX_RECEIVED, NULL);
479 } else {
480 lp_past_execute_fsm(conn, ctx, LP_PAST_EVT_NO_RX_RECEIVED, NULL);
481 }
482 }
483
/* Wait-event-done state: once the connection event ends, transmit the IND
 * if a packet was received from the peer, otherwise retry next event.
 *
 * Offset calculation has to be done in a connection event where a packet
 * was received from the peer.
 * From Core Spec v5.4, Vol 6, Part B, Section 2.4.2.27:
 * syncConnEventCount shall be set to the connection event counter for the
 * connection event that the sending device used in determining the contents
 * of this PDU. This shall be a connection event where the sending device
 * received a packet from the device it will send the LL_PERIODIC_SYNC_-
 * IND PDU to and, if the sending device is the Peripheral on the piconet con-
 * taining those two devices, it used the received packet to synchronize its
 * anchor point
 */
static void lp_past_state_wait_evt_done(struct ll_conn *conn, struct proc_ctx *ctx,
					uint8_t evt, void *param)
{
	switch (evt) {
	case LP_PAST_EVT_RX_RECEIVED:
		lp_past_tx(conn, ctx);
		break;
	case LP_PAST_EVT_NO_RX_RECEIVED:
		/* Try again in next connection event */
		ctx->state = LP_PAST_STATE_WAIT_TX_REQ;
		break;
	default:
		/* Ignore other evts - added for consistency with the other
		 * state handlers in this file (explicit default case).
		 */
		break;
	}
}
508
/* Idle state: on RUN, confirm the peer supports PAST reception (feature
 * exchange may have completed after the procedure was enqueued) and start
 * the offset calculation; otherwise complete silently.
 */
static void lp_past_st_idle(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
	if (evt != LP_PAST_EVT_RUN) {
		/* Ignore other evts */
		return;
	}

	if (!feature_peer_periodic_sync_recv(conn)) {
		/* Peer doesn't support PAST Receiver; HCI gives us no way to
		 * indicate this to the host so just silently complete the procedure
		 */
		lp_past_complete(conn, ctx);
		return;
	}

	lp_past_offset_calc_req(conn, ctx, evt, param);
}
530
/* Wait-TX-ack state: procedure completes once the IND PDU is acknowledged */
static void lp_past_st_wait_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				   void *param)
{
	if (evt != LP_PAST_EVT_ACK) {
		/* Ignore other evts */
		return;
	}

	/* Received Ack - All done now */
	lp_past_complete(conn, ctx);
}
544
/* Dispatch an event to the handler for the current local PAST FSM state */
static void lp_past_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
				void *param)
{
	switch (ctx->state) {
	case LP_PAST_STATE_IDLE:
		lp_past_st_idle(conn, ctx, evt, param);
		break;
	case LP_PAST_STATE_WAIT_TX_REQ:
		lp_past_st_wait_tx_req(conn, ctx, evt, param);
		break;
	case LP_PAST_STATE_WAIT_OFFSET_CALC:
		lp_past_st_wait_offset_calc(conn, ctx, evt, param);
		break;
	case LP_PAST_STATE_WAIT_TX_ACK:
		lp_past_st_wait_tx_ack(conn, ctx, evt, param);
		break;
	case LP_PAST_STATE_WAIT_EVT_DONE:
		lp_past_state_wait_evt_done(conn, ctx, evt, param);
		break;
	default:
		/* Unknown state */
		LL_ASSERT(0);
		break;
	}
}
570
llcp_lp_past_run(struct ll_conn * conn,struct proc_ctx * ctx,void * param)571 void llcp_lp_past_run(struct ll_conn *conn, struct proc_ctx *ctx, void *param)
572 {
573 lp_past_execute_fsm(conn, ctx, LP_PAST_EVT_RUN, param);
574 }
575 #endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER */
576