/*
 * Copyright (c) 2020 Demant
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>

#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/slist.h>
#include <zephyr/sys/util.h>

#include <zephyr/bluetooth/hci_types.h>

#include "hal/ccm.h"

#include "util/util.h"
#include "util/mem.h"
#include "util/memq.h"
#include "util/mayfly.h"
#include "util/dbuf.h"

#include "pdu_df.h"
#include "lll/pdu_vendor.h"
#include "pdu.h"

#include "ll.h"
#include "ll_settings.h"

#include "lll.h"
#include "lll/lll_df_types.h"
#include "lll_conn.h"
#include "lll_conn_iso.h"

#include "ull_tx_queue.h"

#include "isoal.h"
#include "ull_internal.h"
#include "ull_iso_types.h"
#include "ull_conn_iso_types.h"
#include "ull_conn_iso_internal.h"

#include "ull_conn_types.h"
#include "ull_llcp.h"
#include "ull_llcp_internal.h"
#include "ull_conn_internal.h"

#include <soc.h>
#include "hal/debug.h"

static struct proc_ctx *lr_dequeue(struct ll_conn *conn);

/* LLCP Local Request FSM State */
enum lr_state {
	LR_STATE_IDLE,
	LR_STATE_ACTIVE,
	LR_STATE_DISCONNECT,
	LR_STATE_TERMINATE,
};

/* LLCP Local Request FSM Event */
enum {
	/* Procedure run */
	LR_EVT_RUN,

	/* Procedure completed */
	LR_EVT_COMPLETE,

	/* Link connected */
	LR_EVT_CONNECT,

	/* Link disconnected */
	LR_EVT_DISCONNECT,
};

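/* If the given local procedure context has completed, dequeue it from the
 * pending procedure list and release it. The context is expected to be at
 * the head of the queue.
 */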
void llcp_lr_check_done(struct ll_conn *conn, struct proc_ctx *ctx)
{
	if (ctx->done) {
		struct proc_ctx *ctx_header;

		ctx_header = llcp_lr_peek(conn);
		LL_ASSERT(ctx_header == ctx);

		/* If we have a node rx it must not be marked RETAIN as
		 * the memory referenced would leak
		 */
		LL_ASSERT(ctx->node_ref.rx == NULL ||
			  ctx->node_ref.rx->hdr.type != NODE_RX_TYPE_RETAIN);

		lr_dequeue(conn);

		llcp_proc_ctx_release(ctx);
	}
}

/*
 * LLCP Local Request Shared Data Locking
 */

static ALWAYS_INLINE uint32_t shared_data_access_lock(void)
{
	bool enabled;

	if (mayfly_is_running()) {
		/* We are in Mayfly context, nothing to be done */
		return false;
	}

	/* We are in thread context and have to disable TICKER_USER_ID_ULL_HIGH */
	enabled = mayfly_is_enabled(TICKER_USER_ID_THREAD, TICKER_USER_ID_ULL_HIGH) != 0U;
	mayfly_enable(TICKER_USER_ID_THREAD, TICKER_USER_ID_ULL_HIGH, 0U);

	return enabled;
}

static ALWAYS_INLINE void shared_data_access_unlock(bool key)
{
	if (key) {
		/* We are in thread context and have to re-enable TICKER_USER_ID_ULL_HIGH */
		mayfly_enable(TICKER_USER_ID_THREAD, TICKER_USER_ID_ULL_HIGH, 1U);
	}
}

/*
 * LLCP Local Request FSM
 */

static void lr_set_state(struct ll_conn *conn, enum lr_state state)
{
	conn->llcp.local.state = state;
}

void llcp_lr_enqueue(struct ll_conn *conn, struct proc_ctx *ctx)
{
	/* This function is called from both Thread and Mayfly (ISR) context;
	 * make sure only a single context has access at a time.
	 */

	bool key = shared_data_access_lock();

	sys_slist_append(&conn->llcp.local.pend_proc_list, &ctx->node);

	shared_data_access_unlock(key);
}

static struct proc_ctx *lr_dequeue(struct ll_conn *conn)
{
	/* This function is called from both Thread and Mayfly (ISR) context;
	 * make sure only a single context has access at a time.
	 */

	struct proc_ctx *ctx;

	bool key = shared_data_access_lock();

	ctx = (struct proc_ctx *)sys_slist_get(&conn->llcp.local.pend_proc_list);

	shared_data_access_unlock(key);

	return ctx;
}

struct proc_ctx *llcp_lr_peek(struct ll_conn *conn)
{
	/* This function is called from both Thread and Mayfly (ISR) context;
	 * make sure only a single context has access at a time.
	 */
	struct proc_ctx *ctx;

	bool key = shared_data_access_lock();

	ctx = (struct proc_ctx *)sys_slist_peek_head(&conn->llcp.local.pend_proc_list);

	shared_data_access_unlock(key);

	return ctx;
}

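/* Return the first pending local procedure context of the given procedure
 * type, or NULL if no such procedure is queued.
 */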
struct proc_ctx *llcp_lr_peek_proc(struct ll_conn *conn, uint8_t proc)
{
	/* This function is called from both Thread and Mayfly (ISR) context;
	 * make sure only a single context has access at a time.
	 */

	struct proc_ctx *ctx, *tmp;

	bool key = shared_data_access_lock();

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&conn->llcp.local.pend_proc_list, ctx, tmp, node) {
		if (ctx->proc == proc) {
			break;
		}
	}

	shared_data_access_unlock(key);

	return ctx;
}

bool llcp_lr_ispaused(struct ll_conn *conn)
{
	return conn->llcp.local.pause == 1U;
}

void llcp_lr_pause(struct ll_conn *conn)
{
	conn->llcp.local.pause = 1U;
}

void llcp_lr_resume(struct ll_conn *conn)
{
	conn->llcp.local.pause = 0U;
}

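/* Procedure Response Timeout (PRT) control for the local request machine */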
void llcp_lr_prt_restart(struct ll_conn *conn)
{
	conn->llcp.local.prt_expire = conn->llcp.prt_reload;
}

void llcp_lr_prt_restart_with_value(struct ll_conn *conn, uint16_t value)
{
	conn->llcp.local.prt_expire = value;
}

void llcp_lr_prt_stop(struct ll_conn *conn)
{
	conn->llcp.local.prt_expire = 0U;
}

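/* Release all pending local procedure contexts and their node references,
 * e.g. on disconnect or connection termination.
 */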
void llcp_lr_flush_procedures(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	/* Flush all pending procedures */
	ctx = lr_dequeue(conn);
	while (ctx) {
		llcp_nodes_release(conn, ctx);
		llcp_proc_ctx_release(ctx);
		ctx = lr_dequeue(conn);
	}
}

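/* Dispatch an incoming LL Control PDU to the RX handler of the active local
 * procedure and check whether the procedure has thereby completed.
 */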
void llcp_lr_rx(struct ll_conn *conn, struct proc_ctx *ctx, memq_link_t *link,
		struct node_rx_pdu *rx)
{
	/* In a specific connection update procedure collision, an 'unexpected'
	 * REJECT_IND PDU may be received and passed in here as RX'ed. Storing it
	 * would discard the retention of the previously received
	 * CONNECTION_UPDATE_IND, and an assert would then be hit when attempting
	 * to use that retained RX node for creating the notification on
	 * completion of the connection parameter request.
	 * (see comment in ull_llcp_conn_upd.c::lp_cu_st_wait_instant() for more details)
	 *
	 * The workaround/fix for this is to only store an RX node for retention
	 * if we haven't already got one.
	 */
	if (!ctx->node_ref.rx) {
		/* Store RX node and link */
		ctx->node_ref.rx = rx;
		ctx->node_ref.link = link;
	}

	switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_LE_PING)
	case PROC_LE_PING:
		llcp_lp_comm_rx(conn, ctx, rx);
		break;
#endif /* CONFIG_BT_CTLR_LE_PING */
	case PROC_FEATURE_EXCHANGE:
		llcp_lp_comm_rx(conn, ctx, rx);
		break;
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN)
	case PROC_MIN_USED_CHANS:
		llcp_lp_comm_rx(conn, ctx, rx);
		break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN */
	case PROC_VERSION_EXCHANGE:
		llcp_lp_comm_rx(conn, ctx, rx);
		break;
#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_CENTRAL)
	case PROC_ENCRYPTION_START:
	case PROC_ENCRYPTION_PAUSE:
		llcp_lp_enc_rx(conn, ctx, rx);
		break;
#endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_CENTRAL */
#ifdef CONFIG_BT_CTLR_PHY
	case PROC_PHY_UPDATE:
		llcp_lp_pu_rx(conn, ctx, rx);
		break;
#endif /* CONFIG_BT_CTLR_PHY */
	case PROC_CONN_UPDATE:
	case PROC_CONN_PARAM_REQ:
		llcp_lp_cu_rx(conn, ctx, rx);
		break;
	case PROC_TERMINATE:
		llcp_lp_comm_rx(conn, ctx, rx);
		break;
#if defined(CONFIG_BT_CENTRAL)
	case PROC_CHAN_MAP_UPDATE:
		llcp_lp_chmu_rx(conn, ctx, rx);
		break;
#endif /* CONFIG_BT_CENTRAL */
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case PROC_DATA_LENGTH_UPDATE:
		llcp_lp_comm_rx(conn, ctx, rx);
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	case PROC_CTE_REQ:
		llcp_lp_comm_rx(conn, ctx, rx);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	case PROC_CIS_TERMINATE:
		llcp_lp_comm_rx(conn, ctx, rx);
		break;
#endif /* defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) */
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
	case PROC_CIS_CREATE:
		llcp_lp_cc_rx(conn, ctx, rx);
		break;
#endif /* defined(CONFIG_BT_CTLR_CENTRAL_ISO) */
#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
	case PROC_SCA_UPDATE:
		llcp_lp_comm_rx(conn, ctx, rx);
		break;
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */
	default:
		/* Unknown procedure */
		LL_ASSERT(0);
		break;
	}

	/* If rx node was not retained clear reference */
	if (ctx->node_ref.rx && ctx->node_ref.rx->hdr.type != NODE_RX_TYPE_RETAIN) {
		ctx->node_ref.rx = NULL;
	}

	llcp_lr_check_done(conn, ctx);
}

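/* Dispatch the acknowledgment of a transmitted LL Control PDU to the active
 * local procedure; procedures without a TX ack handler simply ignore it.
 */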
void llcp_lr_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, struct node_tx *tx)
{
	switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN)
	case PROC_MIN_USED_CHANS:
		llcp_lp_comm_tx_ack(conn, ctx, tx);
		break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN */
	case PROC_TERMINATE:
		llcp_lp_comm_tx_ack(conn, ctx, tx);
		break;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case PROC_DATA_LENGTH_UPDATE:
		llcp_lp_comm_tx_ack(conn, ctx, tx);
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#ifdef CONFIG_BT_CTLR_PHY
	case PROC_PHY_UPDATE:
		llcp_lp_pu_tx_ack(conn, ctx, tx);
		break;
#endif /* CONFIG_BT_CTLR_PHY */
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	case PROC_CIS_TERMINATE:
		llcp_lp_comm_tx_ack(conn, ctx, tx);
		break;
#endif /* defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) */
#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER)
	case PROC_PERIODIC_SYNC:
		llcp_lp_past_tx_ack(conn, ctx, tx);
		break;
#endif /* defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER) */
	default:
		/* Ignore tx_ack */
		break;
	}

	/* Clear TX node reference */
	ctx->node_ref.tx_ack = NULL;

	llcp_lr_check_done(conn, ctx);
}

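/* Forward the TX notification event to the active local procedure; only the
 * PHY update procedure currently acts on it.
 */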
void llcp_lr_tx_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
{
	switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_PHY)
	case PROC_PHY_UPDATE:
		llcp_lp_pu_tx_ntf(conn, ctx);
		break;
#endif /* CONFIG_BT_CTLR_PHY */
	default:
		/* Ignore other procedures */
		break;
	}

	llcp_lr_check_done(conn, ctx);
}

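/* Run the procedure at the head of the pending local procedure queue by
 * invoking its procedure-specific run handler.
 */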
static void lr_act_run(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_lr_peek(conn);

	switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_LE_PING)
	case PROC_LE_PING:
		llcp_lp_comm_run(conn, ctx, NULL);
		break;
#endif /* CONFIG_BT_CTLR_LE_PING */
	case PROC_FEATURE_EXCHANGE:
		llcp_lp_comm_run(conn, ctx, NULL);
		break;
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN)
	case PROC_MIN_USED_CHANS:
		llcp_lp_comm_run(conn, ctx, NULL);
		break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN */
	case PROC_VERSION_EXCHANGE:
		llcp_lp_comm_run(conn, ctx, NULL);
		break;
#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_CENTRAL)
	case PROC_ENCRYPTION_START:
	case PROC_ENCRYPTION_PAUSE:
		llcp_lp_enc_run(conn, ctx, NULL);
		break;
#endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_CENTRAL */
#ifdef CONFIG_BT_CTLR_PHY
	case PROC_PHY_UPDATE:
		llcp_lp_pu_run(conn, ctx, NULL);
		break;
#endif /* CONFIG_BT_CTLR_PHY */
	case PROC_CONN_UPDATE:
	case PROC_CONN_PARAM_REQ:
		llcp_lp_cu_run(conn, ctx, NULL);
		break;
	case PROC_TERMINATE:
		llcp_lp_comm_run(conn, ctx, NULL);
		break;
#if defined(CONFIG_BT_CENTRAL)
	case PROC_CHAN_MAP_UPDATE:
		llcp_lp_chmu_run(conn, ctx, NULL);
		break;
#endif /* CONFIG_BT_CENTRAL */
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case PROC_DATA_LENGTH_UPDATE:
		llcp_lp_comm_run(conn, ctx, NULL);
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	case PROC_CTE_REQ:
		/* 3rd param null? */
		llcp_lp_comm_run(conn, ctx, NULL);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
	case PROC_CIS_CREATE:
		llcp_lp_cc_run(conn, ctx, NULL);
		break;
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	case PROC_CIS_TERMINATE:
		llcp_lp_comm_run(conn, ctx, NULL);
		break;
#endif /* defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) */
#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
	case PROC_SCA_UPDATE:
		llcp_lp_comm_run(conn, ctx, NULL);
		break;
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */
#if defined(CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER)
	case PROC_PERIODIC_SYNC:
		llcp_lp_past_run(conn, ctx, NULL);
		break;
#endif /* CONFIG_BT_CTLR_SYNC_TRANSFER_SENDER */
	default:
		/* Unknown procedure */
		LL_ASSERT(0);
		break;
	}

	llcp_lr_check_done(conn, ctx);
}

static void lr_act_complete(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_lr_peek(conn);
	LL_ASSERT(ctx != NULL);

	/* Stop procedure response timeout timer */
	llcp_lr_prt_stop(conn);

	/* Mark the procedure as safe to delete */
	ctx->done = 1U;
}

static void lr_act_connect(struct ll_conn *conn)
{
	/* Empty on purpose */
}

static void lr_act_disconnect(struct ll_conn *conn)
{
	llcp_lr_flush_procedures(conn);
}

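/*
 * Local Request FSM state handlers
 */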
static void lr_st_disconnect(struct ll_conn *conn, uint8_t evt, void *param)
{
	switch (evt) {
	case LR_EVT_CONNECT:
		lr_act_connect(conn);
		lr_set_state(conn, LR_STATE_IDLE);
		break;
	default:
		/* Ignore other evts */
		break;
	}
}

static void lr_st_idle(struct ll_conn *conn, uint8_t evt, void *param)
{
	struct proc_ctx *ctx;

	switch (evt) {
	case LR_EVT_RUN:
		ctx = llcp_lr_peek(conn);
		if (ctx) {
			/*
			 * Since the call to lr_act_run may release the context,
			 * we need to remember which procedure we are running.
			 */
			const enum llcp_proc curr_proc = ctx->proc;

			lr_act_run(conn);
			if (curr_proc != PROC_TERMINATE) {
				lr_set_state(conn, LR_STATE_ACTIVE);
			} else {
				lr_set_state(conn, LR_STATE_TERMINATE);
			}
		}
		break;
	case LR_EVT_DISCONNECT:
		lr_act_disconnect(conn);
		lr_set_state(conn, LR_STATE_DISCONNECT);
		break;
	case LR_EVT_COMPLETE:
		/* Some procedures, like the CTE request, may complete without actually
		 * running due to a change in conditions while the procedure was waiting
		 * in the queue.
		 */
		lr_act_complete(conn);
		break;
	default:
		/* Ignore other evts */
		break;
	}
}

static void lr_st_active(struct ll_conn *conn, uint8_t evt, void *param)
{
	switch (evt) {
	case LR_EVT_RUN:
		if (llcp_lr_peek(conn)) {
			lr_act_run(conn);
		}
		break;
	case LR_EVT_COMPLETE:
		lr_act_complete(conn);
		lr_set_state(conn, LR_STATE_IDLE);
		break;
	case LR_EVT_DISCONNECT:
		lr_act_disconnect(conn);
		lr_set_state(conn, LR_STATE_DISCONNECT);
		break;
	default:
		/* Ignore other evts */
		break;
	}
}

static void lr_st_terminate(struct ll_conn *conn, uint8_t evt, void *param)
{
	switch (evt) {
	case LR_EVT_RUN:
		if (llcp_lr_peek(conn)) {
			lr_act_run(conn);
		}
		break;
	case LR_EVT_COMPLETE:
		lr_act_complete(conn);
		lr_set_state(conn, LR_STATE_IDLE);
		break;
	case LR_EVT_DISCONNECT:
		lr_act_disconnect(conn);
		lr_set_state(conn, LR_STATE_DISCONNECT);
		break;
	default:
		/* Ignore other evts */
		break;
	}
}

static void lr_execute_fsm(struct ll_conn *conn, uint8_t evt, void *param)
{
	switch (conn->llcp.local.state) {
	case LR_STATE_DISCONNECT:
		lr_st_disconnect(conn, evt, param);
		break;
	case LR_STATE_IDLE:
		lr_st_idle(conn, evt, param);
		break;
	case LR_STATE_ACTIVE:
		lr_st_active(conn, evt, param);
		break;
	case LR_STATE_TERMINATE:
		lr_st_terminate(conn, evt, param);
		break;
	default:
		/* Unknown state */
		LL_ASSERT(0);
	}
}

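/* Initialize local request handling for a connection; the FSM starts in the
 * disconnected state with no procedure response timeout running.
 */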
void llcp_lr_init(struct ll_conn *conn)
{
	lr_set_state(conn, LR_STATE_DISCONNECT);
	conn->llcp.local.prt_expire = 0U;
}

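/* Entry points that inject events into the local request FSM */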
void llcp_lr_run(struct ll_conn *conn)
{
	lr_execute_fsm(conn, LR_EVT_RUN, NULL);
}

void llcp_lr_complete(struct ll_conn *conn)
{
	lr_execute_fsm(conn, LR_EVT_COMPLETE, NULL);
}

void llcp_lr_connect(struct ll_conn *conn)
{
	lr_execute_fsm(conn, LR_EVT_CONNECT, NULL);
}

void llcp_lr_disconnect(struct ll_conn *conn)
{
	lr_execute_fsm(conn, LR_EVT_DISCONNECT, NULL);
}

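/* Handle local connection termination: flush all pending procedures, stop
 * the procedure response timeout, reset the remote request incompatibility
 * state and return the FSM to idle.
 */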
void llcp_lr_terminate(struct ll_conn *conn)
{
	llcp_lr_flush_procedures(conn);
	llcp_lr_prt_stop(conn);
	llcp_rr_set_incompat(conn, 0U);
	lr_set_state(conn, LR_STATE_IDLE);
}

#ifdef ZTEST_UNITTEST

bool llcp_lr_is_disconnected(struct ll_conn *conn)
{
	return conn->llcp.local.state == LR_STATE_DISCONNECT;
}

bool llcp_lr_is_idle(struct ll_conn *conn)
{
	return conn->llcp.local.state == LR_STATE_IDLE;
}

struct proc_ctx *llcp_lr_dequeue(struct ll_conn *conn)
{
	return lr_dequeue(conn);
}

#endif /* ZTEST_UNITTEST */