1 /*
2 * Copyright (c) 2020 Demant
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/kernel.h>
8
9 #include <zephyr/sys/byteorder.h>
10 #include <zephyr/sys/slist.h>
11 #include <zephyr/sys/util.h>
12
13 #include <zephyr/bluetooth/hci_types.h>
14
15 #include "hal/ccm.h"
16
17 #include "util/util.h"
18 #include "util/mem.h"
19 #include "util/memq.h"
20 #include "util/mayfly.h"
21 #include "util/dbuf.h"
22
23 #include "pdu_df.h"
24 #include "lll/pdu_vendor.h"
25 #include "pdu.h"
26
27 #include "ll.h"
28 #include "ll_settings.h"
29
30 #include "lll.h"
31 #include "lll/lll_df_types.h"
32 #include "lll_conn.h"
33 #include "lll_conn_iso.h"
34
35 #include "ull_tx_queue.h"
36
37 #include "isoal.h"
38 #include "ull_internal.h"
39 #include "ull_iso_types.h"
40 #include "ull_conn_iso_types.h"
41 #include "ull_conn_iso_internal.h"
42
43 #include "ull_conn_types.h"
44 #include "ull_llcp.h"
45 #include "ull_llcp_internal.h"
46 #include "ull_conn_internal.h"
47
48 #include <soc.h>
49 #include "hal/debug.h"
50
51 static struct proc_ctx *lr_dequeue(struct ll_conn *conn);
52
/* LLCP Local Request FSM State */
enum lr_state {
	/* No local procedure running; waiting for one to be queued */
	LR_STATE_IDLE,
	/* A local procedure other than terminate is running */
	LR_STATE_ACTIVE,
	/* Link is down; only a connect event leaves this state */
	LR_STATE_DISCONNECT,
	/* The local terminate procedure is running */
	LR_STATE_TERMINATE,
};
60
/* LLCP Local Request FSM Event */
enum {
	/* Procedure run (start or continue the queued procedure) */
	LR_EVT_RUN,

	/* Procedure completed */
	LR_EVT_COMPLETE,

	/* Link connected */
	LR_EVT_CONNECT,

	/* Link disconnected */
	LR_EVT_DISCONNECT,
};
75
llcp_lr_check_done(struct ll_conn * conn,struct proc_ctx * ctx)76 void llcp_lr_check_done(struct ll_conn *conn, struct proc_ctx *ctx)
77 {
78 if (ctx->done) {
79 struct proc_ctx *ctx_header;
80
81 ctx_header = llcp_lr_peek(conn);
82 LL_ASSERT(ctx_header == ctx);
83
84 /* If we have a node rx it must not be marked RETAIN as
85 * the memory referenced would leak
86 */
87 LL_ASSERT(ctx->node_ref.rx == NULL ||
88 ctx->node_ref.rx->hdr.type != NODE_RX_TYPE_RETAIN);
89
90 lr_dequeue(conn);
91
92 llcp_proc_ctx_release(ctx);
93 }
94 }
95
96 /*
97 * LLCP Local Request Shared Data Locking
98 */
99
shared_data_access_lock(void)100 static ALWAYS_INLINE uint32_t shared_data_access_lock(void)
101 {
102 bool enabled;
103
104 if (mayfly_is_running()) {
105 /* We are in Mayfly context, nothing to be done */
106 return false;
107 }
108
109 /* We are in thread context and have to disable TICKER_USER_ID_ULL_HIGH */
110 enabled = mayfly_is_enabled(TICKER_USER_ID_THREAD, TICKER_USER_ID_ULL_HIGH) != 0U;
111 mayfly_enable(TICKER_USER_ID_THREAD, TICKER_USER_ID_ULL_HIGH, 0U);
112
113 return enabled;
114 }
115
shared_data_access_unlock(bool key)116 static ALWAYS_INLINE void shared_data_access_unlock(bool key)
117 {
118 if (key) {
119 /* We are in thread context and have to reenable TICKER_USER_ID_ULL_HIGH */
120 mayfly_enable(TICKER_USER_ID_THREAD, TICKER_USER_ID_ULL_HIGH, 1U);
121 }
122 }
123
124 /*
125 * LLCP Local Request FSM
126 */
127
/* Transition the Local Request FSM to the given state */
static void lr_set_state(struct ll_conn *conn, enum lr_state state)
{
	conn->llcp.local.state = state;
}
132
/*
 * Append a procedure context to the tail of the pending local
 * procedure queue.
 *
 * This function is called from both Thread and Mayfly (ISR),
 * make sure only a single context have access at a time.
 */
void llcp_lr_enqueue(struct ll_conn *conn, struct proc_ctx *ctx)
{
	bool key = shared_data_access_lock();

	sys_slist_append(&conn->llcp.local.pend_proc_list, &ctx->node);

	shared_data_access_unlock(key);
}
145
/*
 * Remove and return the procedure context at the head of the pending
 * local procedure queue; NULL when the queue is empty.
 *
 * This function is called from both Thread and Mayfly (ISR),
 * make sure only a single context have access at a time.
 */
static struct proc_ctx *lr_dequeue(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	bool key = shared_data_access_lock();

	ctx = (struct proc_ctx *)sys_slist_get(&conn->llcp.local.pend_proc_list);

	shared_data_access_unlock(key);

	return ctx;
}
162
/*
 * Return the procedure context at the head of the pending local
 * procedure queue without removing it; NULL when the queue is empty.
 *
 * This function is called from both Thread and Mayfly (ISR),
 * make sure only a single context have access at a time.
 */
struct proc_ctx *llcp_lr_peek(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	bool key = shared_data_access_lock();

	ctx = (struct proc_ctx *)sys_slist_peek_head(&conn->llcp.local.pend_proc_list);

	shared_data_access_unlock(key);

	return ctx;
}
178
/*
 * Return the first queued procedure context whose procedure type
 * matches 'proc'. Returns NULL when there is no match (the iteration
 * macro leaves 'ctx' NULL once the list is exhausted without a break).
 *
 * This function is called from both Thread and Mayfly (ISR),
 * make sure only a single context have access at a time.
 */
struct proc_ctx *llcp_lr_peek_proc(struct ll_conn *conn, uint8_t proc)
{
	struct proc_ctx *ctx, *tmp;

	bool key = shared_data_access_lock();

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&conn->llcp.local.pend_proc_list, ctx, tmp, node) {
		if (ctx->proc == proc) {
			break;
		}
	}

	shared_data_access_unlock(key);

	return ctx;
}
199
/* Return true when local procedure processing is paused */
bool llcp_lr_ispaused(struct ll_conn *conn)
{
	return conn->llcp.local.pause == 1U;
}
204
/* Pause local procedure processing */
void llcp_lr_pause(struct ll_conn *conn)
{
	conn->llcp.local.pause = 1U;
}
209
/* Resume local procedure processing */
void llcp_lr_resume(struct ll_conn *conn)
{
	conn->llcp.local.pause = 0U;
}
214
/* Restart the procedure response timeout from the connection's reload value */
void llcp_lr_prt_restart(struct ll_conn *conn)
{
	conn->llcp.local.prt_expire = conn->llcp.prt_reload;
}
219
/* Restart the procedure response timeout with an explicit expiry value */
void llcp_lr_prt_restart_with_value(struct ll_conn *conn, uint16_t value)
{
	conn->llcp.local.prt_expire = value;
}
224
/* Stop the procedure response timeout (0 disables expiry) */
void llcp_lr_prt_stop(struct ll_conn *conn)
{
	conn->llcp.local.prt_expire = 0U;
}
229
llcp_lr_flush_procedures(struct ll_conn * conn)230 void llcp_lr_flush_procedures(struct ll_conn *conn)
231 {
232 struct proc_ctx *ctx;
233
234 /* Flush all pending procedures */
235 ctx = lr_dequeue(conn);
236 while (ctx) {
237 llcp_nodes_release(conn, ctx);
238 llcp_proc_ctx_release(ctx);
239 ctx = lr_dequeue(conn);
240 }
241 }
242
/*
 * Dispatch a received LLCP PDU to the currently running local procedure.
 *
 * The rx node and its link are stored on the procedure context so the
 * procedure (or the cleanup below) can retain or release them; the PDU
 * is then routed to the rx handler selected by ctx->proc. Unknown
 * procedures assert.
 */
void llcp_lr_rx(struct ll_conn *conn, struct proc_ctx *ctx, memq_link_t *link,
		struct node_rx_pdu *rx)
{
	/* Store RX node and link */
	ctx->node_ref.rx = rx;
	ctx->node_ref.link = link;

	switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_LE_PING)
	case PROC_LE_PING:
		llcp_lp_comm_rx(conn, ctx, rx);
		break;
#endif /* CONFIG_BT_CTLR_LE_PING */
	case PROC_FEATURE_EXCHANGE:
		llcp_lp_comm_rx(conn, ctx, rx);
		break;
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN)
	case PROC_MIN_USED_CHANS:
		llcp_lp_comm_rx(conn, ctx, rx);
		break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN */
	case PROC_VERSION_EXCHANGE:
		llcp_lp_comm_rx(conn, ctx, rx);
		break;
#if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_CENTRAL)
	case PROC_ENCRYPTION_START:
	case PROC_ENCRYPTION_PAUSE:
		llcp_lp_enc_rx(conn, ctx, rx);
		break;
#endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_CENTRAL */
#ifdef CONFIG_BT_CTLR_PHY
	case PROC_PHY_UPDATE:
		llcp_lp_pu_rx(conn, ctx, rx);
		break;
#endif /* CONFIG_BT_CTLR_PHY */
	case PROC_CONN_UPDATE:
	case PROC_CONN_PARAM_REQ:
		llcp_lp_cu_rx(conn, ctx, rx);
		break;
	case PROC_TERMINATE:
		llcp_lp_comm_rx(conn, ctx, rx);
		break;
#if defined(CONFIG_BT_CENTRAL)
	case PROC_CHAN_MAP_UPDATE:
		llcp_lp_chmu_rx(conn, ctx, rx);
		break;
#endif /* CONFIG_BT_CENTRAL */
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case PROC_DATA_LENGTH_UPDATE:
		llcp_lp_comm_rx(conn, ctx, rx);
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
	case PROC_CTE_REQ:
		llcp_lp_comm_rx(conn, ctx, rx);
		break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	case PROC_CIS_TERMINATE:
		llcp_lp_comm_rx(conn, ctx, rx);
		break;
#endif /* defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) */
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
	case PROC_CIS_CREATE:
		llcp_lp_cc_rx(conn, ctx, rx);
		break;
#endif /* defined(CONFIG_BT_CTLR_CENTRAL_ISO) */
#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
	case PROC_SCA_UPDATE:
		llcp_lp_comm_rx(conn, ctx, rx);
		break;
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */
	default:
		/* Unknown procedure */
		LL_ASSERT(0);
		break;
	}

	/* If rx node was not retained clear reference */
	if (ctx->node_ref.rx && ctx->node_ref.rx->hdr.type != NODE_RX_TYPE_RETAIN) {
		ctx->node_ref.rx = NULL;
	}

	llcp_lr_check_done(conn, ctx);
}
328
/*
 * Dispatch a transmit-acknowledge for a sent LLCP PDU to the running
 * local procedure. Procedures without a tx-ack handler ignore the ack.
 * Clears the stored tx node reference afterwards and releases the
 * context if the procedure marked itself done.
 */
void llcp_lr_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, struct node_tx *tx)
{
	switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN)
	case PROC_MIN_USED_CHANS:
		llcp_lp_comm_tx_ack(conn, ctx, tx);
		break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN */
	case PROC_TERMINATE:
		llcp_lp_comm_tx_ack(conn, ctx, tx);
		break;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
	case PROC_DATA_LENGTH_UPDATE:
		llcp_lp_comm_tx_ack(conn, ctx, tx);
		break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#ifdef CONFIG_BT_CTLR_PHY
	case PROC_PHY_UPDATE:
		llcp_lp_pu_tx_ack(conn, ctx, tx);
		break;
#endif /* CONFIG_BT_CTLR_PHY */
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
	case PROC_CIS_TERMINATE:
		llcp_lp_comm_tx_ack(conn, ctx, tx);
		break;
#endif /* defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) */
	default:
		/* Ignore tx_ack */
		break;
	}

	/* Clear TX node reference */
	ctx->node_ref.tx_ack = NULL;

	llcp_lr_check_done(conn, ctx);
}
365
/*
 * Retry emitting a deferred host notification for the running local
 * procedure, then release the context if the procedure is done.
 */
void llcp_lr_tx_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
{
#if defined(CONFIG_BT_CTLR_PHY)
	/* Only the PHY update procedure has a deferred notification handler;
	 * all other procedures ignore this event.
	 */
	if (ctx->proc == PROC_PHY_UPDATE) {
		llcp_lp_pu_tx_ntf(conn, ctx);
	}
#endif /* CONFIG_BT_CTLR_PHY */

	llcp_lr_check_done(conn, ctx);
}
381
lr_act_run(struct ll_conn * conn)382 static void lr_act_run(struct ll_conn *conn)
383 {
384 struct proc_ctx *ctx;
385
386 ctx = llcp_lr_peek(conn);
387
388 switch (ctx->proc) {
389 #if defined(CONFIG_BT_CTLR_LE_PING)
390 case PROC_LE_PING:
391 llcp_lp_comm_run(conn, ctx, NULL);
392 break;
393 #endif /* CONFIG_BT_CTLR_LE_PING */
394 case PROC_FEATURE_EXCHANGE:
395 llcp_lp_comm_run(conn, ctx, NULL);
396 break;
397 #if defined(CONFIG_BT_CTLR_MIN_USED_CHAN)
398 case PROC_MIN_USED_CHANS:
399 llcp_lp_comm_run(conn, ctx, NULL);
400 break;
401 #endif /* CONFIG_BT_CTLR_MIN_USED_CHAN */
402 case PROC_VERSION_EXCHANGE:
403 llcp_lp_comm_run(conn, ctx, NULL);
404 break;
405 #if defined(CONFIG_BT_CTLR_LE_ENC) && defined(CONFIG_BT_CENTRAL)
406 case PROC_ENCRYPTION_START:
407 case PROC_ENCRYPTION_PAUSE:
408 llcp_lp_enc_run(conn, ctx, NULL);
409 break;
410 #endif /* CONFIG_BT_CTLR_LE_ENC && CONFIG_BT_CENTRAL */
411 #ifdef CONFIG_BT_CTLR_PHY
412 case PROC_PHY_UPDATE:
413 llcp_lp_pu_run(conn, ctx, NULL);
414 break;
415 #endif /* CONFIG_BT_CTLR_PHY */
416 case PROC_CONN_UPDATE:
417 case PROC_CONN_PARAM_REQ:
418 llcp_lp_cu_run(conn, ctx, NULL);
419 break;
420 case PROC_TERMINATE:
421 llcp_lp_comm_run(conn, ctx, NULL);
422 break;
423 #if defined(CONFIG_BT_CENTRAL)
424 case PROC_CHAN_MAP_UPDATE:
425 llcp_lp_chmu_run(conn, ctx, NULL);
426 break;
427 #endif /* CONFIG_BT_CENTRAL */
428 #if defined(CONFIG_BT_CTLR_DATA_LENGTH)
429 case PROC_DATA_LENGTH_UPDATE:
430 llcp_lp_comm_run(conn, ctx, NULL);
431 break;
432 #endif /* CONFIG_BT_CTLR_DATA_LENGTH */
433 #if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
434 case PROC_CTE_REQ:
435 /* 3rd partam null? */
436 llcp_lp_comm_run(conn, ctx, NULL);
437 break;
438 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
439 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO)
440 case PROC_CIS_CREATE:
441 llcp_lp_cc_run(conn, ctx, NULL);
442 break;
443 #endif /* CONFIG_BT_CTLR_CENTRAL_ISO */
444 #if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
445 case PROC_CIS_TERMINATE:
446 llcp_lp_comm_run(conn, ctx, NULL);
447 break;
448 #endif /* defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) */
449 #if defined(CONFIG_BT_CTLR_SCA_UPDATE)
450 case PROC_SCA_UPDATE:
451 llcp_lp_comm_run(conn, ctx, NULL);
452 break;
453 #endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
454 default:
455 /* Unknown procedure */
456 LL_ASSERT(0);
457 break;
458 }
459
460 llcp_lr_check_done(conn, ctx);
461 }
462
/*
 * Mark the procedure at the head of the queue as completed: stop the
 * procedure response timeout and flag the context as done so that
 * llcp_lr_check_done() can dequeue and release it.
 */
static void lr_act_complete(struct ll_conn *conn)
{
	struct proc_ctx *ctx;

	ctx = llcp_lr_peek(conn);
	LL_ASSERT(ctx != NULL);

	/* Stop procedure response timeout timer */
	llcp_lr_prt_stop(conn);

	/* Mark the procedure as safe to delete */
	ctx->done = 1U;
}
476
/* Connect action: no work needed beyond the state transition */
static void lr_act_connect(struct ll_conn *conn)
{
	/* Empty on purpose */
}
481
/* Disconnect action: drop and release all pending local procedures */
static void lr_act_disconnect(struct ll_conn *conn)
{
	llcp_lr_flush_procedures(conn);
}
486
/* Disconnect state handler: only a connect event is acted upon; every
 * other event is ignored while the link is down.
 */
static void lr_st_disconnect(struct ll_conn *conn, uint8_t evt, void *param)
{
	if (evt == LR_EVT_CONNECT) {
		lr_act_connect(conn);
		lr_set_state(conn, LR_STATE_IDLE);
	}
}
499
/* Idle state handler: start the next queued procedure on RUN (moving to
 * ACTIVE, or TERMINATE for the terminate procedure), follow link-down
 * events, and allow completion of procedures that never actually ran.
 */
static void lr_st_idle(struct ll_conn *conn, uint8_t evt, void *param)
{
	struct proc_ctx *ctx;

	switch (evt) {
	case LR_EVT_RUN:
		ctx = llcp_lr_peek(conn);
		if (ctx) {
			/*
			 * since the call to lr_act_run may release the context we need to remember
			 * which procedure we are running
			 */
			const enum llcp_proc curr_proc = ctx->proc;
			lr_act_run(conn);
			if (curr_proc != PROC_TERMINATE) {
				lr_set_state(conn, LR_STATE_ACTIVE);
			} else {
				lr_set_state(conn, LR_STATE_TERMINATE);
			}
		}
		break;
	case LR_EVT_DISCONNECT:
		lr_act_disconnect(conn);
		lr_set_state(conn, LR_STATE_DISCONNECT);
		break;
	case LR_EVT_COMPLETE:
		/* Some procedures like CTE request may be completed without actual run due to
		 * change in conditions while the procedure was waiting in a queue.
		 */
		lr_act_complete(conn);
		break;
	default:
		/* Ignore other evts */
		break;
	}
}
536
/* Active state handler: keep running the current procedure on RUN,
 * return to idle on completion, and follow link-down events.
 * NOTE(review): body is identical to lr_st_terminate() — kept as
 * separate handlers mirroring the FSM states.
 */
static void lr_st_active(struct ll_conn *conn, uint8_t evt, void *param)
{
	switch (evt) {
	case LR_EVT_RUN:
		if (llcp_lr_peek(conn)) {
			lr_act_run(conn);
		}
		break;
	case LR_EVT_COMPLETE:
		lr_act_complete(conn);
		lr_set_state(conn, LR_STATE_IDLE);
		break;
	case LR_EVT_DISCONNECT:
		lr_act_disconnect(conn);
		lr_set_state(conn, LR_STATE_DISCONNECT);
		break;
	default:
		/* Ignore other evts */
		break;
	}
}
558
/* Terminate state handler: keep running the terminate procedure on RUN,
 * return to idle on completion, and follow link-down events. Other
 * events are ignored.
 */
static void lr_st_terminate(struct ll_conn *conn, uint8_t evt, void *param)
{
	if (evt == LR_EVT_RUN) {
		if (llcp_lr_peek(conn)) {
			lr_act_run(conn);
		}
	} else if (evt == LR_EVT_COMPLETE) {
		lr_act_complete(conn);
		lr_set_state(conn, LR_STATE_IDLE);
	} else if (evt == LR_EVT_DISCONNECT) {
		lr_act_disconnect(conn);
		lr_set_state(conn, LR_STATE_DISCONNECT);
	}
}
580
/* Dispatch an event to the handler of the FSM's current state;
 * an unknown state asserts.
 */
static void lr_execute_fsm(struct ll_conn *conn, uint8_t evt, void *param)
{
	switch (conn->llcp.local.state) {
	case LR_STATE_DISCONNECT:
		lr_st_disconnect(conn, evt, param);
		break;
	case LR_STATE_IDLE:
		lr_st_idle(conn, evt, param);
		break;
	case LR_STATE_ACTIVE:
		lr_st_active(conn, evt, param);
		break;
	case LR_STATE_TERMINATE:
		lr_st_terminate(conn, evt, param);
		break;
	default:
		/* Unknown state */
		LL_ASSERT(0);
	}
}
601
/* Initialize the Local Request FSM: start in the disconnect state with
 * the procedure response timeout disabled (prt_expire == 0).
 */
void llcp_lr_init(struct ll_conn *conn)
{
	lr_set_state(conn, LR_STATE_DISCONNECT);
	conn->llcp.local.prt_expire = 0U;
}
607
/* Feed a RUN event into the Local Request FSM */
void llcp_lr_run(struct ll_conn *conn)
{
	lr_execute_fsm(conn, LR_EVT_RUN, NULL);
}
612
/* Feed a COMPLETE event into the Local Request FSM */
void llcp_lr_complete(struct ll_conn *conn)
{
	lr_execute_fsm(conn, LR_EVT_COMPLETE, NULL);
}
617
/* Feed a CONNECT (link established) event into the Local Request FSM */
void llcp_lr_connect(struct ll_conn *conn)
{
	lr_execute_fsm(conn, LR_EVT_CONNECT, NULL);
}
622
/* Feed a DISCONNECT (link lost) event into the Local Request FSM */
void llcp_lr_disconnect(struct ll_conn *conn)
{
	lr_execute_fsm(conn, LR_EVT_DISCONNECT, NULL);
}
627
/* Forcefully wind down local procedure handling: flush all pending
 * procedures, stop the response timeout, clear the remote-request
 * incompatibility marker and return the FSM to idle.
 */
void llcp_lr_terminate(struct ll_conn *conn)
{

	llcp_lr_flush_procedures(conn);
	llcp_lr_prt_stop(conn);
	llcp_rr_set_incompat(conn, 0U);
	lr_set_state(conn, LR_STATE_IDLE);
}
636
#ifdef ZTEST_UNITTEST

/* Test helper: true when the FSM is in the disconnect state */
bool llcp_lr_is_disconnected(struct ll_conn *conn)
{
	return conn->llcp.local.state == LR_STATE_DISCONNECT;
}

/* Test helper: true when the FSM is in the idle state */
bool llcp_lr_is_idle(struct ll_conn *conn)
{
	return conn->llcp.local.state == LR_STATE_IDLE;
}

/* Test helper: expose the static lr_dequeue() to unit tests */
struct proc_ctx *llcp_lr_dequeue(struct ll_conn *conn)
{
	return lr_dequeue(conn);
}

#endif
655