1 /* conn.c - Bluetooth connection handling */
2
3 /*
4 * Copyright (c) 2015-2016 Intel Corporation
5 * Copyright (c) 2025 Nordic Semiconductor ASA
6 *
7 * SPDX-License-Identifier: Apache-2.0
8 */
9
10 #include <zephyr/kernel.h>
11 #include <string.h>
12 #include <errno.h>
13 #include <stdbool.h>
14 #include <zephyr/sys/atomic.h>
15 #include <zephyr/sys/byteorder.h>
16 #include <zephyr/sys/check.h>
17 #include <zephyr/sys/iterable_sections.h>
18 #include <zephyr/sys/util.h>
19 #include <zephyr/sys/util_macro.h>
20 #include <zephyr/sys/slist.h>
21 #include <zephyr/debug/stack.h>
22 #include <zephyr/sys/__assert.h>
23
24 #include <zephyr/bluetooth/hci.h>
25 #include <zephyr/bluetooth/bluetooth.h>
26 #include <zephyr/bluetooth/direction.h>
27 #include <zephyr/bluetooth/conn.h>
28 #include <zephyr/bluetooth/hci_vs.h>
29 #include <zephyr/bluetooth/att.h>
30
31 #include "common/assert.h"
32 #include "common/bt_str.h"
33
34 #include "buf_view.h"
35 #include "addr_internal.h"
36 #include "hci_core.h"
37 #include "id.h"
38 #include "adv.h"
39 #include "scan.h"
40 #include "conn_internal.h"
41 #include "l2cap_internal.h"
42 #include "keys.h"
43 #include "smp.h"
44 #include "classic/ssp.h"
45 #include "att_internal.h"
46 #include "iso_internal.h"
47 #include "direction_internal.h"
48 #include "classic/sco_internal.h"
49
50 #define LOG_LEVEL CONFIG_BT_CONN_LOG_LEVEL
51 #include <zephyr/logging/log.h>
52 LOG_MODULE_REGISTER(bt_conn);
53
54 K_FIFO_DEFINE(free_tx);
55
56 #if defined(CONFIG_BT_CONN_TX_NOTIFY_WQ)
57 static struct k_work_q conn_tx_workq;
58 static K_KERNEL_STACK_DEFINE(conn_tx_workq_thread_stack, CONFIG_BT_CONN_TX_NOTIFY_WQ_STACK_SIZE);
59 #endif /* CONFIG_BT_CONN_TX_NOTIFY_WQ */
60
61 static void tx_free(struct bt_conn_tx *tx);
62
conn_tx_destroy(struct bt_conn * conn,struct bt_conn_tx * tx)63 static void conn_tx_destroy(struct bt_conn *conn, struct bt_conn_tx *tx)
64 {
65 __ASSERT_NO_MSG(tx);
66
67 bt_conn_tx_cb_t cb = tx->cb;
68 void *user_data = tx->user_data;
69
70 LOG_DBG("conn %p tx %p cb %p ud %p", conn, tx, cb, user_data);
71
72 /* Free up TX metadata before calling callback in case the callback
73 * tries to allocate metadata
74 */
75 tx_free(tx);
76
77 if (cb) {
78 cb(conn, user_data, -ESHUTDOWN);
79 }
80 }
81
82 #if defined(CONFIG_BT_CONN_TX)
83 static void tx_complete_work(struct k_work *work);
84 #endif /* CONFIG_BT_CONN_TX */
85
86 static void notify_recycled_conn_slot(void);
87
88 void bt_tx_irq_raise(void);
89
90 /* Group Connected BT_CONN only in this */
91 #if defined(CONFIG_BT_CONN)
92 /* Peripheral timeout to initialize Connection Parameter Update procedure */
93 #define CONN_UPDATE_TIMEOUT K_MSEC(CONFIG_BT_CONN_PARAM_UPDATE_TIMEOUT)
94
95 static void deferred_work(struct k_work *work);
96 static void notify_connected(struct bt_conn *conn);
97
98 static struct bt_conn acl_conns[CONFIG_BT_MAX_CONN];
99 NET_BUF_POOL_DEFINE(acl_tx_pool, CONFIG_BT_L2CAP_TX_BUF_COUNT,
100 BT_L2CAP_BUF_SIZE(CONFIG_BT_L2CAP_TX_MTU),
101 CONFIG_BT_CONN_TX_USER_DATA_SIZE, NULL);
102
103 #if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CLASSIC)
104 const struct bt_conn_auth_cb *bt_auth;
105 sys_slist_t bt_auth_info_cbs = SYS_SLIST_STATIC_INIT(&bt_auth_info_cbs);
106 #endif /* CONFIG_BT_SMP || CONFIG_BT_CLASSIC */
107
108
109 static sys_slist_t conn_cbs = SYS_SLIST_STATIC_INIT(&conn_cbs);
110
111 static struct bt_conn_tx conn_tx[CONFIG_BT_CONN_TX_MAX];
112
113 #if defined(CONFIG_BT_CLASSIC)
114 static int bt_hci_connect_br_cancel(struct bt_conn *conn);
115
116 static struct bt_conn sco_conns[CONFIG_BT_MAX_SCO_CONN];
117 #endif /* CONFIG_BT_CLASSIC */
118 #endif /* CONFIG_BT_CONN */
119
120 #if defined(CONFIG_BT_CONN_TX)
121 void frag_destroy(struct net_buf *buf);
122
123 /* Storage for fragments (views) into the upper layers' PDUs. */
124 /* TODO: remove user-data requirements */
125 NET_BUF_POOL_FIXED_DEFINE(fragments, CONFIG_BT_CONN_FRAG_COUNT, 0,
126 CONFIG_BT_CONN_TX_USER_DATA_SIZE, frag_destroy);
127
128 struct frag_md {
129 struct bt_buf_view_meta view_meta;
130 };
131 struct frag_md frag_md_pool[CONFIG_BT_CONN_FRAG_COUNT];
132
get_frag_md(struct net_buf * fragment)133 struct frag_md *get_frag_md(struct net_buf *fragment)
134 {
135 return &frag_md_pool[net_buf_id(fragment)];
136 }
137
frag_destroy(struct net_buf * frag)138 void frag_destroy(struct net_buf *frag)
139 {
140 /* allow next view to be allocated (and unlock the parent buf) */
141 bt_buf_destroy_view(frag, &get_frag_md(frag)->view_meta);
142
143 LOG_DBG("");
144
145 /* Kick the TX processor to send the rest of the frags. */
146 bt_tx_irq_raise();
147 }
148
/* Allocate a view ("window") of at most @p winsize bytes into @p outside.
 *
 * Returns NULL when no fragment buffer is free. Keeping a reference to
 * @p outside is the caller's responsibility.
 */
static struct net_buf *get_data_frag(struct net_buf *outside, size_t winsize)
{
	struct net_buf *window;

	__ASSERT_NO_MSG(!bt_buf_has_view(outside));

	window = net_buf_alloc_len(&fragments, 0, K_NO_WAIT);
	if (window == NULL) {
		return NULL;
	}

	window = bt_buf_make_view(window, outside, winsize,
				  &get_frag_md(window)->view_meta);

	LOG_DBG("get-acl-frag: outside %p window %p size %zu", outside, window, winsize);

	return window;
}
168 #else /* !CONFIG_BT_CONN_TX */
static struct net_buf *get_data_frag(struct net_buf *outside, size_t winsize)
{
	ARG_UNUSED(outside);
	ARG_UNUSED(winsize);

	/* Never called at runtime: present only so compilation succeeds and
	 * the linker can later discard this stub.
	 */
	return NULL;
}
180 #endif /* CONFIG_BT_CONN_TX */
181
182 #if defined(CONFIG_BT_ISO)
183 extern struct bt_conn iso_conns[CONFIG_BT_ISO_MAX_CHAN];
184
185 /* Callback TX buffers for ISO */
186 static struct bt_conn_tx iso_tx[CONFIG_BT_ISO_TX_BUF_COUNT];
187
bt_conn_iso_init(void)188 int bt_conn_iso_init(void)
189 {
190 for (size_t i = 0; i < ARRAY_SIZE(iso_tx); i++) {
191 k_fifo_put(&free_tx, &iso_tx[i]);
192 }
193
194 return 0;
195 }
196 #endif /* CONFIG_BT_ISO */
197
/* Return the semaphore tracking free controller buffers for @p conn.
 *
 * Selection depends on the connection type and on which buffer pools the
 * controller advertised:
 *  - BR/EDR links (or any non-ISO link when no LE ACL buffers were
 *    reported) use the BR/EDR packet semaphore.
 *  - ISO links use the dedicated ISO packet semaphore only if the
 *    controller reported ISO buffers; otherwise NULL.
 *  - LE ACL links use the LE ACL packet semaphore.
 *
 * Returns NULL when no suitable pool exists.
 */
struct k_sem *bt_conn_get_pkts(struct bt_conn *conn)
{
#if defined(CONFIG_BT_CLASSIC)
	if (conn->type == BT_CONN_TYPE_BR || !bt_dev.le.acl_mtu) {
		return &bt_dev.br.pkts;
	}
#endif /* CONFIG_BT_CLASSIC */

#if defined(CONFIG_BT_ISO)
	/* Use ISO pkts semaphore if LE Read Buffer Size command returned
	 * dedicated ISO buffers.
	 */
	if (conn->type == BT_CONN_TYPE_ISO) {
		if (bt_dev.le.iso_mtu && bt_dev.le.iso_limit != 0) {
			return &bt_dev.le.iso_pkts;
		}

		return NULL;
	}
#endif /* CONFIG_BT_ISO */

#if defined(CONFIG_BT_CONN)
	if (bt_dev.le.acl_mtu) {
		return &bt_dev.le.acl_pkts;
	}
#endif /* CONFIG_BT_CONN */

	return NULL;
}
227
/* Human-readable name for a connection state (used in debug logs). */
static inline const char *state2str(bt_conn_state_t state)
{
	const char *name;

	switch (state) {
	case BT_CONN_DISCONNECTED:
		name = "disconnected";
		break;
	case BT_CONN_DISCONNECT_COMPLETE:
		name = "disconnect-complete";
		break;
	case BT_CONN_INITIATING:
		name = "initiating";
		break;
	case BT_CONN_SCAN_BEFORE_INITIATING:
		name = "scan-before-initiating";
		break;
	case BT_CONN_INITIATING_FILTER_LIST:
		name = "initiating-filter-list";
		break;
	case BT_CONN_ADV_CONNECTABLE:
		name = "adv-connectable";
		break;
	case BT_CONN_ADV_DIR_CONNECTABLE:
		name = "adv-dir-connectable";
		break;
	case BT_CONN_CONNECTED:
		name = "connected";
		break;
	case BT_CONN_DISCONNECTING:
		name = "disconnecting";
		break;
	default:
		name = "(unknown)";
		break;
	}

	return name;
}
253
tx_free(struct bt_conn_tx * tx)254 static void tx_free(struct bt_conn_tx *tx)
255 {
256 LOG_DBG("%p", tx);
257 tx->cb = NULL;
258 tx->user_data = NULL;
259 k_fifo_put(&free_tx, tx);
260 }
261
262 #if defined(CONFIG_BT_CONN_TX)
/* Return the workqueue on which TX-complete notifications run: the
 * dedicated connection TX workqueue when enabled, otherwise the system
 * workqueue.
 */
static struct k_work_q *tx_notify_workqueue_get(void)
{
#if defined(CONFIG_BT_CONN_TX_NOTIFY_WQ)
	return &conn_tx_workq;
#else
	return &k_sys_work_q;
#endif /* CONFIG_BT_CONN_TX_NOTIFY_WQ */
}
271
/* Drain @p conn's tx_complete list, invoking each pending user callback
 * with success (0).
 *
 * Must run only on the TX-notify workqueue thread. List removal is done
 * under irq_lock to guard against concurrent producers appending to
 * `tx_complete`.
 */
static void tx_notify_process(struct bt_conn *conn)
{
	/* TX notify processing is done only from a single thread. */
	__ASSERT_NO_MSG(k_current_get() == k_work_queue_thread_get(tx_notify_workqueue_get()));

	LOG_DBG("conn %p", (void *)conn);

	while (1) {
		struct bt_conn_tx *tx = NULL;
		unsigned int key;
		bt_conn_tx_cb_t cb;
		void *user_data;

		/* Pop one completed TX context, if any. */
		key = irq_lock();
		if (!sys_slist_is_empty(&conn->tx_complete)) {
			const sys_snode_t *node = sys_slist_get_not_empty(&conn->tx_complete);

			tx = CONTAINER_OF(node, struct bt_conn_tx, node);
		}
		irq_unlock(key);

		if (!tx) {
			return;
		}

		LOG_DBG("tx %p cb %p user_data %p", tx, tx->cb, tx->user_data);

		/* Copy over the params */
		cb = tx->cb;
		user_data = tx->user_data;

		/* Free up TX notify since there may be user waiting */
		tx_free(tx);

		/* Run the callback, at this point it should be safe to
		 * allocate new buffers since the TX should have been
		 * unblocked by tx_free.
		 */
		if (cb) {
			cb(conn, user_data, 0);
		}

		LOG_DBG("raise TX IRQ");
		bt_tx_irq_raise();
	}
}
318 #endif /* CONFIG_BT_CONN_TX */
319
/* Run TX-complete notifications for @p conn.
 *
 * If called from the notify workqueue thread itself, the callbacks run
 * inline. Otherwise the work item is submitted to that workqueue and,
 * when @p wait_for_completion is true, the call blocks until the work
 * has executed.
 */
void bt_conn_tx_notify(struct bt_conn *conn, bool wait_for_completion)
{
#if defined(CONFIG_BT_CONN_TX)
	/* Ensure that function is called only from a single context. */
	if (k_current_get() == k_work_queue_thread_get(tx_notify_workqueue_get())) {
		tx_notify_process(conn);
	} else {
		struct k_work_sync sync;
		int err;

		err = k_work_submit_to_queue(tx_notify_workqueue_get(), &conn->tx_complete_work);
		__ASSERT(err >= 0, "couldn't submit (err %d)", err);

		if (wait_for_completion) {
			(void)k_work_flush(&conn->tx_complete_work, &sync);
		}
	}
#else
	ARG_UNUSED(conn);
	ARG_UNUSED(wait_for_completion);
#endif /* CONFIG_BT_CONN_TX */
}
342
bt_conn_new(struct bt_conn * conns,size_t size)343 struct bt_conn *bt_conn_new(struct bt_conn *conns, size_t size)
344 {
345 struct bt_conn *conn = NULL;
346 int i;
347
348 for (i = 0; i < size; i++) {
349 if (atomic_cas(&conns[i].ref, 0, 1)) {
350 conn = &conns[i];
351 break;
352 }
353 }
354
355 if (!conn) {
356 return NULL;
357 }
358
359 (void)memset(conn, 0, offsetof(struct bt_conn, ref));
360
361 #if defined(CONFIG_BT_CONN)
362 k_work_init_delayable(&conn->deferred_work, deferred_work);
363 #endif /* CONFIG_BT_CONN */
364 #if defined(CONFIG_BT_CONN_TX)
365 k_work_init(&conn->tx_complete_work, tx_complete_work);
366 #endif /* CONFIG_BT_CONN_TX */
367
368 return conn;
369 }
370
bt_conn_reset_rx_state(struct bt_conn * conn)371 void bt_conn_reset_rx_state(struct bt_conn *conn)
372 {
373 if (!conn->rx) {
374 return;
375 }
376
377 net_buf_unref(conn->rx);
378 conn->rx = NULL;
379 }
380
/* Reassemble incoming ACL fragments into a complete L2CAP PDU.
 *
 * @p flags is the packet-boundary flag from the HCI ACL header:
 * BT_ACL_START begins a new PDU (kept in conn->rx), BT_ACL_CONT appends
 * to it. Once the accumulated length matches the L2CAP length header the
 * PDU is handed to bt_l2cap_recv(); on framing errors the partial PDU is
 * dropped via bt_conn_reset_rx_state().
 *
 * Always consumes (unrefs) @p buf.
 */
static void bt_acl_recv(struct bt_conn *conn, struct net_buf *buf, uint8_t flags)
{
	uint16_t acl_total_len;

	bt_acl_set_ncp_sent(buf, false);

	/* Check packet boundary flags */
	switch (flags) {
	case BT_ACL_START:
		/* A START while a PDU is pending means the previous PDU was
		 * truncated: drop it and start over.
		 */
		if (conn->rx) {
			LOG_ERR("Unexpected first L2CAP frame");
			bt_conn_reset_rx_state(conn);
		}

		LOG_DBG("First, len %u final %u", buf->len,
			(buf->len < sizeof(uint16_t)) ? 0 : sys_get_le16(buf->data));

		/* Keep the first fragment as the reassembly buffer. */
		conn->rx = net_buf_ref(buf);
		break;
	case BT_ACL_CONT:
		if (!conn->rx) {
			LOG_ERR("Unexpected L2CAP continuation");
			bt_conn_reset_rx_state(conn);
			net_buf_unref(buf);
			return;
		}

		if (!buf->len) {
			LOG_DBG("Empty ACL_CONT");
			net_buf_unref(buf);
			return;
		}

		if (buf->len > net_buf_tailroom(conn->rx)) {
			LOG_ERR("Not enough buffer space for L2CAP data");

			/* Frame is not complete but we still pass it to L2CAP
			 * so that it may handle error on protocol level
			 * eg disconnect channel.
			 */
			bt_l2cap_recv(conn, conn->rx, false);
			conn->rx = NULL;
			net_buf_unref(buf);
			return;
		}

		net_buf_add_mem(conn->rx, buf->data, buf->len);
		break;
	default:
		/* BT_ACL_START_NO_FLUSH and BT_ACL_COMPLETE are not allowed on
		 * LE-U from Controller to Host.
		 * Only BT_ACL_POINT_TO_POINT is supported.
		 */
		LOG_ERR("Unexpected ACL flags (0x%02x)", flags);
		bt_conn_reset_rx_state(conn);
		net_buf_unref(buf);
		return;
	}

	if (conn->rx->len < sizeof(uint16_t)) {
		/* Still not enough data received to retrieve the L2CAP header
		 * length field.
		 */
		bt_send_one_host_num_completed_packets(conn->handle);
		bt_acl_set_ncp_sent(buf, true);
		net_buf_unref(buf);

		return;
	}

	/* Total expected PDU size = L2CAP payload length + L2CAP header. */
	acl_total_len = sys_get_le16(conn->rx->data) + sizeof(struct bt_l2cap_hdr);

	if (conn->rx->len < acl_total_len) {
		/* L2CAP frame not complete. */
		bt_send_one_host_num_completed_packets(conn->handle);
		bt_acl_set_ncp_sent(buf, true);
		net_buf_unref(buf);

		return;
	}

	net_buf_unref(buf);

	if (conn->rx->len > acl_total_len) {
		LOG_ERR("ACL len mismatch (%u > %u)", conn->rx->len, acl_total_len);
		bt_conn_reset_rx_state(conn);
		return;
	}

	/* L2CAP frame complete. */
	buf = conn->rx;
	conn->rx = NULL;

	__ASSERT(buf->ref == 1, "buf->ref %d", buf->ref);

	LOG_DBG("Successfully parsed %u byte L2CAP packet", buf->len);
	bt_l2cap_recv(conn, buf, true);
}
479
/* Dispatch an incoming HCI data packet to the ISO or ACL receive path. */
void bt_conn_recv(struct bt_conn *conn, struct net_buf *buf, uint8_t flags)
{
	/* Make sure we notify any pending TX callbacks before processing
	 * new data for this connection.
	 *
	 * Always do so from the same context for sanity. In this case that will
	 * be either a dedicated Bluetooth connection TX workqueue or system workqueue.
	 */
	bt_conn_tx_notify(conn, true);

	LOG_DBG("handle %u len %u flags %02x", conn->handle, buf->len, flags);

	if (IS_ENABLED(CONFIG_BT_ISO_RX) && conn->type == BT_CONN_TYPE_ISO) {
		bt_iso_recv(conn, buf, flags);
		return;
	}

	if (IS_ENABLED(CONFIG_BT_CONN)) {
		bt_acl_recv(conn, buf, flags);
		return;
	}

	__ASSERT(false, "Invalid connection type %u", conn->type);
}
501
dont_have_tx_context(struct bt_conn * conn)502 static bool dont_have_tx_context(struct bt_conn *conn)
503 {
504 return k_fifo_is_empty(&free_tx);
505 }
506
conn_tx_alloc(void)507 static struct bt_conn_tx *conn_tx_alloc(void)
508 {
509 struct bt_conn_tx *ret = k_fifo_get(&free_tx, K_NO_WAIT);
510
511 LOG_DBG("%p", ret);
512
513 return ret;
514 }
515
/* Position of an HCI fragment within the upper-layer PDU being sent. */
enum {
	FRAG_START,	/* first fragment of a multi-fragment PDU */
	FRAG_CONT,	/* middle fragment */
	FRAG_SINGLE,	/* PDU fits in a single fragment */
	FRAG_END	/* last fragment of a multi-fragment PDU */
};
522
/* Prepend an HCI ACL header to @p buf and hand it to the HCI driver.
 *
 * @p flags is one of the FRAG_* values and is mapped to the ACL
 * packet-boundary flags. Returns the bt_send() result or -EINVAL for an
 * unknown fragment flag.
 */
static int send_acl(struct bt_conn *conn, struct net_buf *buf, uint8_t flags)
{
	struct bt_hci_acl_hdr *hdr;
	uint8_t pb;

	switch (flags) {
	case FRAG_START:
	case FRAG_SINGLE:
		pb = BT_ACL_START_NO_FLUSH;
		break;
	case FRAG_CONT:
	case FRAG_END:
		pb = BT_ACL_CONT;
		break;
	default:
		return -EINVAL;
	}

	/* Build the header in the buffer's headroom. */
	hdr = net_buf_push(buf, sizeof(*hdr));
	hdr->handle = sys_cpu_to_le16(bt_acl_handle_pack(conn->handle, pb));
	hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));

	bt_buf_set_type(buf, BT_BUF_ACL_OUT);

	return bt_send(buf);
}
548
contains_iso_timestamp(struct net_buf * buf)549 static enum bt_iso_timestamp contains_iso_timestamp(struct net_buf *buf)
550 {
551 enum bt_iso_timestamp ts;
552
553 if (net_buf_headroom(buf) ==
554 (BT_BUF_ISO_SIZE(0) - sizeof(struct bt_hci_iso_sdu_ts_hdr))) {
555 ts = BT_ISO_TS_PRESENT;
556 } else {
557 ts = BT_ISO_TS_ABSENT;
558 }
559
560 return ts;
561 }
562
/* Prepend an HCI ISO header to @p buf and hand it to the HCI driver.
 *
 * @p flags is one of the FRAG_* values, mapped to the ISO packet-boundary
 * flags. Returns the bt_send() result or -EINVAL for an unknown flag.
 */
static int send_iso(struct bt_conn *conn, struct net_buf *buf, uint8_t flags)
{
	struct bt_hci_iso_hdr *hdr;
	enum bt_iso_timestamp ts;
	uint8_t pb;

	switch (flags) {
	case FRAG_START:
		pb = BT_ISO_START;
		break;
	case FRAG_CONT:
		pb = BT_ISO_CONT;
		break;
	case FRAG_SINGLE:
		pb = BT_ISO_SINGLE;
		break;
	case FRAG_END:
		pb = BT_ISO_END;
		break;
	default:
		return -EINVAL;
	}

	/* The TS bit is set by `iso.c:conn_iso_send`. This special byte
	 * prepends the whole SDU, and won't be there for individual fragments.
	 *
	 * Conveniently, it is only legal to set the TS bit on the first HCI
	 * fragment, so we don't have to pass this extra metadata around for
	 * every fragment, only the first one.
	 */
	if (pb == BT_ISO_SINGLE || pb == BT_ISO_START) {
		ts = contains_iso_timestamp(buf);
	} else {
		ts = BT_ISO_TS_ABSENT;
	}

	hdr = net_buf_push(buf, sizeof(*hdr));
	hdr->handle = sys_cpu_to_le16(bt_iso_handle_pack(conn->handle, pb, ts));
	hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));

	bt_buf_set_type(buf, BT_BUF_ISO_OUT);

	return bt_send(buf);
}
606
/* Maximum HCI data payload (bytes) one fragment may carry for @p conn,
 * chosen from the controller-reported buffer sizes by connection type.
 */
static inline uint16_t conn_mtu(struct bt_conn *conn)
{
#if defined(CONFIG_BT_CLASSIC)
	/* BR/EDR link, or a non-ISO link when the controller reported no
	 * dedicated LE ACL buffers (shared-buffer controllers).
	 */
	if (conn->type == BT_CONN_TYPE_BR ||
	    (conn->type != BT_CONN_TYPE_ISO && !bt_dev.le.acl_mtu)) {
		return bt_dev.br.mtu;
	}
#endif /* CONFIG_BT_CLASSIC */
#if defined(CONFIG_BT_ISO)
	if (conn->type == BT_CONN_TYPE_ISO) {
		return bt_dev.le.iso_mtu;
	}
#endif /* CONFIG_BT_ISO */
#if defined(CONFIG_BT_CONN)
	return bt_dev.le.acl_mtu;
#else
	return 0;
#endif /* CONFIG_BT_CONN */
}
626
is_classic_conn(struct bt_conn * conn)627 static bool is_classic_conn(struct bt_conn *conn)
628 {
629 return (IS_ENABLED(CONFIG_BT_CLASSIC) &&
630 conn->type == BT_CONN_TYPE_BR);
631 }
632
is_iso_tx_conn(struct bt_conn * conn)633 static bool is_iso_tx_conn(struct bt_conn *conn)
634 {
635 return IS_ENABLED(CONFIG_BT_ISO_TX) &&
636 conn->type == BT_CONN_TYPE_ISO;
637 }
638
is_le_conn(struct bt_conn * conn)639 static bool is_le_conn(struct bt_conn *conn)
640 {
641 return IS_ENABLED(CONFIG_BT_CONN) && conn->type == BT_CONN_TYPE_LE;
642 }
643
is_acl_conn(struct bt_conn * conn)644 static bool is_acl_conn(struct bt_conn *conn)
645 {
646 return is_le_conn(conn) || is_classic_conn(conn);
647 }
648
/* Send one HCI fragment of @p buf (up to conn_mtu() bytes of the
 * remaining @p len) to the controller.
 *
 * The caller (the TX processor) must already have verified that a
 * controller buffer, a TX context and a view buffer are available.
 * @p cb / @p ud are the user completion callback and its argument,
 * attached to the TX context for this fragment.
 *
 * Returns 0 on success, -EMSGSIZE/-EIO on precondition failure, -ENOMEM
 * if a resource unexpectedly disappeared, -EIO on driver error.
 *
 * Fix: when conn_tx_alloc() fails, the controller-buffer credit taken
 * with k_sem_take() above was never returned, leaking one controller
 * buffer permanently when asserts are compiled out.
 */
static int send_buf(struct bt_conn *conn, struct net_buf *buf,
		    size_t len, void *cb, void *ud)
{
	struct net_buf *frag = NULL;
	struct bt_conn_tx *tx = NULL;
	uint8_t flags;
	int err;

	if (buf->len == 0) {
		__ASSERT_NO_MSG(0);

		return -EMSGSIZE;
	}

	if (bt_buf_has_view(buf)) {
		__ASSERT_NO_MSG(0);

		return -EIO;
	}

	LOG_DBG("conn %p buf %p len %zu buf->len %u cb %p ud %p",
		conn, buf, len, buf->len, cb, ud);

	/* Acquire the right to send 1 packet to the controller */
	if (k_sem_take(bt_conn_get_pkts(conn), K_NO_WAIT)) {
		/* This shouldn't happen now that we acquire the resources
		 * before calling `send_buf` (in `get_conn_ready`). We say
		 * "acquire" as `tx_processor()` is not re-entrant and the
		 * thread is non-preemptible. So the sem value shouldn't change.
		 */
		__ASSERT(0, "No controller bufs");

		return -ENOMEM;
	}

	/* Allocate and set the TX context */
	tx = conn_tx_alloc();

	/* See big comment above */
	if (!tx) {
		__ASSERT(0, "No TX context");

		/* Return the controller-buffer credit taken above so it is
		 * not leaked when asserts are compiled out.
		 */
		k_sem_give(bt_conn_get_pkts(conn));

		return -ENOMEM;
	}

	tx->cb = cb;
	tx->user_data = ud;

	uint16_t frag_len = MIN(conn_mtu(conn), len);

	__ASSERT_NO_MSG(buf->ref == 1);

	if (buf->len > frag_len) {
		LOG_DBG("keep %p around", buf);
		frag = get_data_frag(net_buf_ref(buf), frag_len);
	} else {
		LOG_DBG("move %p ref in", buf);
		/* Move the ref into `frag` for the last TX. That way `buf` will
		 * get destroyed when `frag` is destroyed.
		 */
		frag = get_data_frag(buf, frag_len);
	}

	/* Caller is supposed to check we have all resources to send */
	__ASSERT_NO_MSG(frag != NULL);

	/* If the current buffer doesn't fit a controller buffer */
	if (len > conn_mtu(conn)) {
		flags = conn->next_is_frag ? FRAG_CONT : FRAG_START;
		conn->next_is_frag = true;
	} else {
		flags = conn->next_is_frag ? FRAG_END : FRAG_SINGLE;
		conn->next_is_frag = false;
	}

	LOG_DBG("send frag: buf %p len %d", buf, frag_len);

	/* At this point, the buffer is either a fragment or a full HCI packet.
	 * The flags are also valid.
	 */
	LOG_DBG("conn %p buf %p len %u flags 0x%02x",
		conn, frag, frag->len, flags);

	/* Keep track of sent buffers. We have to append _before_
	 * sending, as we might get pre-empted if the HCI driver calls
	 * k_yield() before returning.
	 *
	 * In that case, the controller could also send a num-complete-packets
	 * event and our handler will be confused that there is no corresponding
	 * callback node in the `tx_pending` list.
	 */
	atomic_inc(&conn->in_ll);
	sys_slist_append(&conn->tx_pending, &tx->node);

	if (is_iso_tx_conn(conn)) {
		err = send_iso(conn, frag, flags);
	} else if (is_acl_conn(conn)) {
		err = send_acl(conn, frag, flags);
	} else {
		err = -EINVAL; /* asserts may be disabled */
		__ASSERT(false, "Invalid connection type %u", conn->type);
	}

	if (!err) {
		return 0;
	}

	/* Remove buf from pending list */
	atomic_dec(&conn->in_ll);
	(void)sys_slist_find_and_remove(&conn->tx_pending, &tx->node);

	LOG_ERR("Unable to send to driver (err %d)", err);

	/* If we get here, something has seriously gone wrong: the `parent` buf
	 * (of which the current fragment belongs) should also be destroyed.
	 */
	net_buf_unref(frag);

	/* `buf` might not get destroyed right away, and its `tx`
	 * pointer will still be reachable. Make sure that we don't try
	 * to use the destroyed context later.
	 */
	conn_tx_destroy(conn, tx);
	k_sem_give(bt_conn_get_pkts(conn));

	/* Merge HCI driver errors */
	return -EIO;
}
777
/* Poll signal for connection changes.
 * NOTE(review): the raise sites are not in this chunk; presumably
 * signalled on connection state transitions — confirm against the rest
 * of the file.
 */
static struct k_poll_signal conn_change =
	K_POLL_SIGNAL_INITIALIZER(conn_change);
780
conn_destroy(struct bt_conn * conn,void * data)781 static void conn_destroy(struct bt_conn *conn, void *data)
782 {
783 if (conn->state == BT_CONN_CONNECTED ||
784 conn->state == BT_CONN_DISCONNECTING) {
785 bt_conn_set_state(conn, BT_CONN_DISCONNECT_COMPLETE);
786 }
787
788 if (conn->state != BT_CONN_DISCONNECTED) {
789 bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
790 }
791 }
792
/* Force every connection object of every type into the disconnected
 * state (used when tearing the stack down).
 */
void bt_conn_cleanup_all(void)
{
	bt_conn_foreach(BT_CONN_TYPE_ALL, conn_destroy, NULL);
}
797
798 #if defined(CONFIG_BT_CONN)
799 /* Returns true if L2CAP has data to send on this conn */
acl_has_data(struct bt_conn * conn)800 static bool acl_has_data(struct bt_conn *conn)
801 {
802 return sys_slist_peek_head(&conn->l2cap_data_ready) != NULL;
803 }
804 #endif /* defined(CONFIG_BT_CONN) */
805
806 /* Connection "Scheduler" of sorts:
807 *
808 * Will try to get the optimal number of queued buffers for the connection.
809 *
810 * Partitions the controller's buffers to each connection according to some
811 * heuristic. This is made to be tunable, fairness, simplicity, throughput etc.
812 *
813 * In the future, this will be a hook exposed to the application.
814 */
should_stop_tx(struct bt_conn * conn)815 static bool should_stop_tx(struct bt_conn *conn)
816 {
817 LOG_DBG("%p", conn);
818
819 if (conn->state != BT_CONN_CONNECTED) {
820 return true;
821 }
822
823 /* TODO: This function should be overridable by the application: they
824 * should be able to provide their own heuristic.
825 */
826 if (!conn->has_data(conn)) {
827 LOG_DBG("No more data for %p", conn);
828 return true;
829 }
830
831 /* Queue only 3 buffers per-conn for now */
832 if (atomic_get(&conn->in_ll) < 3) {
833 /* The goal of this heuristic is to allow the link-layer to
834 * extend an ACL connection event as long as the application
835 * layer can provide data.
836 *
837 * Here we chose three buffers, as some LLs need two enqueued
838 * packets to be able to set the more-data bit, and one more
839 * buffer to allow refilling by the app while one of them is
840 * being sent over-the-air.
841 */
842 return false;
843 }
844
845 return true;
846 }
847
/* Mark @p conn as having data for the TX processor and kick the
 * processor. The connection is appended at most once (guarded by
 * `_conn_ready_lock`) to `bt_dev.le.conn_ready`.
 */
void bt_conn_data_ready(struct bt_conn *conn)
{
	LOG_DBG("DR");

	/* The TX processor will call the `pull_cb` to get the buf */
	if (!atomic_set(&conn->_conn_ready_lock, 1)) {
		/* Attach a reference to the `bt_dev.le.conn_ready` list.
		 *
		 * This reference will be consumed when the conn is popped off
		 * the list (in `get_conn_ready`).
		 *
		 * The `bt_dev.le.conn_ready` list is accessed by the tx_processor
		 * which runs in a workqueue, but `bt_conn_data_ready` can be called
		 * from different threads so we need to make sure that nothing will
		 * trigger a thread switch while we are manipulating the list since
		 * sys_slist_*() functions are not thread safe.
		 */
		bt_conn_ref(conn);
		k_sched_lock();
		sys_slist_append(&bt_dev.le.conn_ready,
				 &conn->_conn_ready);
		k_sched_unlock();
		LOG_DBG("raised");
	} else {
		LOG_DBG("already in list");
	}

	/* Kick the TX processor */
	bt_tx_irq_raise();
}
878
cannot_send_to_controller(struct bt_conn * conn)879 static bool cannot_send_to_controller(struct bt_conn *conn)
880 {
881 return k_sem_count_get(bt_conn_get_pkts(conn)) == 0;
882 }
883
/* True when no fragment (view) buffer can be allocated right now. */
static bool dont_have_viewbufs(void)
{
#if defined(CONFIG_BT_CONN_TX)
	/* The LIFO only tracks buffers that have been destroyed at least once,
	 * hence the uninit check beforehand.
	 */
	if (fragments.uninit_count > 0) {
		/* If there are uninitialized bufs, we are guaranteed allocation. */
		return false;
	}

	/* In practice k_fifo == k_lifo ABI. */
	return k_fifo_is_empty(&fragments.free);

#else /* !CONFIG_BT_CONN_TX */
	return false;
#endif /* CONFIG_BT_CONN_TX */
}
902
dont_have_methods(struct bt_conn * conn)903 __maybe_unused static bool dont_have_methods(struct bt_conn *conn)
904 {
905 return (conn->tx_data_pull == NULL) ||
906 (conn->get_and_clear_cb == NULL) ||
907 (conn->has_data == NULL);
908 }
909
/* Pop the next connection that both has data to send and can currently
 * be serviced (controller buffer + TX context available).
 *
 * Returns a reference the caller must release with bt_conn_unref(), or
 * NULL when no connection can make progress right now.
 */
struct bt_conn *get_conn_ready(void)
{
	struct bt_conn *conn, *tmp;
	sys_snode_t *prev = NULL;

	if (dont_have_viewbufs()) {
		/* We will get scheduled again when the (view) buffers are freed. If you
		 * hit this a lot, try increasing `CONFIG_BT_CONN_FRAG_COUNT`
		 */
		LOG_DBG("no view bufs");
		return NULL;
	}

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&bt_dev.le.conn_ready, conn, tmp, _conn_ready) {
		__ASSERT_NO_MSG(tmp != conn);

		/* Iterate over the list of connections that have data to send
		 * and return the first one that can be sent.
		 */

		if (cannot_send_to_controller(conn)) {
			/* When buffers are full, try next connection. */
			LOG_DBG("no LL bufs for %p", conn);
			prev = &conn->_conn_ready;
			continue;
		}

		if (dont_have_tx_context(conn)) {
			/* When TX contexts are not available, try next connection. */
			LOG_DBG("no TX contexts for %p", conn);
			prev = &conn->_conn_ready;
			continue;
		}

		CHECKIF(dont_have_methods(conn)) {
			/* When a connection is missing mandatory methods, try next connection. */
			LOG_DBG("conn %p (type %d) is missing mandatory methods", conn, conn->type);
			prev = &conn->_conn_ready;
			continue;
		}

		if (should_stop_tx(conn)) {
			/* Move reference off the list */
			__ASSERT_NO_MSG(prev != &conn->_conn_ready);
			sys_slist_remove(&bt_dev.le.conn_ready, prev, &conn->_conn_ready);
			(void)atomic_set(&conn->_conn_ready_lock, 0);

			/* Append connection to list if it still has data */
			if (conn->has_data(conn)) {
				LOG_DBG("appending %p to back of TX queue", conn);
				bt_conn_data_ready(conn);
			}

			return conn;
		}

		return bt_conn_ref(conn);
	}

	/* No connection has data to send */
	return NULL;
}
972
973 /* Crazy that this file is compiled even if this is not true, but here we are. */
974 #if defined(CONFIG_BT_CONN)
/* Extract the completion closure stored in @p buf's user data into
 * @p cb / @p ud, then wipe it so it cannot be used twice.
 */
static void acl_get_and_clear_cb(struct bt_conn *conn, struct net_buf *buf,
				 bt_conn_tx_cb_t *cb, void **ud)
{
	__ASSERT_NO_MSG(is_acl_conn(conn));

	*cb = closure_cb(buf->user_data);
	*ud = closure_data(buf->user_data);
	(void)memset(buf->user_data, 0, buf->user_data_size);
}
984 #endif /* defined(CONFIG_BT_CONN) */
985
986 /* Acts as a "null-routed" bt_send(). This fn will decrease the refcount of
987 * `buf` and call the user callback with an error code.
988 */
/* Acts as a "null-routed" bt_send(): drop one reference to @p buf and run
 * the user callback with -ESHUTDOWN.
 */
static void destroy_and_callback(struct bt_conn *conn,
				 struct net_buf *buf,
				 bt_conn_tx_cb_t cb,
				 void *ud)
{
	if (cb == NULL) {
		/* No callback supplied: pull it out of the buffer metadata. */
		conn->get_and_clear_cb(conn, buf, &cb, &ud);
	}

	LOG_DBG("pop: cb %p userdata %p", cb, ud);

	/* bt_send() would've done an unref. Do it here also, so the buffer is
	 * hopefully destroyed and the user callback can allocate a new one.
	 */
	net_buf_unref(buf);

	if (cb != NULL) {
		cb(conn, ud, -ESHUTDOWN);
	}
}
1009
/* Test hook (see bt_conn_suspend_tx): while true, bt_conn_tx_processor()
 * returns immediately without sending any data.
 */
static volatile bool _suspend_tx;
1011
1012 #if defined(CONFIG_BT_TESTING)
/* Test-only control: suspend (true) or resume (false) all data TX. */
void bt_conn_suspend_tx(bool suspend)
{
	_suspend_tx = suspend;

	LOG_DBG("%sing all data TX", suspend ? "suspend" : "resum");

	/* Kick the processor so a resume takes effect immediately. */
	bt_tx_irq_raise();
}
1021 #endif /* CONFIG_BT_TESTING */
1022
/* Main TX pump: pick one ready connection, pull one PDU (or fragment
 * window) from its upper layer, and push it to the controller.
 *
 * Runs to completion for a single buffer and then re-raises the TX IRQ;
 * it self-suspends (by simply returning) when no connection has data or
 * resources. On fatal send errors, it flushes the buffer's callback and
 * disconnects the link.
 */
void bt_conn_tx_processor(void)
{
	LOG_DBG("start");
	struct bt_conn *conn;
	struct net_buf *buf;
	bt_conn_tx_cb_t cb = NULL;
	size_t buf_len;
	void *ud = NULL;

	if (!IS_ENABLED(CONFIG_BT_CONN_TX)) {
		/* Mom, can we have a real compiler? */
		return;
	}

	if (IS_ENABLED(CONFIG_BT_TESTING) && _suspend_tx) {
		return;
	}

	/* Takes a reference that is released in the exit path below. */
	conn = get_conn_ready();

	if (!conn) {
		LOG_DBG("no connection wants to do stuff");
		return;
	}

	LOG_DBG("processing conn %p", conn);

	if (conn->state != BT_CONN_CONNECTED) {
		LOG_WRN("conn %p: not connected", conn);

		/* Call the user callbacks & destroy (final-unref) the buffers
		 * we were supposed to send.
		 */
		buf = conn->tx_data_pull(conn, SIZE_MAX, &buf_len);
		while (buf) {
			destroy_and_callback(conn, buf, cb, ud);
			buf = conn->tx_data_pull(conn, SIZE_MAX, &buf_len);
		}

		goto exit;
	}

	/* now that we are guaranteed resources, we can pull data from the upper
	 * layer (L2CAP or ISO).
	 */
	buf = conn->tx_data_pull(conn, conn_mtu(conn), &buf_len);
	if (!buf) {
		/* Either there is no more data, or the buffer is already in-use
		 * by a view on it. In both cases, the TX processor will be
		 * triggered again, either by the view's destroy callback, or by
		 * the upper layer when it has more data.
		 */
		LOG_DBG("no buf returned");

		goto exit;
	}

	bool last_buf = conn_mtu(conn) >= buf_len;

	if (last_buf) {
		/* Only pull the callback info from the last buffer.
		 * We still allocate one TX context per-fragment though.
		 */
		conn->get_and_clear_cb(conn, buf, &cb, &ud);
		LOG_DBG("pop: cb %p userdata %p", cb, ud);
	}

	LOG_DBG("TX process: conn %p buf %p (%s)",
		conn, buf, last_buf ? "last" : "frag");

	int err = send_buf(conn, buf, buf_len, cb, ud);

	if (err) {
		/* -EIO means `unrecoverable error`. It can be an assertion that
		 * failed or an error from the HCI driver.
		 *
		 * -ENOMEM means we thought we had all the resources to send the
		 * buf (ie. TX context + controller buffer) but one of them was
		 * not available. This is likely due to a failure of
		 * assumption, likely that we have been pre-empted somehow and
		 * that `tx_processor()` has been re-entered.
		 *
		 * In both cases, we destroy the buffer and mark the connection
		 * as dead.
		 */
		LOG_ERR("Fatal error (%d). Disconnecting %p", err, conn);
		destroy_and_callback(conn, buf, cb, ud);
		bt_conn_disconnect(conn, BT_HCI_ERR_REMOTE_USER_TERM_CONN);

		goto exit;
	}

	/* Always kick the TX work. It will self-suspend if it doesn't get
	 * resources or there is nothing left to send.
	 */
	bt_tx_irq_raise();

exit:
	/* Give back the ref that `get_conn_ready()` gave us */
	bt_conn_unref(conn);
}
1124
process_unack_tx(struct bt_conn * conn)1125 static void process_unack_tx(struct bt_conn *conn)
1126 {
1127 LOG_DBG("%p", conn);
1128
1129 /* Return any unacknowledged packets */
1130 while (1) {
1131 struct bt_conn_tx *tx;
1132 sys_snode_t *node;
1133
1134 node = sys_slist_get(&conn->tx_pending);
1135
1136 if (!node) {
1137 bt_tx_irq_raise();
1138 return;
1139 }
1140
1141 tx = CONTAINER_OF(node, struct bt_conn_tx, node);
1142
1143 conn_tx_destroy(conn, tx);
1144 k_sem_give(bt_conn_get_pkts(conn));
1145 }
1146 }
1147
/* Scan a connection object array for a live connection owning @p handle.
 *
 * @param conns  Base of the connection object array.
 * @param size   Number of elements in @p conns.
 * @param handle HCI connection handle to look for.
 *
 * @return Referenced connection (caller must bt_conn_unref()), or NULL.
 */
struct bt_conn *conn_lookup_handle(struct bt_conn *conns, size_t size,
				   uint16_t handle)
{
	/* size_t index: the bound is size_t, so a signed `int` index would be
	 * a signed/unsigned comparison hazard (-Wsign-compare).
	 */
	size_t i;

	for (i = 0; i < size; i++) {
		struct bt_conn *conn = bt_conn_ref(&conns[i]);

		if (!conn) {
			continue;
		}

		/* We only care about connections with a valid handle */
		if (!bt_conn_is_handle_valid(conn)) {
			bt_conn_unref(conn);
			continue;
		}

		if (conn->handle != handle) {
			bt_conn_unref(conn);
			continue;
		}

		return conn;
	}

	return NULL;
}
1176
/* Transition @p conn to @p state, running the exit actions of the old state
 * and the entry actions of the new one.
 *
 * Reference ownership: the first transition out of DISCONNECTED takes a
 * reference that is held until the object returns to DISCONNECTED (dropped
 * in the per-old-state branches below, or during deferred cleanup).
 *
 * Fix: the "Cannot happen" branch claimed to log the invalid OLD state but
 * passed the new `state` argument; it now logs `old_state`.
 */
void bt_conn_set_state(struct bt_conn *conn, bt_conn_state_t state)
{
	bt_conn_state_t old_state;

	LOG_DBG("%s -> %s", state2str(conn->state), state2str(state));

	if (conn->state == state) {
		LOG_WRN("no transition %s", state2str(state));
		return;
	}

	old_state = conn->state;
	conn->state = state;

	/* Actions needed for exiting the old state */
	switch (old_state) {
	case BT_CONN_DISCONNECTED:
		/* Take a reference for the first state transition after
		 * bt_conn_add_le() and keep it until reaching DISCONNECTED
		 * again.
		 */
		if (conn->type != BT_CONN_TYPE_ISO) {
			bt_conn_ref(conn);
		}
		break;
	case BT_CONN_INITIATING:
		if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
		    conn->type == BT_CONN_TYPE_LE) {
			k_work_cancel_delayable(&conn->deferred_work);
		}
		break;
	default:
		break;
	}

	/* Actions needed for entering the new state */
	switch (conn->state) {
	case BT_CONN_CONNECTED:
		if (conn->type == BT_CONN_TYPE_SCO) {
			if (IS_ENABLED(CONFIG_BT_CLASSIC)) {
				bt_sco_connected(conn);
			}
			break;
		}
		k_poll_signal_raise(&conn_change, 0);

		if (IS_ENABLED(CONFIG_BT_ISO) &&
		    conn->type == BT_CONN_TYPE_ISO) {
			bt_iso_connected(conn);
			break;
		}

#if defined(CONFIG_BT_CONN)
		sys_slist_init(&conn->channels);

		if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
		    conn->role == BT_CONN_ROLE_PERIPHERAL) {

#if defined(CONFIG_BT_GAP_AUTO_UPDATE_CONN_PARAMS)
			if (conn->type == BT_CONN_TYPE_LE) {
				conn->le.conn_param_retry_countdown =
					CONFIG_BT_CONN_PARAM_RETRY_COUNT;
			}
#endif /* CONFIG_BT_GAP_AUTO_UPDATE_CONN_PARAMS */

			k_work_schedule(&conn->deferred_work,
					CONN_UPDATE_TIMEOUT);
		}
#endif /* CONFIG_BT_CONN */

		break;
	case BT_CONN_DISCONNECTED:
#if defined(CONFIG_BT_CONN)
		if (conn->type == BT_CONN_TYPE_SCO) {
			if (IS_ENABLED(CONFIG_BT_CLASSIC)) {
				bt_sco_disconnected(conn);
			}
			bt_conn_unref(conn);
			break;
		}

		/* Notify disconnection and queue a dummy buffer to wake
		 * up and stop the tx thread for states where it was
		 * running.
		 */
		switch (old_state) {
		case BT_CONN_DISCONNECT_COMPLETE:
			/* Any previously scheduled deferred work now becomes invalid
			 * so cancel it here, before we yield to tx thread.
			 */
			k_work_cancel_delayable(&conn->deferred_work);

			bt_conn_tx_notify(conn, true);

			bt_conn_reset_rx_state(conn);

			LOG_DBG("trigger disconnect work");
			k_work_reschedule(&conn->deferred_work, K_NO_WAIT);

			/* The last ref will be dropped during cleanup */
			break;
		case BT_CONN_INITIATING:
			/* LE Create Connection command failed. This might be
			 * directly from the API, don't notify application in
			 * this case.
			 */
			if (conn->err) {
				notify_connected(conn);
			}

			bt_conn_unref(conn);
			break;
		case BT_CONN_SCAN_BEFORE_INITIATING:
			/* This indicates that connection establishment
			 * has been stopped. This could either be triggered by
			 * the application through bt_conn_disconnect or by
			 * timeout set by bt_conn_le_create_param.timeout.
			 */
			if (IS_ENABLED(CONFIG_BT_CENTRAL)) {
				int err = bt_le_scan_user_remove(BT_LE_SCAN_USER_CONN);

				if (err) {
					LOG_WRN("Error while removing conn user from scanner (%d)",
						err);
				}

				if (conn->err) {
					notify_connected(conn);
				}
			}
			bt_conn_unref(conn);
			break;
		case BT_CONN_ADV_DIR_CONNECTABLE:
			/* this indicate Directed advertising stopped */
			if (conn->err) {
				notify_connected(conn);
			}

			bt_conn_unref(conn);
			break;
		case BT_CONN_INITIATING_FILTER_LIST:
			/* this indicates LE Create Connection with filter
			 * policy has been stopped. This can only be triggered
			 * by the application, so don't notify.
			 */
			bt_conn_unref(conn);
			break;
		case BT_CONN_ADV_CONNECTABLE:
			/* This can only happen when application stops the
			 * advertiser, conn->err is never set in this case.
			 */
			bt_conn_unref(conn);
			break;
		case BT_CONN_CONNECTED:
		case BT_CONN_DISCONNECTING:
		case BT_CONN_DISCONNECTED:
			/* Cannot happen. Log the offending OLD state (was
			 * incorrectly logging the new `state`).
			 */
			LOG_WRN("Invalid (%u) old state", old_state);
			break;
		}
		break;
	case BT_CONN_INITIATING_FILTER_LIST:
		break;
	case BT_CONN_ADV_CONNECTABLE:
		break;
	case BT_CONN_SCAN_BEFORE_INITIATING:
		break;
	case BT_CONN_ADV_DIR_CONNECTABLE:
		break;
	case BT_CONN_INITIATING:
		if (conn->type == BT_CONN_TYPE_SCO) {
			break;
		}
		/*
		 * Timer is needed only for LE. For other link types controller
		 * will handle connection timeout.
		 */
		if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
		    conn->type == BT_CONN_TYPE_LE &&
		    bt_dev.create_param.timeout != 0) {
			k_work_schedule(&conn->deferred_work,
					K_MSEC(10 * bt_dev.create_param.timeout));
		}

		break;
	case BT_CONN_DISCONNECTING:
		break;
#endif /* CONFIG_BT_CONN */
	case BT_CONN_DISCONNECT_COMPLETE:
		if (conn->err == BT_HCI_ERR_CONN_FAIL_TO_ESTAB) {
			/* No ACK or data was ever received. The peripheral may be
			 * unaware of the connection attempt.
			 *
			 * Beware of confusing higher layer errors. Anything that looks
			 * like it's from the remote is synthetic.
			 */
			LOG_WRN("conn %p failed to establish. RF noise?", conn);
		}

		process_unack_tx(conn);
		break;
	default:
		LOG_WRN("no valid (%u) state was set", state);

		break;
	}
}
1384
/* Look up a connection by HCI handle across all enabled pools (ACL, ISO,
 * SCO), restricted to the given type mask.
 *
 * @return Referenced connection (caller must bt_conn_unref()), or NULL if
 *         no connection owns @p handle or its type does not match @p type.
 */
struct bt_conn *bt_conn_lookup_handle(uint16_t handle, enum bt_conn_type type)
{
	/* NULL-initialize: if every lookup section below is compiled out, or
	 * the final section finds nothing, `conn` must still be well-defined
	 * at the `found:` label (previously it could be read uninitialized).
	 */
	struct bt_conn *conn = NULL;

#if defined(CONFIG_BT_CONN)
	conn = conn_lookup_handle(acl_conns, ARRAY_SIZE(acl_conns), handle);
	if (conn) {
		goto found;
	}
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_ISO)
	conn = conn_lookup_handle(iso_conns, ARRAY_SIZE(iso_conns), handle);
	if (conn) {
		goto found;
	}
#endif

#if defined(CONFIG_BT_CLASSIC)
	conn = conn_lookup_handle(sco_conns, ARRAY_SIZE(sco_conns), handle);
	if (conn) {
		goto found;
	}
#endif

found:
	if (conn) {
		if (type & conn->type) {
			return conn;
		}
		LOG_WRN("incompatible handle %u", handle);
		bt_conn_unref(conn);
	}
	return NULL;
}
1420
/* HCI-layer convenience wrapper: look up a connection of any type by its
 * handle. Caller owns the returned reference.
 */
struct bt_conn *bt_hci_conn_lookup_handle(uint16_t handle)
{
	const enum bt_conn_type any_type = BT_CONN_TYPE_ALL;

	return bt_conn_lookup_handle(handle, any_type);
}
1425
bt_conn_foreach(enum bt_conn_type type,void (* func)(struct bt_conn * conn,void * data),void * data)1426 void bt_conn_foreach(enum bt_conn_type type,
1427 void (*func)(struct bt_conn *conn, void *data),
1428 void *data)
1429 {
1430 int i;
1431
1432 #if defined(CONFIG_BT_CONN)
1433 for (i = 0; i < ARRAY_SIZE(acl_conns); i++) {
1434 struct bt_conn *conn = bt_conn_ref(&acl_conns[i]);
1435
1436 if (!conn) {
1437 continue;
1438 }
1439
1440 if (!(conn->type & type)) {
1441 bt_conn_unref(conn);
1442 continue;
1443 }
1444
1445 func(conn, data);
1446 bt_conn_unref(conn);
1447 }
1448 #if defined(CONFIG_BT_CLASSIC)
1449 if (type & BT_CONN_TYPE_SCO) {
1450 for (i = 0; i < ARRAY_SIZE(sco_conns); i++) {
1451 struct bt_conn *conn = bt_conn_ref(&sco_conns[i]);
1452
1453 if (!conn) {
1454 continue;
1455 }
1456
1457 func(conn, data);
1458 bt_conn_unref(conn);
1459 }
1460 }
1461 #endif /* defined(CONFIG_BT_CLASSIC) */
1462 #endif /* CONFIG_BT_CONN */
1463
1464 #if defined(CONFIG_BT_ISO)
1465 if (type & BT_CONN_TYPE_ISO) {
1466 for (i = 0; i < ARRAY_SIZE(iso_conns); i++) {
1467 struct bt_conn *conn = bt_conn_ref(&iso_conns[i]);
1468
1469 if (!conn) {
1470 continue;
1471 }
1472
1473 func(conn, data);
1474 bt_conn_unref(conn);
1475 }
1476 }
1477 #endif /* defined(CONFIG_BT_ISO) */
1478 }
1479
bt_conn_ref(struct bt_conn * conn)1480 struct bt_conn *bt_conn_ref(struct bt_conn *conn)
1481 {
1482 atomic_val_t old;
1483
1484 __ASSERT_NO_MSG(conn);
1485
1486 /* Reference counter must be checked to avoid incrementing ref from
1487 * zero, then we should return NULL instead.
1488 * Loop on clear-and-set in case someone has modified the reference
1489 * count since the read, and start over again when that happens.
1490 */
1491 do {
1492 old = atomic_get(&conn->ref);
1493
1494 if (!old) {
1495 return NULL;
1496 }
1497 } while (!atomic_cas(&conn->ref, old, old + 1));
1498
1499 LOG_DBG("handle %u ref %ld -> %ld", conn->handle, old, old + 1);
1500
1501 return conn;
1502 }
1503
/* Counts connection-slot-freed events not yet delivered to the registered
 * `recycled` callbacks (consumed by recycled_work_handler()).
 */
static K_SEM_DEFINE(pending_recycled_events, 0, K_SEM_MAX_LIMIT);
1505
recycled_work_handler(struct k_work * work)1506 static void recycled_work_handler(struct k_work *work)
1507 {
1508 if (k_sem_take(&pending_recycled_events, K_NO_WAIT) == 0) {
1509 notify_recycled_conn_slot();
1510 k_work_submit(work);
1511 }
1512 }
1513
/* Work item that fans out slot-recycled notifications from a workqueue. */
static K_WORK_DEFINE(recycled_work, recycled_work_handler);
1515
bt_conn_unref(struct bt_conn * conn)1516 void bt_conn_unref(struct bt_conn *conn)
1517 {
1518 atomic_val_t old;
1519 bool deallocated;
1520 enum bt_conn_type conn_type;
1521 uint8_t conn_role;
1522 uint16_t conn_handle;
1523
1524 __ASSERT(conn, "Invalid connection reference");
1525
1526 /* Storing parameters of interest so we don't access the object
1527 * after decrementing its ref-count
1528 */
1529 conn_type = conn->type;
1530 conn_role = conn->role;
1531 conn_handle = conn->handle;
1532
1533 old = atomic_dec(&conn->ref);
1534 /* Prevent from accessing connection object */
1535 conn = NULL;
1536 deallocated = (atomic_get(&old) == 1);
1537
1538 LOG_DBG("handle %u ref %ld -> %ld", conn_handle, old, (old - 1));
1539
1540 __ASSERT(old > 0, "Conn reference counter is 0");
1541
1542 /* Slot has been freed and can be taken. No guarantees are made on requests
1543 * to claim connection object as only the first claim will be served.
1544 */
1545 if (deallocated) {
1546 k_sem_give(&pending_recycled_events);
1547 k_work_submit(&recycled_work);
1548 }
1549
1550 if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn_type == BT_CONN_TYPE_LE &&
1551 conn_role == BT_CONN_ROLE_PERIPHERAL && deallocated) {
1552 bt_le_adv_resume();
1553 }
1554 }
1555
/* Map a connection object pointer back to its index within the backing pool
 * selected by conn->type (iso_conns / sco_conns / acl_conns). Asserts when
 * the pointer does not lie inside the expected array.
 *
 * @return Zero-based pool index, truncated to uint8_t.
 */
uint8_t bt_conn_index(const struct bt_conn *conn)
{
	ptrdiff_t index = 0;

	switch (conn->type) {
#if defined(CONFIG_BT_ISO)
	case BT_CONN_TYPE_ISO:
		index = conn - iso_conns;
		__ASSERT(index >= 0 && index < ARRAY_SIZE(iso_conns),
			"Invalid bt_conn pointer");
		break;
#endif
#if defined(CONFIG_BT_CLASSIC)
	case BT_CONN_TYPE_SCO:
		index = conn - sco_conns;
		__ASSERT(index >= 0 && index < ARRAY_SIZE(sco_conns),
			"Invalid bt_conn pointer");
		break;
#endif
	default:
#if defined(CONFIG_BT_CONN)
		/* All remaining types (LE / BR ACL) live in acl_conns. */
		index = conn - acl_conns;
		__ASSERT(index >= 0 && index < ARRAY_SIZE(acl_conns),
			 "Invalid bt_conn pointer");
#else
		__ASSERT(false, "Invalid connection type %u", conn->type);
#endif /* CONFIG_BT_CONN */
		break;
	}

	return (uint8_t)index;
}
1588
1589
/* Allocate a net_buf for an outgoing ACL PDU, reserving headroom for the
 * HCI ACL header plus BT_BUF_RESERVE on top of the caller's `reserve`.
 *
 * @param pool    Pool to allocate from; NULL selects the default ACL TX
 *                pool (or fails outright when CONFIG_BT_CONN is off).
 * @param reserve Extra headroom the caller needs in front of its payload.
 * @param timeout Allocation timeout; downgraded to K_NO_WAIT when running
 *                on the system workqueue so it is never blocked.
 *
 * @return Buffer with headroom reserved, or NULL on allocation failure.
 */
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *bt_conn_create_pdu_timeout_debug(struct net_buf_pool *pool,
						 size_t reserve,
						 k_timeout_t timeout,
						 const char *func, int line)
#else
struct net_buf *bt_conn_create_pdu_timeout(struct net_buf_pool *pool,
					   size_t reserve, k_timeout_t timeout)
#endif
{
	struct net_buf *buf;

	/*
	 * PDU must not be allocated from ISR as we block with 'K_FOREVER'
	 * during the allocation
	 */
	__ASSERT_NO_MSG(!k_is_in_isr());

	if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT) &&
	    k_current_get() == k_work_queue_thread_get(&k_sys_work_q)) {
		LOG_WRN("Timeout discarded. No blocking in syswq.");
		timeout = K_NO_WAIT;
	}

	if (!pool) {
#if defined(CONFIG_BT_CONN)
		pool = &acl_tx_pool;
#else
		return NULL;
#endif /* CONFIG_BT_CONN */
	}

	if (IS_ENABLED(CONFIG_BT_CONN_LOG_LEVEL_DBG)) {
		/* Debug builds: try a non-blocking allocation first so pool
		 * exhaustion gets logged before falling back to the caller's
		 * timeout.
		 */
#if defined(CONFIG_NET_BUF_LOG)
		buf = net_buf_alloc_fixed_debug(pool, K_NO_WAIT, func, line);
#else
		buf = net_buf_alloc(pool, K_NO_WAIT);
#endif
		if (!buf) {
			LOG_WRN("Unable to allocate buffer with K_NO_WAIT");
#if defined(CONFIG_NET_BUF_LOG)
			buf = net_buf_alloc_fixed_debug(pool, timeout, func,
							line);
#else
			buf = net_buf_alloc(pool, timeout);
#endif
		}
	} else {
#if defined(CONFIG_NET_BUF_LOG)
		buf = net_buf_alloc_fixed_debug(pool, timeout, func,
						line);
#else
		buf = net_buf_alloc(pool, timeout);
#endif
	}

	if (!buf) {
		LOG_WRN("Unable to allocate buffer within timeout");
		return NULL;
	}

	reserve += sizeof(struct bt_hci_acl_hdr) + BT_BUF_RESERVE;
	net_buf_reserve(buf, reserve);

	return buf;
}
1656
1657 #if defined(CONFIG_BT_CONN_TX)
tx_complete_work(struct k_work * work)1658 static void tx_complete_work(struct k_work *work)
1659 {
1660 struct bt_conn *conn = CONTAINER_OF(work, struct bt_conn, tx_complete_work);
1661
1662 tx_notify_process(conn);
1663 }
1664 #endif /* CONFIG_BT_CONN_TX */
1665
/* Invoke the `recycled` callback of every registered listener: first the
 * runtime-registered list, then the statically-defined callback section.
 */
static void notify_recycled_conn_slot(void)
{
#if defined(CONFIG_BT_CONN)
	struct bt_conn_cb *cb;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, cb, _node) {
		if (cb->recycled != NULL) {
			cb->recycled();
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, static_cb) {
		if (static_cb->recycled != NULL) {
			static_cb->recycled();
		}
	}
#endif
}
1684
1685 #if !defined(CONFIG_BT_CONN)
/* Stub used when CONFIG_BT_CONN is disabled: there are no ACL connections
 * to tear down, so the call trivially succeeds.
 */
int bt_conn_disconnect(struct bt_conn *conn, uint8_t reason)
{
	ARG_UNUSED(reason);
	ARG_UNUSED(conn);

	return 0;
}
1695 #endif /* !CONFIG_BT_CONN */
1696
1697 /* Group Connected BT_CONN only in this */
1698 #if defined(CONFIG_BT_CONN)
1699
1700 /* We don't want the application to get a PHY update callback upon connection
1701 * establishment on 2M PHY. Therefore we must prevent issuing LE Set PHY
1702 * in this scenario.
1703 *
1704 * It is ifdef'd because the struct fields don't exist in some configs.
1705 */
uses_symmetric_2mbit_phy(struct bt_conn * conn)1706 static bool uses_symmetric_2mbit_phy(struct bt_conn *conn)
1707 {
1708 #if defined(CONFIG_BT_USER_PHY_UPDATE)
1709 if (IS_ENABLED(CONFIG_BT_EXT_ADV)) {
1710 if (conn->le.phy.tx_phy == BT_HCI_LE_PHY_2M &&
1711 conn->le.phy.rx_phy == BT_HCI_LE_PHY_2M) {
1712 return true;
1713 }
1714 }
1715 #else
1716 ARG_UNUSED(conn);
1717 #endif
1718
1719 return false;
1720 }
1721
can_initiate_feature_exchange(struct bt_conn * conn)1722 static bool can_initiate_feature_exchange(struct bt_conn *conn)
1723 {
1724 /* Spec says both central and peripheral can send the command. However,
1725 * peripheral-initiated feature exchange is an optional feature.
1726 *
1727 * We provide an optimization if we are in the same image as the
1728 * controller, as we know at compile time whether it supports or not
1729 * peripheral feature exchange.
1730 */
1731
1732 if (IS_ENABLED(CONFIG_BT_CENTRAL) && (conn->role == BT_HCI_ROLE_CENTRAL)) {
1733 return true;
1734 }
1735
1736 if (IS_ENABLED(CONFIG_HAS_BT_CTLR) && IS_ENABLED(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG)) {
1737 return true;
1738 }
1739
1740 return BT_FEAT_LE_PER_INIT_FEAT_XCHG(bt_dev.le.features);
1741 }
1742
/* Run the post-connection auto-initiated procedures on one LE connection:
 * remote feature read, remote version read, PHY update, data length update.
 *
 * Each procedure can trigger callbacks that disconnect the link, so the
 * state is re-checked after every step and the sequence aborts as soon as
 * the connection is no longer CONNECTED. The whole sequence runs at most
 * once per connection (guarded by BT_CONN_AUTO_INIT_PROCEDURES_DONE).
 *
 * @param conn   Connection to run the procedures on.
 * @param unused Unused bt_conn_foreach() user-data slot.
 */
static void perform_auto_initiated_procedures(struct bt_conn *conn, void *unused)
{
	int err;

	ARG_UNUSED(unused);

	LOG_DBG("[%p] Running auto-initiated procedures", conn);

	if (conn->state != BT_CONN_CONNECTED) {
		/* It is possible that connection was disconnected directly from
		 * connected callback so we must check state before doing
		 * connection parameters update.
		 */
		return;
	}

	if (atomic_test_and_set_bit(conn->flags, BT_CONN_AUTO_INIT_PROCEDURES_DONE)) {
		/* We have already run the auto-initiated procedures */
		return;
	}

	if (!atomic_test_bit(conn->flags, BT_CONN_LE_FEATURES_EXCHANGED) &&
	    can_initiate_feature_exchange(conn)) {
		err = bt_hci_le_read_remote_features(conn);
		if (err) {
			LOG_ERR("Failed read remote features (%d)", err);
		}
		if (conn->state != BT_CONN_CONNECTED) {
			return;
		}
	}

	if (IS_ENABLED(CONFIG_BT_REMOTE_VERSION) &&
	    !atomic_test_bit(conn->flags, BT_CONN_AUTO_VERSION_INFO)) {
		err = bt_hci_read_remote_version(conn);
		if (err) {
			LOG_ERR("Failed read remote version (%d)", err);
		}
		if (conn->state != BT_CONN_CONNECTED) {
			return;
		}
	}

	/* Skip the PHY update when the link already came up on symmetric 2M
	 * (avoids a redundant procedure and a surprise callback).
	 */
	if (IS_ENABLED(CONFIG_BT_AUTO_PHY_UPDATE) && BT_FEAT_LE_PHY_2M(bt_dev.le.features) &&
	    !uses_symmetric_2mbit_phy(conn)) {
		err = bt_le_set_phy(conn, 0U, BT_HCI_LE_PHY_PREFER_2M, BT_HCI_LE_PHY_PREFER_2M,
				    BT_HCI_LE_PHY_CODED_ANY);
		if (err) {
			LOG_ERR("Failed LE Set PHY (%d)", err);
		}
		if (conn->state != BT_CONN_CONNECTED) {
			return;
		}
	}

	/* Data length should be automatically updated to the maximum by the
	 * controller. Not updating it is a quirk and this is the workaround.
	 */
	if (IS_ENABLED(CONFIG_BT_AUTO_DATA_LEN_UPDATE) && BT_FEAT_LE_DLE(bt_dev.le.features) &&
	    bt_drv_quirk_no_auto_dle()) {
		uint16_t tx_octets, tx_time;

		err = bt_hci_le_read_max_data_len(&tx_octets, &tx_time);
		if (!err) {
			err = bt_le_set_data_len(conn, tx_octets, tx_time);
			if (err) {
				LOG_ERR("Failed to set data len (%d)", err);
			}
		}
	}

	LOG_DBG("[%p] Successfully ran auto-initiated procedures", conn);
}
1816
1817 /* Executes procedures after a connection is established:
1818 * - read remote features
1819 * - read remote version
1820 * - update PHY
1821 * - update data length
1822 */
auto_initiated_procedures(struct k_work * unused)1823 static void auto_initiated_procedures(struct k_work *unused)
1824 {
1825 ARG_UNUSED(unused);
1826
1827 bt_conn_foreach(BT_CONN_TYPE_LE, perform_auto_initiated_procedures, NULL);
1828 }
1829
/* Work item queued on connection establishment; see auto_initiated_procedures(). */
static K_WORK_DEFINE(procedures_on_connect, auto_initiated_procedures);
1831
schedule_auto_initiated_procedures(struct bt_conn * conn)1832 static void schedule_auto_initiated_procedures(struct bt_conn *conn)
1833 {
1834 LOG_DBG("[%p] Scheduling auto-init procedures", conn);
1835 k_work_submit(&procedures_on_connect);
1836 }
1837
/* Post-connection hook: schedule auto-initiated procedures, bring up L2CAP,
 * then run the application's `connected` callbacks.
 * NOTE(review): the call order looks deliberate (L2CAP before app
 * callbacks) — confirm before reordering.
 */
void bt_conn_connected(struct bt_conn *conn)
{
	schedule_auto_initiated_procedures(conn);
	bt_l2cap_connected(conn);
	notify_connected(conn);
}
1844
/* Issue HCI Disconnect for @p conn; on success, move a fully-connected link
 * to DISCONNECTING while we wait for the completion event.
 *
 * @return 0 on success, otherwise the bt_hci_disconnect() error.
 */
static int conn_disconnect(struct bt_conn *conn, uint8_t reason)
{
	int err = bt_hci_disconnect(conn->handle, reason);

	if (err != 0) {
		return err;
	}

	if (conn->state == BT_CONN_CONNECTED) {
		bt_conn_set_state(conn, BT_CONN_DISCONNECTING);
	}

	return 0;
}
1860
/* Disconnect @p conn (or abort its establishment), dispatching on the
 * current connection state:
 *  - SCAN_BEFORE_INITIATING: stop the conn-user scan, record @p reason.
 *  - INITIATING: cancel the pending create-connection per link type.
 *  - CONNECTED: issue HCI Disconnect.
 *  - DISCONNECTING: already in progress, nothing to do.
 *
 * @return 0 on success, -ENOTCONN when already disconnected, or a
 *         lower-layer error code.
 */
int bt_conn_disconnect(struct bt_conn *conn, uint8_t reason)
{
	switch (conn->state) {
	case BT_CONN_SCAN_BEFORE_INITIATING:
		conn->err = reason;
		bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
		if (IS_ENABLED(CONFIG_BT_CENTRAL)) {
			/* Hand the scanner back to its other users. */
			return bt_le_scan_user_add(BT_LE_SCAN_USER_CONN);
		}
		return 0;
	case BT_CONN_INITIATING:
		if (conn->type == BT_CONN_TYPE_LE) {
			if (IS_ENABLED(CONFIG_BT_CENTRAL)) {
				k_work_cancel_delayable(&conn->deferred_work);
				return bt_le_create_conn_cancel();
			}
		}
#if defined(CONFIG_BT_ISO)
		else if (conn->type == BT_CONN_TYPE_ISO) {
			return conn_disconnect(conn, reason);
		}
#endif /* CONFIG_BT_ISO */
#if defined(CONFIG_BT_CLASSIC)
		else if (conn->type == BT_CONN_TYPE_BR) {
			return bt_hci_connect_br_cancel(conn);
		}
#endif /* CONFIG_BT_CLASSIC */
		else {
			__ASSERT(false, "Invalid conn type %u", conn->type);
		}

		return 0;
	case BT_CONN_CONNECTED:
		return conn_disconnect(conn, reason);
	case BT_CONN_DISCONNECTING:
		return 0;
	case BT_CONN_DISCONNECTED:
	default:
		return -ENOTCONN;
	}
}
1902
notify_connected(struct bt_conn * conn)1903 static void notify_connected(struct bt_conn *conn)
1904 {
1905 struct bt_conn_cb *callback;
1906
1907 SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
1908
1909 if (callback->connected) {
1910 callback->connected(conn, conn->err);
1911 }
1912 }
1913
1914 STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
1915 if (cb->connected) {
1916 cb->connected(conn, conn->err);
1917 }
1918 }
1919 }
1920
notify_disconnected(struct bt_conn * conn)1921 static void notify_disconnected(struct bt_conn *conn)
1922 {
1923 struct bt_conn_cb *callback;
1924
1925 SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
1926 if (callback->disconnected) {
1927 callback->disconnected(conn, conn->err);
1928 }
1929 }
1930
1931 STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
1932 if (cb->disconnected) {
1933 cb->disconnected(conn, conn->err);
1934 }
1935 }
1936 }
1937
1938 #if defined(CONFIG_BT_REMOTE_INFO)
notify_remote_info(struct bt_conn * conn)1939 void notify_remote_info(struct bt_conn *conn)
1940 {
1941 struct bt_conn_remote_info remote_info;
1942 int err;
1943
1944 err = bt_conn_get_remote_info(conn, &remote_info);
1945 if (err) {
1946 LOG_DBG("Notify remote info failed %d", err);
1947 return;
1948 }
1949
1950 struct bt_conn_cb *callback;
1951
1952 SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
1953 if (callback->remote_info_available) {
1954 callback->remote_info_available(conn, &remote_info);
1955 }
1956 }
1957
1958 STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
1959 if (cb->remote_info_available) {
1960 cb->remote_info_available(conn, &remote_info);
1961 }
1962 }
1963 }
1964 #endif /* defined(CONFIG_BT_REMOTE_INFO) */
1965
notify_le_param_updated(struct bt_conn * conn)1966 void notify_le_param_updated(struct bt_conn *conn)
1967 {
1968 /* If new connection parameters meet requirement of pending
1969 * parameters don't send peripheral conn param request anymore on timeout
1970 */
1971 if (atomic_test_bit(conn->flags, BT_CONN_PERIPHERAL_PARAM_SET) &&
1972 conn->le.interval >= conn->le.interval_min &&
1973 conn->le.interval <= conn->le.interval_max &&
1974 conn->le.latency == conn->le.pending_latency &&
1975 conn->le.timeout == conn->le.pending_timeout) {
1976 atomic_clear_bit(conn->flags, BT_CONN_PERIPHERAL_PARAM_SET);
1977 }
1978
1979 struct bt_conn_cb *callback;
1980
1981 SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
1982 if (callback->le_param_updated) {
1983 callback->le_param_updated(conn, conn->le.interval,
1984 conn->le.latency,
1985 conn->le.timeout);
1986 }
1987 }
1988
1989 STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
1990 if (cb->le_param_updated) {
1991 cb->le_param_updated(conn, conn->le.interval,
1992 conn->le.latency,
1993 conn->le.timeout);
1994 }
1995 }
1996 }
1997
1998 #if defined(CONFIG_BT_USER_DATA_LEN_UPDATE)
notify_le_data_len_updated(struct bt_conn * conn)1999 void notify_le_data_len_updated(struct bt_conn *conn)
2000 {
2001 struct bt_conn_cb *callback;
2002
2003 SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
2004 if (callback->le_data_len_updated) {
2005 callback->le_data_len_updated(conn, &conn->le.data_len);
2006 }
2007 }
2008
2009 STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
2010 if (cb->le_data_len_updated) {
2011 cb->le_data_len_updated(conn, &conn->le.data_len);
2012 }
2013 }
2014 }
2015 #endif
2016
2017 #if defined(CONFIG_BT_USER_PHY_UPDATE)
notify_le_phy_updated(struct bt_conn * conn)2018 void notify_le_phy_updated(struct bt_conn *conn)
2019 {
2020 struct bt_conn_cb *callback;
2021
2022 SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
2023 if (callback->le_phy_updated) {
2024 callback->le_phy_updated(conn, &conn->le.phy);
2025 }
2026 }
2027
2028 STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
2029 if (cb->le_phy_updated) {
2030 cb->le_phy_updated(conn, &conn->le.phy);
2031 }
2032 }
2033 }
2034 #endif
2035
le_param_req(struct bt_conn * conn,struct bt_le_conn_param * param)2036 bool le_param_req(struct bt_conn *conn, struct bt_le_conn_param *param)
2037 {
2038 if (!bt_le_conn_params_valid(param)) {
2039 return false;
2040 }
2041
2042 struct bt_conn_cb *callback;
2043
2044 SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
2045 if (!callback->le_param_req) {
2046 continue;
2047 }
2048
2049 if (!callback->le_param_req(conn, param)) {
2050 return false;
2051 }
2052
2053 /* The callback may modify the parameters so we need to
2054 * double-check that it returned valid parameters.
2055 */
2056 if (!bt_le_conn_params_valid(param)) {
2057 return false;
2058 }
2059 }
2060
2061 STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
2062 if (!cb->le_param_req) {
2063 continue;
2064 }
2065
2066 if (!cb->le_param_req(conn, param)) {
2067 return false;
2068 }
2069
2070 /* The callback may modify the parameters so we need to
2071 * double-check that it returned valid parameters.
2072 */
2073 if (!bt_le_conn_params_valid(param)) {
2074 return false;
2075 }
2076 }
2077
2078 /* Default to accepting if there's no app callback */
2079 return true;
2080 }
2081
/* Request an LE connection parameter update using the best available
 * mechanism: the LL Connection Parameter Request procedure (or direct LE
 * Connection Update when we are central), falling back to the L2CAP
 * signaling-channel request for legacy remote centrals.
 *
 * @return 0 on success, -EINVAL for invalid parameters, or a lower-layer
 *         error code.
 */
static int send_conn_le_param_update(struct bt_conn *conn,
				     const struct bt_le_conn_param *param)
{
	LOG_DBG("conn %p features 0x%02x params (%d-%d %d %d)", conn, conn->le.features[0],
		param->interval_min, param->interval_max, param->latency, param->timeout);

	/* Proceed only if connection parameters contains valid values*/
	if (!bt_le_conn_params_valid(param)) {
		return -EINVAL;
	}

	/* Use LE connection parameter request if both local and remote support
	 * it; or if local role is central then use LE connection update.
	 */
	if ((BT_FEAT_LE_CONN_PARAM_REQ_PROC(bt_dev.le.features) &&
	     BT_FEAT_LE_CONN_PARAM_REQ_PROC(conn->le.features) &&
	     !atomic_test_bit(conn->flags, BT_CONN_PERIPHERAL_PARAM_L2CAP)) ||
	     (conn->role == BT_HCI_ROLE_CENTRAL)) {
		int rc;

		rc = bt_conn_le_conn_update(conn, param);

		/* store those in case of fallback to L2CAP */
		if (rc == 0) {
			conn->le.interval_min = param->interval_min;
			conn->le.interval_max = param->interval_max;
			conn->le.pending_latency = param->latency;
			conn->le.pending_timeout = param->timeout;
		}

		return rc;
	}

	/* If remote central does not support LL Connection Parameters Request
	 * Procedure
	 */
	return bt_l2cap_update_conn_param(conn, param);
}
2120
2121 #if defined(CONFIG_BT_ISO_UNICAST)
conn_lookup_iso(struct bt_conn * conn)2122 static struct bt_conn *conn_lookup_iso(struct bt_conn *conn)
2123 {
2124 int i;
2125
2126 for (i = 0; i < ARRAY_SIZE(iso_conns); i++) {
2127 struct bt_conn *iso = bt_conn_ref(&iso_conns[i]);
2128
2129 if (iso == NULL) {
2130 continue;
2131 }
2132
2133 if (iso->iso.acl == conn) {
2134 return iso;
2135 }
2136
2137 bt_conn_unref(iso);
2138 }
2139
2140 return NULL;
2141 }
2142 #endif /* CONFIG_BT_ISO */
2143
2144 #if defined(CONFIG_BT_CLASSIC)
conn_lookup_sco(struct bt_conn * conn)2145 static struct bt_conn *conn_lookup_sco(struct bt_conn *conn)
2146 {
2147 int i;
2148
2149 for (i = 0; i < ARRAY_SIZE(sco_conns); i++) {
2150 struct bt_conn *sco = bt_conn_ref(&sco_conns[i]);
2151
2152 if (sco == NULL) {
2153 continue;
2154 }
2155
2156 if (sco->sco.acl == conn) {
2157 return sco;
2158 }
2159
2160 bt_conn_unref(sco);
2161 }
2162
2163 return NULL;
2164 }
2165 #endif /* CONFIG_BT_CLASSIC */
2166
/* Delayable-work handler shared by all connection objects.
 *
 * Serves two distinct purposes depending on the connection state:
 *  - BT_CONN_DISCONNECTED: perform deferred cleanup (tear down dependent
 *    ISO/SCO connections, notify L2CAP and the application) and release the
 *    stack's own reference taken at the first state transition;
 *  - otherwise (LE links only): run the deferred connection parameter
 *    update; on a central this cancels connection creation instead.
 */
static void deferred_work(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct bt_conn *conn = CONTAINER_OF(dwork, struct bt_conn, deferred_work);
	const struct bt_le_conn_param *param;

	LOG_DBG("conn %p", conn);

	if (conn->state == BT_CONN_DISCONNECTED) {
#if defined(CONFIG_BT_ISO_UNICAST)
		struct bt_conn *iso;

		if (conn->type == BT_CONN_TYPE_ISO) {
			/* bt_iso_disconnected is responsible for unref'ing the
			 * connection pointer, as it is conditional on whether
			 * the connection is a central or peripheral.
			 */
			bt_iso_disconnected(conn);
			return;
		}

		/* Mark all ISO channels associated
		 * with ACL conn as not connected, and
		 * remove ACL reference
		 */
		iso = conn_lookup_iso(conn);
		while (iso != NULL) {
			struct bt_iso_chan *chan = iso->iso.chan;

			if (chan != NULL) {
				bt_iso_chan_set_state(chan,
						      BT_ISO_STATE_DISCONNECTING);
			}

			bt_iso_cleanup_acl(iso);

			/* Drop the lookup reference and look for the next
			 * ISO connection still tied to this ACL.
			 */
			bt_conn_unref(iso);
			iso = conn_lookup_iso(conn);
		}
#endif
#if defined(CONFIG_BT_CLASSIC)
		struct bt_conn *sco;

		/* Mark all SCO channels associated
		 * with ACL conn as not connected, and
		 * remove ACL reference
		 */
		sco = conn_lookup_sco(conn);
		while (sco != NULL) {
			struct bt_sco_chan *chan = sco->sco.chan;

			if (chan != NULL) {
				bt_sco_chan_set_state(chan,
						      BT_SCO_STATE_DISCONNECTING);
			}

			bt_sco_cleanup_acl(sco);

			bt_conn_unref(sco);
			sco = conn_lookup_sco(conn);
		}
#endif /* CONFIG_BT_CLASSIC */
		bt_l2cap_disconnected(conn);
		notify_disconnected(conn);

		/* Release the reference we took for the very first
		 * state transition.
		 */
		bt_conn_unref(conn);
		return;
	}

	/* Parameter updates below only apply to LE ACL connections. */
	if (conn->type != BT_CONN_TYPE_LE) {
		return;
	}

	if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
	    conn->role == BT_CONN_ROLE_CENTRAL) {
		/* we don't call bt_conn_disconnect as it would also clear
		 * auto connect flag if it was set, instead just cancel
		 * connection directly
		 */
		bt_le_create_conn_cancel();
		return;
	}

	/* if application set own params use those, otherwise use defaults. */
	if (atomic_test_and_clear_bit(conn->flags,
				      BT_CONN_PERIPHERAL_PARAM_SET)) {
		int err;

		param = BT_LE_CONN_PARAM(conn->le.interval_min,
					 conn->le.interval_max,
					 conn->le.pending_latency,
					 conn->le.pending_timeout);

		err = send_conn_le_param_update(conn, param);
		if (!err) {
			atomic_clear_bit(conn->flags,
					 BT_CONN_PERIPHERAL_PARAM_AUTO_UPDATE);
		} else {
			LOG_WRN("Send LE param update failed (err %d)", err);
		}
	} else if (IS_ENABLED(CONFIG_BT_GAP_AUTO_UPDATE_CONN_PARAMS)) {
#if defined(CONFIG_BT_GAP_PERIPHERAL_PREF_PARAMS)
		int err;

		param = BT_LE_CONN_PARAM(
				CONFIG_BT_PERIPHERAL_PREF_MIN_INT,
				CONFIG_BT_PERIPHERAL_PREF_MAX_INT,
				CONFIG_BT_PERIPHERAL_PREF_LATENCY,
				CONFIG_BT_PERIPHERAL_PREF_TIMEOUT);

		err = send_conn_le_param_update(conn, param);
		if (!err) {
			atomic_set_bit(conn->flags,
				       BT_CONN_PERIPHERAL_PARAM_AUTO_UPDATE);
		} else {
			LOG_WRN("Send auto LE param update failed (err %d)",
				err);
		}
#endif
	}

	/* Record that the (attempted) peripheral parameter update happened. */
	atomic_set_bit(conn->flags, BT_CONN_PERIPHERAL_PARAM_UPDATE);
}
2293
/* Allocate a free ACL connection object from the global acl_conns pool.
 * Returns NULL when the pool is exhausted.
 */
static struct bt_conn *acl_conn_new(void)
{
	return bt_conn_new(acl_conns, ARRAY_SIZE(acl_conns));
}
2298
2299 #if defined(CONFIG_BT_CLASSIC)
/* Fully tear down a SCO connection: detach it from its parent ACL, then
 * drop the caller's reference to the SCO object itself.
 */
void bt_sco_cleanup(struct bt_conn *sco_conn)
{
	bt_sco_cleanup_acl(sco_conn);
	bt_conn_unref(sco_conn);
}
2305
/* Allocate a free SCO connection object from the global sco_conns pool.
 * Returns NULL when the pool is exhausted.
 */
static struct bt_conn *sco_conn_new(void)
{
	return bt_conn_new(sco_conns, ARRAY_SIZE(sco_conns));
}
2310
/* Initiate a BR/EDR (classic) connection to @p peer.
 *
 * Returns a referenced connection object (possibly an already existing one
 * if a connection to the peer is in progress or established), or NULL on
 * failure. The caller owns the returned reference.
 */
struct bt_conn *bt_conn_create_br(const bt_addr_t *peer,
				  const struct bt_br_conn_param *param)
{
	struct bt_hci_cp_connect *cp;
	struct bt_conn *conn;
	struct net_buf *buf;

	conn = bt_conn_lookup_addr_br(peer);
	if (conn) {
		switch (conn->state) {
		case BT_CONN_INITIATING:
		case BT_CONN_CONNECTED:
			/* Reuse the in-progress/established connection. */
			return conn;
		default:
			/* Object exists but is unusable (e.g. disconnecting). */
			bt_conn_unref(conn);
			return NULL;
		}
	}

	conn = bt_conn_add_br(peer);
	if (!conn) {
		return NULL;
	}

	buf = bt_hci_cmd_create(BT_HCI_OP_CONNECT, sizeof(*cp));
	if (!buf) {
		bt_conn_unref(conn);
		return NULL;
	}

	cp = net_buf_add(buf, sizeof(*cp));

	(void)memset(cp, 0, sizeof(*cp));

	memcpy(&cp->bdaddr, peer, sizeof(cp->bdaddr));
	cp->packet_type = sys_cpu_to_le16(0xcc18); /* DM1 DH1 DM3 DH3 DM5 DH5 */
	cp->pscan_rep_mode = 0x02; /* R2 */
	cp->allow_role_switch = param->allow_role_switch ? 0x01 : 0x00;
	cp->clock_offset = 0x0000; /* TODO use cached clock offset */

	if (bt_hci_cmd_send_sync(BT_HCI_OP_CONNECT, buf, NULL) < 0) {
		bt_conn_unref(conn);
		return NULL;
	}

	bt_conn_set_state(conn, BT_CONN_INITIATING);
	conn->role = BT_CONN_ROLE_CENTRAL;

	return conn;
}
2361
bt_conn_lookup_addr_sco(const bt_addr_t * peer)2362 struct bt_conn *bt_conn_lookup_addr_sco(const bt_addr_t *peer)
2363 {
2364 int i;
2365
2366 for (i = 0; i < ARRAY_SIZE(sco_conns); i++) {
2367 struct bt_conn *conn = bt_conn_ref(&sco_conns[i]);
2368
2369 if (!conn) {
2370 continue;
2371 }
2372
2373 if (conn->type != BT_CONN_TYPE_SCO) {
2374 bt_conn_unref(conn);
2375 continue;
2376 }
2377
2378 if (!bt_addr_eq(peer, &conn->sco.acl->br.dst)) {
2379 bt_conn_unref(conn);
2380 continue;
2381 }
2382
2383 return conn;
2384 }
2385
2386 return NULL;
2387 }
2388
bt_conn_lookup_addr_br(const bt_addr_t * peer)2389 struct bt_conn *bt_conn_lookup_addr_br(const bt_addr_t *peer)
2390 {
2391 int i;
2392
2393 for (i = 0; i < ARRAY_SIZE(acl_conns); i++) {
2394 struct bt_conn *conn = bt_conn_ref(&acl_conns[i]);
2395
2396 if (!conn) {
2397 continue;
2398 }
2399
2400 if (conn->type != BT_CONN_TYPE_BR) {
2401 bt_conn_unref(conn);
2402 continue;
2403 }
2404
2405 if (!bt_addr_eq(peer, &conn->br.dst)) {
2406 bt_conn_unref(conn);
2407 continue;
2408 }
2409
2410 return conn;
2411 }
2412
2413 return NULL;
2414 }
2415
bt_conn_add_sco(const bt_addr_t * peer,int link_type)2416 struct bt_conn *bt_conn_add_sco(const bt_addr_t *peer, int link_type)
2417 {
2418 struct bt_conn *sco_conn = sco_conn_new();
2419
2420 if (!sco_conn) {
2421 return NULL;
2422 }
2423
2424 sco_conn->sco.acl = bt_conn_lookup_addr_br(peer);
2425 if (!sco_conn->sco.acl) {
2426 bt_conn_unref(sco_conn);
2427 return NULL;
2428 }
2429
2430 sco_conn->type = BT_CONN_TYPE_SCO;
2431
2432 if (link_type == BT_HCI_SCO) {
2433 if (BT_FEAT_LMP_ESCO_CAPABLE(bt_dev.features)) {
2434 sco_conn->sco.pkt_type = (bt_dev.br.esco_pkt_type &
2435 ESCO_PKT_MASK);
2436 } else {
2437 sco_conn->sco.pkt_type = (bt_dev.br.esco_pkt_type &
2438 SCO_PKT_MASK);
2439 }
2440 } else if (link_type == BT_HCI_ESCO) {
2441 sco_conn->sco.pkt_type = (bt_dev.br.esco_pkt_type &
2442 ~EDR_ESCO_PKT_MASK);
2443 }
2444
2445 return sco_conn;
2446 }
2447
bt_conn_add_br(const bt_addr_t * peer)2448 struct bt_conn *bt_conn_add_br(const bt_addr_t *peer)
2449 {
2450 struct bt_conn *conn = acl_conn_new();
2451
2452 if (!conn) {
2453 return NULL;
2454 }
2455
2456 bt_addr_copy(&conn->br.dst, peer);
2457 conn->type = BT_CONN_TYPE_BR;
2458 conn->tx_data_pull = l2cap_br_data_pull;
2459 conn->get_and_clear_cb = acl_get_and_clear_cb;
2460 conn->has_data = acl_has_data;
2461
2462 return conn;
2463 }
2464
bt_hci_connect_br_cancel(struct bt_conn * conn)2465 static int bt_hci_connect_br_cancel(struct bt_conn *conn)
2466 {
2467 struct bt_hci_cp_connect_cancel *cp;
2468 struct bt_hci_rp_connect_cancel *rp;
2469 struct net_buf *buf, *rsp;
2470 int err;
2471
2472 buf = bt_hci_cmd_create(BT_HCI_OP_CONNECT_CANCEL, sizeof(*cp));
2473 if (!buf) {
2474 return -ENOBUFS;
2475 }
2476
2477 cp = net_buf_add(buf, sizeof(*cp));
2478 memcpy(&cp->bdaddr, &conn->br.dst, sizeof(cp->bdaddr));
2479
2480 err = bt_hci_cmd_send_sync(BT_HCI_OP_CONNECT_CANCEL, buf, &rsp);
2481 if (err) {
2482 return err;
2483 }
2484
2485 rp = (void *)rsp->data;
2486
2487 err = rp->status ? -EIO : 0;
2488
2489 net_buf_unref(rsp);
2490
2491 return err;
2492 }
2493
2494 #endif /* CONFIG_BT_CLASSIC */
2495
2496 #if defined(CONFIG_BT_SMP)
bt_conn_ltk_present(const struct bt_conn * conn)2497 bool bt_conn_ltk_present(const struct bt_conn *conn)
2498 {
2499 const struct bt_keys *keys = conn->le.keys;
2500
2501 if (!keys) {
2502 keys = bt_keys_find_addr(conn->id, &conn->le.dst);
2503 }
2504
2505 if (keys) {
2506 if (conn->role == BT_HCI_ROLE_CENTRAL) {
2507 return keys->keys & (BT_KEYS_LTK_P256 | BT_KEYS_PERIPH_LTK);
2508 } else {
2509 return keys->keys & (BT_KEYS_LTK_P256 | BT_KEYS_LTK);
2510 }
2511 }
2512
2513 return false;
2514 }
2515
bt_conn_identity_resolved(struct bt_conn * conn)2516 void bt_conn_identity_resolved(struct bt_conn *conn)
2517 {
2518 const bt_addr_le_t *rpa;
2519
2520 if (conn->role == BT_HCI_ROLE_CENTRAL) {
2521 rpa = &conn->le.resp_addr;
2522 } else {
2523 rpa = &conn->le.init_addr;
2524 }
2525
2526
2527 struct bt_conn_cb *callback;
2528
2529 SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
2530 if (callback->identity_resolved) {
2531 callback->identity_resolved(conn, rpa, &conn->le.dst);
2532 }
2533 }
2534
2535 STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
2536 if (cb->identity_resolved) {
2537 cb->identity_resolved(conn, rpa, &conn->le.dst);
2538 }
2539 }
2540 }
2541
/* Issue HCI LE Start Encryption on @p conn with the given Rand, EDIV and
 * LTK. An LTK shorter than the HCI field is zero-padded at the tail.
 * Returns 0 on success or a negative errno.
 */
int bt_conn_le_start_encryption(struct bt_conn *conn, uint8_t rand[8],
				uint8_t ediv[2], const uint8_t *ltk, size_t len)
{
	struct bt_hci_cp_le_start_encryption *cp;
	struct net_buf *cmd;

	if (len > sizeof(cp->ltk)) {
		return -EINVAL;
	}

	cmd = bt_hci_cmd_create(BT_HCI_OP_LE_START_ENCRYPTION, sizeof(*cp));
	if (cmd == NULL) {
		return -ENOBUFS;
	}

	cp = net_buf_add(cmd, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(conn->handle);
	memcpy(&cp->rand, rand, sizeof(cp->rand));
	memcpy(&cp->ediv, ediv, sizeof(cp->ediv));

	/* Zero the whole field first so any unused tail bytes are padded. */
	(void)memset(cp->ltk, 0, sizeof(cp->ltk));
	memcpy(cp->ltk, ltk, len);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_START_ENCRYPTION, cmd, NULL);
}
2569 #endif /* CONFIG_BT_SMP */
2570
2571 #if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CLASSIC)
bt_conn_enc_key_size(const struct bt_conn * conn)2572 uint8_t bt_conn_enc_key_size(const struct bt_conn *conn)
2573 {
2574 if (!bt_conn_is_type(conn, BT_CONN_TYPE_LE | BT_CONN_TYPE_BR)) {
2575 LOG_DBG("Invalid connection type: %u for %p", conn->type, conn);
2576 return 0;
2577 }
2578
2579 if (!conn->encrypt) {
2580 return 0;
2581 }
2582
2583 if (IS_ENABLED(CONFIG_BT_CLASSIC) &&
2584 conn->type == BT_CONN_TYPE_BR) {
2585 struct bt_hci_cp_read_encryption_key_size *cp;
2586 struct bt_hci_rp_read_encryption_key_size *rp;
2587 struct net_buf *buf;
2588 struct net_buf *rsp;
2589 uint8_t key_size;
2590
2591 buf = bt_hci_cmd_create(BT_HCI_OP_READ_ENCRYPTION_KEY_SIZE,
2592 sizeof(*cp));
2593 if (!buf) {
2594 return 0;
2595 }
2596
2597 cp = net_buf_add(buf, sizeof(*cp));
2598 cp->handle = sys_cpu_to_le16(conn->handle);
2599
2600 if (bt_hci_cmd_send_sync(BT_HCI_OP_READ_ENCRYPTION_KEY_SIZE,
2601 buf, &rsp)) {
2602 return 0;
2603 }
2604
2605 rp = (void *)rsp->data;
2606
2607 key_size = rp->status ? 0 : rp->key_size;
2608
2609 net_buf_unref(rsp);
2610
2611 return key_size;
2612 }
2613
2614 if (IS_ENABLED(CONFIG_BT_SMP)) {
2615 return conn->le.keys ? conn->le.keys->enc_size : 0;
2616 }
2617
2618 return 0;
2619 }
2620
/* Drop transient pairing state once a pairing procedure has finished
 * (successfully or not).
 */
static void reset_pairing(struct bt_conn *conn)
{
#if defined(CONFIG_BT_CLASSIC)
	if (conn->type == BT_CONN_TYPE_BR) {
		/* Clear every BR/EDR pairing-related flag. */
		atomic_clear_bit(conn->flags, BT_CONN_BR_PAIRING);
		atomic_clear_bit(conn->flags, BT_CONN_BR_PAIRED);
		atomic_clear_bit(conn->flags, BT_CONN_BR_PAIRING_INITIATOR);
		atomic_clear_bit(conn->flags, BT_CONN_BR_LEGACY_SECURE);
		atomic_clear_bit(conn->flags, BT_CONN_BR_GENERAL_BONDING);
	}
#endif /* CONFIG_BT_CLASSIC */

	/* Reset required security level to current operational */
	conn->required_sec_level = conn->sec_level;
}
2636
/* Propagate a security-level change on @p conn to the stack layers and to
 * all registered application callbacks. @p hci_err is the raw HCI status,
 * @p err the already-translated security error.
 */
void bt_conn_security_changed(struct bt_conn *conn, uint8_t hci_err,
			      enum bt_security_err err)
{
	struct bt_conn_cb *callback;

	reset_pairing(conn);
	/* Internal consumers first: L2CAP queues and, when central ISO is
	 * enabled, ISO channels waiting on encryption.
	 */
	bt_l2cap_security_changed(conn, hci_err);
	if (IS_ENABLED(CONFIG_BT_ISO_CENTRAL)) {
		bt_iso_security_changed(conn, hci_err);
	}

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
		if (callback->security_changed != NULL) {
			callback->security_changed(conn, conn->sec_level, err);
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
		if (cb->security_changed != NULL) {
			cb->security_changed(conn, conn->sec_level, err);
		}
	}

#if defined(CONFIG_BT_KEYS_OVERWRITE_OLDEST)
	/* Refresh key recency so the LRU eviction keeps active bonds. */
	if (!err && conn->sec_level >= BT_SECURITY_L2) {
		if (conn->type == BT_CONN_TYPE_LE) {
			bt_keys_update_usage(conn->id, bt_conn_get_dst(conn));
		}

#if defined(CONFIG_BT_CLASSIC)
		if (conn->type == BT_CONN_TYPE_BR) {
			bt_keys_link_key_update_usage(&conn->br.dst);
		}
#endif /* CONFIG_BT_CLASSIC */

	}
#endif
}
2675
start_security(struct bt_conn * conn)2676 static int start_security(struct bt_conn *conn)
2677 {
2678 if (IS_ENABLED(CONFIG_BT_CLASSIC) && conn->type == BT_CONN_TYPE_BR) {
2679 return bt_ssp_start_security(conn);
2680 }
2681
2682 if (IS_ENABLED(CONFIG_BT_SMP)) {
2683 return bt_smp_start_security(conn);
2684 }
2685
2686 return -EINVAL;
2687 }
2688
/* Request that @p conn be raised to security level @p sec (optionally
 * OR'ed with BT_SECURITY_FORCE_PAIR to force a new pairing).
 *
 * Returns 0 when the level is already met or the procedure was started,
 * -EINVAL for an unsupported connection type, -ENOTCONN when not
 * connected, or the error from the pairing engine.
 */
int bt_conn_set_security(struct bt_conn *conn, bt_security_t sec)
{
	bool force_pair;
	int err;

	if (!bt_conn_is_type(conn, BT_CONN_TYPE_LE | BT_CONN_TYPE_BR)) {
		LOG_DBG("Invalid connection type: %u for %p", conn->type, conn);
		return -EINVAL;
	}

	if (conn->state != BT_CONN_CONNECTED) {
		return -ENOTCONN;
	}

	/* Split the force-pair flag off the requested level. */
	force_pair = (sec & BT_SECURITY_FORCE_PAIR) != 0;
	sec &= ~BT_SECURITY_FORCE_PAIR;

	/* Build-time policies override the requested level. */
	if (IS_ENABLED(CONFIG_BT_SMP_SC_ONLY)) {
		sec = BT_SECURITY_L4;
	}

	if (IS_ENABLED(CONFIG_BT_SMP_OOB_LEGACY_PAIR_ONLY)) {
		sec = BT_SECURITY_L3;
	}

	/* Already at, or already working towards, the requested level. */
	if (!force_pair && (conn->sec_level >= sec || conn->required_sec_level >= sec)) {
		return 0;
	}

	atomic_set_bit_to(conn->flags, BT_CONN_FORCE_PAIR, force_pair);
	conn->required_sec_level = sec;

	err = start_security(conn);
	if (err) {
		/* Roll back to the operational level on failure. */
		conn->required_sec_level = conn->sec_level;
	}

	return err;
}
2731
bt_conn_get_security(const struct bt_conn * conn)2732 bt_security_t bt_conn_get_security(const struct bt_conn *conn)
2733 {
2734 if (!bt_conn_is_type(conn, BT_CONN_TYPE_LE | BT_CONN_TYPE_BR)) {
2735 LOG_DBG("Invalid connection type: %u for %p", conn->type, conn);
2736 return BT_SECURITY_L0;
2737 }
2738
2739 return conn->sec_level;
2740 }
2741 #else
bt_security_t bt_conn_get_security(const struct bt_conn *conn)
{
	/* Without SMP or BR/EDR security support the link can never be
	 * encrypted, so L1 (no encryption/authentication) is the only level.
	 */
	return BT_SECURITY_L1;
}
2746 #endif /* CONFIG_BT_SMP */
2747
bt_conn_cb_register(struct bt_conn_cb * cb)2748 int bt_conn_cb_register(struct bt_conn_cb *cb)
2749 {
2750 if (sys_slist_find(&conn_cbs, &cb->_node, NULL)) {
2751 return -EEXIST;
2752 }
2753
2754 sys_slist_append(&conn_cbs, &cb->_node);
2755
2756 return 0;
2757 }
2758
bt_conn_cb_unregister(struct bt_conn_cb * cb)2759 int bt_conn_cb_unregister(struct bt_conn_cb *cb)
2760 {
2761 CHECKIF(cb == NULL) {
2762 return -EINVAL;
2763 }
2764
2765 if (!sys_slist_find_and_remove(&conn_cbs, &cb->_node)) {
2766 return -ENOENT;
2767 }
2768
2769 return 0;
2770 }
2771
bt_conn_exists_le(uint8_t id,const bt_addr_le_t * peer)2772 bool bt_conn_exists_le(uint8_t id, const bt_addr_le_t *peer)
2773 {
2774 struct bt_conn *conn = bt_conn_lookup_addr_le(id, peer);
2775
2776 if (conn) {
2777 /* Connection object already exists.
2778 * If the connection state is not "disconnected",then the
2779 * connection was created but has not yet been disconnected.
2780 * If the connection state is "disconnected" then the connection
2781 * still has valid references. The last reference of the stack
2782 * is released after the disconnected callback.
2783 */
2784 LOG_WRN("Found valid connection (%p) with address %s in %s state ", conn,
2785 bt_addr_le_str(peer), state2str(conn->state));
2786 bt_conn_unref(conn);
2787 return true;
2788 }
2789
2790 return false;
2791 }
2792
/* Allocate and initialize an LE ACL connection object towards @p peer on
 * identity @p id. Returns a referenced object, or NULL when the pool is
 * exhausted.
 */
struct bt_conn *bt_conn_add_le(uint8_t id, const bt_addr_le_t *peer)
{
	struct bt_conn *le_conn = acl_conn_new();

	if (le_conn == NULL) {
		return NULL;
	}

	le_conn->id = id;
	le_conn->type = BT_CONN_TYPE_LE;
	bt_addr_le_copy(&le_conn->le.dst, peer);
#if defined(CONFIG_BT_SMP)
	/* Fresh link: no encryption, nothing required yet. */
	le_conn->sec_level = BT_SECURITY_L1;
	le_conn->required_sec_level = BT_SECURITY_L1;
#endif /* CONFIG_BT_SMP */
	/* Hook up the LE-specific TX data-path callbacks. */
	le_conn->tx_data_pull = l2cap_data_pull;
	le_conn->get_and_clear_cb = acl_get_and_clear_cb;
	le_conn->has_data = acl_has_data;
	le_conn->le.interval_min = BT_GAP_INIT_CONN_INT_MIN;
	le_conn->le.interval_max = BT_GAP_INIT_CONN_INT_MAX;

	return le_conn;
}
2816
bt_conn_is_peer_addr_le(const struct bt_conn * conn,uint8_t id,const bt_addr_le_t * peer)2817 bool bt_conn_is_peer_addr_le(const struct bt_conn *conn, uint8_t id,
2818 const bt_addr_le_t *peer)
2819 {
2820 if (id != conn->id) {
2821 return false;
2822 }
2823
2824 /* Check against conn dst address as it may be the identity address */
2825 if (bt_addr_le_eq(peer, &conn->le.dst)) {
2826 return true;
2827 }
2828
2829 /* Check against initial connection address */
2830 if (conn->role == BT_HCI_ROLE_CENTRAL) {
2831 return bt_addr_le_eq(peer, &conn->le.resp_addr);
2832 }
2833
2834 return bt_addr_le_eq(peer, &conn->le.init_addr);
2835 }
2836
/* Look up the LE ACL connection towards @p peer on identity @p id.
 * On success the returned object carries a new reference owned by the
 * caller; returns NULL when nothing matches.
 */
struct bt_conn *bt_conn_lookup_addr_le(uint8_t id, const bt_addr_le_t *peer)
{
	for (size_t idx = 0; idx < ARRAY_SIZE(acl_conns); idx++) {
		struct bt_conn *candidate = bt_conn_ref(&acl_conns[idx]);

		if (candidate == NULL) {
			continue;
		}

		if (candidate->type == BT_CONN_TYPE_LE &&
		    bt_conn_is_peer_addr_le(candidate, id, peer)) {
			return candidate;
		}

		bt_conn_unref(candidate);
	}

	return NULL;
}
2863
/* Look up an LE ACL connection on identity @p id in internal state
 * @p state. When @p peer is non-NULL the peer address must match as well.
 * The returned object carries a new reference owned by the caller;
 * returns NULL when nothing matches.
 */
struct bt_conn *bt_conn_lookup_state_le(uint8_t id, const bt_addr_le_t *peer,
					const bt_conn_state_t state)
{
	for (size_t idx = 0; idx < ARRAY_SIZE(acl_conns); idx++) {
		struct bt_conn *candidate = bt_conn_ref(&acl_conns[idx]);

		if (candidate == NULL) {
			continue;
		}

		if (candidate->type != BT_CONN_TYPE_LE) {
			bt_conn_unref(candidate);
			continue;
		}

		if (peer != NULL && !bt_conn_is_peer_addr_le(candidate, id, peer)) {
			bt_conn_unref(candidate);
			continue;
		}

		if (candidate->state != state || candidate->id != id) {
			bt_conn_unref(candidate);
			continue;
		}

		return candidate;
	}

	return NULL;
}
2896
bt_conn_get_dst(const struct bt_conn * conn)2897 const bt_addr_le_t *bt_conn_get_dst(const struct bt_conn *conn)
2898 {
2899 if (!bt_conn_is_type(conn, BT_CONN_TYPE_LE)) {
2900 LOG_DBG("Invalid connection type: %u for %p", conn->type, conn);
2901 return NULL;
2902 }
2903
2904 return &conn->le.dst;
2905 }
2906
/* Map an internal connection state to the coarser public bt_conn_state
 * enum exposed through bt_conn_get_info().
 */
static enum bt_conn_state conn_internal_to_public_state(bt_conn_state_t state)
{
	switch (state) {
	case BT_CONN_DISCONNECTED:
	case BT_CONN_DISCONNECT_COMPLETE:
		return BT_CONN_STATE_DISCONNECTED;
	case BT_CONN_SCAN_BEFORE_INITIATING:
	case BT_CONN_INITIATING_FILTER_LIST:
	case BT_CONN_ADV_CONNECTABLE:
	case BT_CONN_ADV_DIR_CONNECTABLE:
	case BT_CONN_INITIATING:
		/* All connection-establishment sub-states are "connecting". */
		return BT_CONN_STATE_CONNECTING;
	case BT_CONN_CONNECTED:
		return BT_CONN_STATE_CONNECTED;
	case BT_CONN_DISCONNECTING:
		return BT_CONN_STATE_DISCONNECTING;
	default:
		__ASSERT(false, "Invalid conn state %u", state);
		return 0;
	}
}
2928
/* Fill @p info with a snapshot of @p conn (type, role, state, security and
 * transport-specific details). Returns 0 on success or -EINVAL for an
 * unknown connection type.
 */
int bt_conn_get_info(const struct bt_conn *conn, struct bt_conn_info *info)
{
	/* Fields common to every connection type. */
	info->type = conn->type;
	info->role = conn->role;
	info->id = conn->id;
	info->state = conn_internal_to_public_state(conn->state);
	info->security.flags = 0;
	info->security.level = bt_conn_get_security(conn);
#if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CLASSIC)
	info->security.enc_key_size = bt_conn_enc_key_size(conn);
#else
	info->security.enc_key_size = 0;
#endif /* CONFIG_BT_SMP || CONFIG_BT_CLASSIC */

	switch (conn->type) {
	case BT_CONN_TYPE_LE:
		info->le.dst = &conn->le.dst;
		info->le.src = &bt_dev.id_addr[conn->id];
		/* local/remote map to init/resp depending on our role. */
		if (conn->role == BT_HCI_ROLE_CENTRAL) {
			info->le.local = &conn->le.init_addr;
			info->le.remote = &conn->le.resp_addr;
		} else {
			info->le.local = &conn->le.resp_addr;
			info->le.remote = &conn->le.init_addr;
		}
		info->le.interval = conn->le.interval;
		info->le.latency = conn->le.latency;
		info->le.timeout = conn->le.timeout;
#if defined(CONFIG_BT_USER_PHY_UPDATE)
		info->le.phy = &conn->le.phy;
#endif
#if defined(CONFIG_BT_USER_DATA_LEN_UPDATE)
		info->le.data_len = &conn->le.data_len;
#endif
#if defined(CONFIG_BT_SUBRATING)
		info->le.subrate = &conn->le.subrate;
#endif
		if (conn->le.keys && (conn->le.keys->flags & BT_KEYS_SC)) {
			info->security.flags |= BT_SECURITY_FLAG_SC;
		}
		if (conn->le.keys && (conn->le.keys->flags & BT_KEYS_OOB)) {
			info->security.flags |= BT_SECURITY_FLAG_OOB;
		}
		return 0;
#if defined(CONFIG_BT_CLASSIC)
	case BT_CONN_TYPE_BR:
		info->br.dst = &conn->br.dst;
		return 0;
#endif
#if defined(CONFIG_BT_ISO)
	case BT_CONN_TYPE_ISO:
		/* Unicast ISO inherits addresses from its parent ACL;
		 * broadcast ISO has no peer address.
		 */
		if (IS_ENABLED(CONFIG_BT_ISO_UNICAST) &&
		    conn->iso.info.type == BT_ISO_CHAN_TYPE_CONNECTED && conn->iso.acl != NULL) {
			info->le.dst = &conn->iso.acl->le.dst;
			info->le.src = &bt_dev.id_addr[conn->iso.acl->id];
		} else {
			info->le.src = BT_ADDR_LE_NONE;
			info->le.dst = BT_ADDR_LE_NONE;
		}
		return 0;
#endif
	default:
		break;
	}

	return -EINVAL;
}
2996
bt_conn_is_type(const struct bt_conn * conn,enum bt_conn_type type)2997 bool bt_conn_is_type(const struct bt_conn *conn, enum bt_conn_type type)
2998 {
2999 if (conn == NULL) {
3000 return false;
3001 }
3002
3003 return (conn->type & type) != 0;
3004 }
3005
/* Fill @p remote_info with the peer's version and feature information.
 * Returns -EINVAL for unsupported connection types, -EBUSY while the
 * feature/version exchange has not completed yet, -ENOTSUP for BR/EDR.
 */
int bt_conn_get_remote_info(const struct bt_conn *conn, struct bt_conn_remote_info *remote_info)
{
	if (!bt_conn_is_type(conn, BT_CONN_TYPE_LE | BT_CONN_TYPE_BR)) {
		LOG_DBG("Invalid connection type: %u for %p", conn->type, conn);
		return -EINVAL;
	}

	/* Remote info is only valid once the feature exchange (and, when
	 * enabled, the version exchange) has completed.
	 */
	if (!atomic_test_bit(conn->flags, BT_CONN_LE_FEATURES_EXCHANGED) ||
	    (IS_ENABLED(CONFIG_BT_REMOTE_VERSION) &&
	     !atomic_test_bit(conn->flags, BT_CONN_AUTO_VERSION_INFO))) {
		return -EBUSY;
	}

	remote_info->type = conn->type;
#if defined(CONFIG_BT_REMOTE_VERSION)
	/* The conn->rv values will be just zeroes if the operation failed */
	remote_info->version = conn->rv.version;
	remote_info->manufacturer = conn->rv.manufacturer;
	remote_info->subversion = conn->rv.subversion;
#else
	remote_info->version = 0;
	remote_info->manufacturer = 0;
	remote_info->subversion = 0;
#endif

	switch (conn->type) {
	case BT_CONN_TYPE_LE:
		remote_info->le.features = conn->le.features;
		return 0;
#if defined(CONFIG_BT_CLASSIC)
	case BT_CONN_TYPE_BR:
		/* TODO: Make sure the HCI commands to read br features and
		 * extended features has finished. */
		return -ENOTSUP;
#endif
	default:
		return -EINVAL;
	}
}
3045
3046 /* Read Transmit Power Level HCI command */
/* Issue the HCI Read Transmit Power Level command on @p conn.
 * @p type selects current vs. maximum level; the result is stored in
 * @p tx_power_level. Returns 0 on success or a negative errno.
 */
static int bt_conn_get_tx_power_level(struct bt_conn *conn, uint8_t type,
				      int8_t *tx_power_level)
{
	struct bt_hci_cp_read_tx_power_level *cp;
	struct bt_hci_rp_read_tx_power_level *rp;
	struct net_buf *cmd;
	struct net_buf *rsp;
	int err;

	cmd = bt_hci_cmd_create(BT_HCI_OP_READ_TX_POWER_LEVEL, sizeof(*cp));
	if (cmd == NULL) {
		return -ENOBUFS;
	}

	cp = net_buf_add(cmd, sizeof(*cp));
	cp->type = type;
	cp->handle = sys_cpu_to_le16(conn->handle);

	err = bt_hci_cmd_send_sync(BT_HCI_OP_READ_TX_POWER_LEVEL, cmd, &rsp);
	if (err != 0) {
		return err;
	}

	rp = (void *)rsp->data;
	*tx_power_level = rp->tx_power_level;
	net_buf_unref(rsp);

	return 0;
}
3076
3077 #if defined(CONFIG_BT_TRANSMIT_POWER_CONTROL)
notify_tx_power_report(struct bt_conn * conn,struct bt_conn_le_tx_power_report report)3078 void notify_tx_power_report(struct bt_conn *conn,
3079 struct bt_conn_le_tx_power_report report)
3080 {
3081 struct bt_conn_cb *callback;
3082
3083 SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
3084 if (callback->tx_power_report) {
3085 callback->tx_power_report(conn, &report);
3086 }
3087 }
3088
3089 STRUCT_SECTION_FOREACH(bt_conn_cb, cb)
3090 {
3091 if (cb->tx_power_report) {
3092 cb->tx_power_report(conn, &report);
3093 }
3094 }
3095 }
3096
bt_conn_le_enhanced_get_tx_power_level(struct bt_conn * conn,struct bt_conn_le_tx_power * tx_power)3097 int bt_conn_le_enhanced_get_tx_power_level(struct bt_conn *conn,
3098 struct bt_conn_le_tx_power *tx_power)
3099 {
3100 int err;
3101 struct bt_hci_rp_le_read_tx_power_level *rp;
3102 struct net_buf *rsp;
3103 struct bt_hci_cp_le_read_tx_power_level *cp;
3104 struct net_buf *buf;
3105
3106 if (!bt_conn_is_type(conn, BT_CONN_TYPE_LE)) {
3107 LOG_DBG("Invalid connection type: %u for %p", conn->type, conn);
3108 return -EINVAL;
3109 }
3110
3111 if (!tx_power->phy) {
3112 return -EINVAL;
3113 }
3114
3115 buf = bt_hci_cmd_create(BT_HCI_OP_LE_ENH_READ_TX_POWER_LEVEL, sizeof(*cp));
3116 if (!buf) {
3117 return -ENOBUFS;
3118 }
3119
3120 cp = net_buf_add(buf, sizeof(*cp));
3121 cp->handle = sys_cpu_to_le16(conn->handle);
3122 cp->phy = tx_power->phy;
3123
3124 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_ENH_READ_TX_POWER_LEVEL, buf, &rsp);
3125 if (err) {
3126 return err;
3127 }
3128
3129 rp = (void *) rsp->data;
3130 tx_power->phy = rp->phy;
3131 tx_power->current_level = rp->current_tx_power_level;
3132 tx_power->max_level = rp->max_tx_power_level;
3133 net_buf_unref(rsp);
3134
3135 return 0;
3136 }
3137
bt_conn_le_get_remote_tx_power_level(struct bt_conn * conn,enum bt_conn_le_tx_power_phy phy)3138 int bt_conn_le_get_remote_tx_power_level(struct bt_conn *conn,
3139 enum bt_conn_le_tx_power_phy phy)
3140 {
3141 struct bt_hci_cp_le_read_tx_power_level *cp;
3142 struct net_buf *buf;
3143
3144 if (!bt_conn_is_type(conn, BT_CONN_TYPE_LE)) {
3145 LOG_DBG("Invalid connection type: %u for %p", conn->type, conn);
3146 return -EINVAL;
3147 }
3148
3149 if (!phy) {
3150 return -EINVAL;
3151 }
3152
3153 buf = bt_hci_cmd_create(BT_HCI_OP_LE_READ_REMOTE_TX_POWER_LEVEL, sizeof(*cp));
3154 if (!buf) {
3155 return -ENOBUFS;
3156 }
3157
3158 cp = net_buf_add(buf, sizeof(*cp));
3159 cp->handle = sys_cpu_to_le16(conn->handle);
3160 cp->phy = phy;
3161
3162 return bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_REMOTE_TX_POWER_LEVEL, buf, NULL);
3163 }
3164
/* Enable or disable local and/or remote TX power change reporting on
 * @p conn via HCI LE Set Transmit Power Reporting Enable.
 */
int bt_conn_le_set_tx_power_report_enable(struct bt_conn *conn,
					  bool local_enable,
					  bool remote_enable)
{
	struct bt_hci_cp_le_set_tx_power_report_enable *cp;
	struct net_buf *cmd;

	if (!bt_conn_is_type(conn, BT_CONN_TYPE_LE)) {
		LOG_DBG("Invalid connection type: %u for %p", conn->type, conn);
		return -EINVAL;
	}

	cmd = bt_hci_cmd_create(BT_HCI_OP_LE_SET_TX_POWER_REPORT_ENABLE, sizeof(*cp));
	if (cmd == NULL) {
		return -ENOBUFS;
	}

	cp = net_buf_add(cmd, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(conn->handle);
	cp->local_enable = local_enable ? BT_HCI_LE_TX_POWER_REPORT_ENABLE :
		BT_HCI_LE_TX_POWER_REPORT_DISABLE;
	cp->remote_enable = remote_enable ? BT_HCI_LE_TX_POWER_REPORT_ENABLE :
		BT_HCI_LE_TX_POWER_REPORT_DISABLE;

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_TX_POWER_REPORT_ENABLE, cmd, NULL);
}
3191 #endif /* CONFIG_BT_TRANSMIT_POWER_CONTROL */
3192
bt_conn_le_get_tx_power_level(struct bt_conn * conn,struct bt_conn_le_tx_power * tx_power_level)3193 int bt_conn_le_get_tx_power_level(struct bt_conn *conn,
3194 struct bt_conn_le_tx_power *tx_power_level)
3195 {
3196 int err;
3197
3198 if (!bt_conn_is_type(conn, BT_CONN_TYPE_LE)) {
3199 LOG_DBG("Invalid connection type: %u for %p", conn->type, conn);
3200 return -EINVAL;
3201 }
3202
3203 if (tx_power_level->phy != 0) {
3204 if (IS_ENABLED(CONFIG_BT_TRANSMIT_POWER_CONTROL)) {
3205 return bt_conn_le_enhanced_get_tx_power_level(conn, tx_power_level);
3206 } else {
3207 return -ENOTSUP;
3208 }
3209 }
3210
3211 err = bt_conn_get_tx_power_level(conn, BT_TX_POWER_LEVEL_CURRENT,
3212 &tx_power_level->current_level);
3213 if (err) {
3214 return err;
3215 }
3216
3217 err = bt_conn_get_tx_power_level(conn, BT_TX_POWER_LEVEL_MAX,
3218 &tx_power_level->max_level);
3219 return err;
3220 }
3221
3222 #if defined(CONFIG_BT_PATH_LOSS_MONITORING)
notify_path_loss_threshold_report(struct bt_conn * conn,struct bt_conn_le_path_loss_threshold_report report)3223 void notify_path_loss_threshold_report(struct bt_conn *conn,
3224 struct bt_conn_le_path_loss_threshold_report report)
3225 {
3226 struct bt_conn_cb *callback;
3227
3228 SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
3229 if (callback->path_loss_threshold_report) {
3230 callback->path_loss_threshold_report(conn, &report);
3231 }
3232 }
3233
3234 STRUCT_SECTION_FOREACH(bt_conn_cb, cb)
3235 {
3236 if (cb->path_loss_threshold_report) {
3237 cb->path_loss_threshold_report(conn, &report);
3238 }
3239 }
3240 }
3241
bt_conn_le_set_path_loss_mon_param(struct bt_conn * conn,const struct bt_conn_le_path_loss_reporting_param * params)3242 int bt_conn_le_set_path_loss_mon_param(struct bt_conn *conn,
3243 const struct bt_conn_le_path_loss_reporting_param *params)
3244 {
3245 struct bt_hci_cp_le_set_path_loss_reporting_parameters *cp;
3246 struct net_buf *buf;
3247
3248 if (!bt_conn_is_type(conn, BT_CONN_TYPE_LE)) {
3249 LOG_DBG("Invalid connection type: %u for %p", conn->type, conn);
3250 return -EINVAL;
3251 }
3252
3253 buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_PATH_LOSS_REPORTING_PARAMETERS, sizeof(*cp));
3254 if (!buf) {
3255 return -ENOBUFS;
3256 }
3257
3258 cp = net_buf_add(buf, sizeof(*cp));
3259 cp->handle = sys_cpu_to_le16(conn->handle);
3260 cp->high_threshold = params->high_threshold;
3261 cp->high_hysteresis = params->high_hysteresis;
3262 cp->low_threshold = params->low_threshold;
3263 cp->low_hysteresis = params->low_hysteresis;
3264 cp->min_time_spent = sys_cpu_to_le16(params->min_time_spent);
3265
3266 return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_PATH_LOSS_REPORTING_PARAMETERS, buf, NULL);
3267 }
3268
/* Enable or disable path-loss reporting on @p conn via HCI LE Set Path
 * Loss Reporting Enable.
 */
int bt_conn_le_set_path_loss_mon_enable(struct bt_conn *conn, bool reporting_enable)
{
	struct bt_hci_cp_le_set_path_loss_reporting_enable *cp;
	struct net_buf *cmd;

	if (!bt_conn_is_type(conn, BT_CONN_TYPE_LE)) {
		LOG_DBG("Invalid connection type: %u for %p", conn->type, conn);
		return -EINVAL;
	}

	cmd = bt_hci_cmd_create(BT_HCI_OP_LE_SET_PATH_LOSS_REPORTING_ENABLE, sizeof(*cp));
	if (cmd == NULL) {
		return -ENOBUFS;
	}

	cp = net_buf_add(cmd, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(conn->handle);
	cp->enable = reporting_enable ? BT_HCI_LE_PATH_LOSS_REPORTING_ENABLE :
		BT_HCI_LE_PATH_LOSS_REPORTING_DISABLE;

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_PATH_LOSS_REPORTING_ENABLE, cmd, NULL);
}
3291 #endif /* CONFIG_BT_PATH_LOSS_MONITORING */
3292
3293 #if defined(CONFIG_BT_SUBRATING)
notify_subrate_change(struct bt_conn * conn,const struct bt_conn_le_subrate_changed params)3294 void notify_subrate_change(struct bt_conn *conn,
3295 const struct bt_conn_le_subrate_changed params)
3296 {
3297 struct bt_conn_cb *callback;
3298
3299 SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
3300 if (callback->subrate_changed) {
3301 callback->subrate_changed(conn, ¶ms);
3302 }
3303 }
3304
3305 STRUCT_SECTION_FOREACH(bt_conn_cb, cb)
3306 {
3307 if (cb->subrate_changed) {
3308 cb->subrate_changed(conn, ¶ms);
3309 }
3310 }
3311 }
3312
le_subrate_common_params_valid(const struct bt_conn_le_subrate_param * param)3313 static bool le_subrate_common_params_valid(const struct bt_conn_le_subrate_param *param)
3314 {
3315 /* All limits according to BT Core spec 5.4 [Vol 4, Part E, 7.8.123] */
3316
3317 if (param->subrate_min < 0x0001 || param->subrate_min > 0x01F4 ||
3318 param->subrate_max < 0x0001 || param->subrate_max > 0x01F4 ||
3319 param->subrate_min > param->subrate_max) {
3320 return false;
3321 }
3322
3323 if (param->max_latency > 0x01F3 ||
3324 param->subrate_max * (param->max_latency + 1) > 500) {
3325 return false;
3326 }
3327
3328 if (param->continuation_number > 0x01F3 ||
3329 param->continuation_number >= param->subrate_max) {
3330 return false;
3331 }
3332
3333 if (param->supervision_timeout < 0x000A ||
3334 param->supervision_timeout > 0xC80) {
3335 return false;
3336 }
3337
3338 return true;
3339 }
3340
bt_conn_le_subrate_set_defaults(const struct bt_conn_le_subrate_param * params)3341 int bt_conn_le_subrate_set_defaults(const struct bt_conn_le_subrate_param *params)
3342 {
3343 struct bt_hci_cp_le_set_default_subrate *cp;
3344 struct net_buf *buf;
3345
3346 if (!IS_ENABLED(CONFIG_BT_CENTRAL)) {
3347 return -ENOTSUP;
3348 }
3349
3350 if (!le_subrate_common_params_valid(params)) {
3351 return -EINVAL;
3352 }
3353
3354 buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_DEFAULT_SUBRATE, sizeof(*cp));
3355 if (!buf) {
3356 return -ENOBUFS;
3357 }
3358
3359 cp = net_buf_add(buf, sizeof(*cp));
3360 cp->subrate_min = sys_cpu_to_le16(params->subrate_min);
3361 cp->subrate_max = sys_cpu_to_le16(params->subrate_max);
3362 cp->max_latency = sys_cpu_to_le16(params->max_latency);
3363 cp->continuation_number = sys_cpu_to_le16(params->continuation_number);
3364 cp->supervision_timeout = sys_cpu_to_le16(params->supervision_timeout);
3365
3366 return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_DEFAULT_SUBRATE, buf, NULL);
3367 }
3368
/* Request a subrate change on an established LE connection.
 *
 * Validates @p params against the Core spec limits, then builds and
 * synchronously sends the HCI LE Subrate Request command for @p conn.
 *
 * @return 0 on success, -EINVAL for a non-LE connection or invalid
 *         parameters, -ENOBUFS when no HCI command buffer is available,
 *         otherwise the error from bt_hci_cmd_send_sync().
 */
int bt_conn_le_subrate_request(struct bt_conn *conn,
			       const struct bt_conn_le_subrate_param *params)
{
	struct bt_hci_cp_le_subrate_request *cp;
	struct net_buf *buf;

	if (!bt_conn_is_type(conn, BT_CONN_TYPE_LE)) {
		LOG_DBG("Invalid connection type: %u for %p", conn->type, conn);
		return -EINVAL;
	}

	if (!le_subrate_common_params_valid(params)) {
		return -EINVAL;
	}

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SUBRATE_REQUEST, sizeof(*cp));
	if (!buf) {
		return -ENOBUFS;
	}

	/* Multi-octet command fields are little-endian per the HCI spec. */
	cp = net_buf_add(buf, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(conn->handle);
	cp->subrate_min = sys_cpu_to_le16(params->subrate_min);
	cp->subrate_max = sys_cpu_to_le16(params->subrate_max);
	cp->max_latency = sys_cpu_to_le16(params->max_latency);
	cp->continuation_number = sys_cpu_to_le16(params->continuation_number);
	cp->supervision_timeout = sys_cpu_to_le16(params->supervision_timeout);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SUBRATE_REQUEST, buf, NULL);
}
3399 #endif /* CONFIG_BT_SUBRATING */
3400
3401 #if defined(CONFIG_BT_CHANNEL_SOUNDING)
notify_remote_cs_capabilities(struct bt_conn * conn,struct bt_conn_le_cs_capabilities params)3402 void notify_remote_cs_capabilities(struct bt_conn *conn, struct bt_conn_le_cs_capabilities params)
3403 {
3404 struct bt_conn_cb *callback;
3405
3406 SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
3407 if (callback->le_cs_remote_capabilities_available) {
3408 callback->le_cs_remote_capabilities_available(conn, ¶ms);
3409 }
3410 }
3411
3412 STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
3413 if (cb->le_cs_remote_capabilities_available) {
3414 cb->le_cs_remote_capabilities_available(conn, ¶ms);
3415 }
3416 }
3417 }
3418
notify_remote_cs_fae_table(struct bt_conn * conn,struct bt_conn_le_cs_fae_table params)3419 void notify_remote_cs_fae_table(struct bt_conn *conn, struct bt_conn_le_cs_fae_table params)
3420 {
3421 struct bt_conn_cb *callback;
3422
3423 SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
3424 if (callback->le_cs_remote_fae_table_available) {
3425 callback->le_cs_remote_fae_table_available(conn, ¶ms);
3426 }
3427 }
3428
3429 STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
3430 if (cb->le_cs_remote_fae_table_available) {
3431 cb->le_cs_remote_fae_table_available(conn, ¶ms);
3432 }
3433 }
3434 }
3435
notify_cs_config_created(struct bt_conn * conn,struct bt_conn_le_cs_config * params)3436 void notify_cs_config_created(struct bt_conn *conn, struct bt_conn_le_cs_config *params)
3437 {
3438 struct bt_conn_cb *callback;
3439
3440 SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
3441 if (callback->le_cs_config_created) {
3442 callback->le_cs_config_created(conn, params);
3443 }
3444 }
3445
3446 STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
3447 if (cb->le_cs_config_created) {
3448 cb->le_cs_config_created(conn, params);
3449 }
3450 }
3451 }
3452
/* Notify listeners that CS configuration @p config_id was removed.
 *
 * Walks both the runtime-registered callback list (conn_cbs) and the
 * statically registered BT_CONN_CB_DEFINE section entries.
 */
void notify_cs_config_removed(struct bt_conn *conn, uint8_t config_id)
{
	struct bt_conn_cb *registered;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, registered, _node) {
		if (registered->le_cs_config_removed != NULL) {
			registered->le_cs_config_removed(conn, config_id);
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, static_cb) {
		if (static_cb->le_cs_config_removed != NULL) {
			static_cb->le_cs_config_removed(conn, config_id);
		}
	}
}
3469
notify_cs_security_enable_available(struct bt_conn * conn)3470 void notify_cs_security_enable_available(struct bt_conn *conn)
3471 {
3472 struct bt_conn_cb *callback;
3473
3474 SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
3475 if (callback->le_cs_security_enabled) {
3476 callback->le_cs_security_enabled(conn);
3477 }
3478 }
3479
3480 STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
3481 if (cb->le_cs_security_enabled) {
3482 cb->le_cs_security_enabled(conn);
3483 }
3484 }
3485 }
3486
notify_cs_procedure_enable_available(struct bt_conn * conn,struct bt_conn_le_cs_procedure_enable_complete * params)3487 void notify_cs_procedure_enable_available(struct bt_conn *conn,
3488 struct bt_conn_le_cs_procedure_enable_complete *params)
3489 {
3490 struct bt_conn_cb *callback;
3491
3492 SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
3493 if (callback->le_cs_procedure_enabled) {
3494 callback->le_cs_procedure_enabled(conn, params);
3495 }
3496 }
3497
3498 STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
3499 if (cb->le_cs_procedure_enabled) {
3500 cb->le_cs_procedure_enabled(conn, params);
3501 }
3502 }
3503 }
3504
notify_cs_subevent_result(struct bt_conn * conn,struct bt_conn_le_cs_subevent_result * result)3505 void notify_cs_subevent_result(struct bt_conn *conn, struct bt_conn_le_cs_subevent_result *result)
3506 {
3507 struct bt_conn_cb *callback;
3508
3509 SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
3510 if (callback->le_cs_subevent_data_available) {
3511 callback->le_cs_subevent_data_available(conn, result);
3512 }
3513 }
3514
3515 STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
3516 if (cb->le_cs_subevent_data_available) {
3517 cb->le_cs_subevent_data_available(conn, result);
3518 }
3519 }
3520 }
3521 #endif /* CONFIG_BT_CHANNEL_SOUNDING */
3522
/* Request an update of the LE connection parameters.
 *
 * As central, the update request is sent immediately. As peripheral, the
 * request is sent immediately only once the peripheral parameter-update
 * timer has expired (BT_CONN_PERIPHERAL_PARAM_UPDATE set); otherwise the
 * parameters are stored on the connection and flagged for the timer to
 * send later.
 *
 * @return 0 on success or when the request was queued for later,
 *         -EINVAL for a non-LE connection, otherwise the error from
 *         send_conn_le_param_update().
 */
int bt_conn_le_param_update(struct bt_conn *conn,
			    const struct bt_le_conn_param *param)
{
	if (!bt_conn_is_type(conn, BT_CONN_TYPE_LE)) {
		LOG_DBG("Invalid connection type: %u for %p", conn->type, conn);
		return -EINVAL;
	}

	LOG_DBG("conn %p features 0x%02x params (%d-%d %d %d)", conn, conn->le.features[0],
		param->interval_min, param->interval_max, param->latency, param->timeout);

	if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
	    conn->role == BT_CONN_ROLE_CENTRAL) {
		return send_conn_le_param_update(conn, param);
	}

	if (IS_ENABLED(CONFIG_BT_PERIPHERAL)) {
		/* if peripheral conn param update timer expired just send request */
		if (atomic_test_bit(conn->flags, BT_CONN_PERIPHERAL_PARAM_UPDATE)) {
			return send_conn_le_param_update(conn, param);
		}

		/* store new conn params to be used by update timer */
		conn->le.interval_min = param->interval_min;
		conn->le.interval_max = param->interval_max;
		conn->le.pending_latency = param->latency;
		conn->le.pending_timeout = param->timeout;
		atomic_set_bit(conn->flags, BT_CONN_PERIPHERAL_PARAM_SET);
	}

	return 0;
}
3555
3556 #if defined(CONFIG_BT_USER_DATA_LEN_UPDATE)
bt_conn_le_data_len_update(struct bt_conn * conn,const struct bt_conn_le_data_len_param * param)3557 int bt_conn_le_data_len_update(struct bt_conn *conn,
3558 const struct bt_conn_le_data_len_param *param)
3559 {
3560 if (!bt_conn_is_type(conn, BT_CONN_TYPE_LE)) {
3561 LOG_DBG("Invalid connection type: %u for %p", conn->type, conn);
3562 return -EINVAL;
3563 }
3564
3565 if (conn->le.data_len.tx_max_len == param->tx_max_len &&
3566 conn->le.data_len.tx_max_time == param->tx_max_time) {
3567 return -EALREADY;
3568 }
3569
3570 return bt_le_set_data_len(conn, param->tx_max_len, param->tx_max_time);
3571 }
3572 #endif /* CONFIG_BT_USER_DATA_LEN_UPDATE */
3573
3574 #if defined(CONFIG_BT_USER_PHY_UPDATE)
bt_conn_le_phy_update(struct bt_conn * conn,const struct bt_conn_le_phy_param * param)3575 int bt_conn_le_phy_update(struct bt_conn *conn,
3576 const struct bt_conn_le_phy_param *param)
3577 {
3578 uint8_t phy_opts, all_phys;
3579
3580 if (!bt_conn_is_type(conn, BT_CONN_TYPE_LE)) {
3581 LOG_DBG("Invalid connection type: %u for %p", conn->type, conn);
3582 return -EINVAL;
3583 }
3584
3585 if ((param->options & BT_CONN_LE_PHY_OPT_CODED_S2) &&
3586 (param->options & BT_CONN_LE_PHY_OPT_CODED_S8)) {
3587 phy_opts = BT_HCI_LE_PHY_CODED_ANY;
3588 } else if (param->options & BT_CONN_LE_PHY_OPT_CODED_S2) {
3589 phy_opts = BT_HCI_LE_PHY_CODED_S2;
3590 } else if (param->options & BT_CONN_LE_PHY_OPT_CODED_S8) {
3591 phy_opts = BT_HCI_LE_PHY_CODED_S8;
3592 } else {
3593 phy_opts = BT_HCI_LE_PHY_CODED_ANY;
3594 }
3595
3596 all_phys = 0U;
3597 if (param->pref_tx_phy == BT_GAP_LE_PHY_NONE) {
3598 all_phys |= BT_HCI_LE_PHY_TX_ANY;
3599 }
3600
3601 if (param->pref_rx_phy == BT_GAP_LE_PHY_NONE) {
3602 all_phys |= BT_HCI_LE_PHY_RX_ANY;
3603 }
3604
3605 return bt_le_set_phy(conn, all_phys, param->pref_tx_phy,
3606 param->pref_rx_phy, phy_opts);
3607 }
3608 #endif
3609
3610 #if defined(CONFIG_BT_CENTRAL)
bt_conn_set_param_le(struct bt_conn * conn,const struct bt_le_conn_param * param)3611 static void bt_conn_set_param_le(struct bt_conn *conn,
3612 const struct bt_le_conn_param *param)
3613 {
3614 conn->le.interval_min = param->interval_min;
3615 conn->le.interval_max = param->interval_max;
3616 conn->le.latency = param->latency;
3617 conn->le.timeout = param->timeout;
3618 }
3619
create_param_setup(const struct bt_conn_le_create_param * param)3620 static void create_param_setup(const struct bt_conn_le_create_param *param)
3621 {
3622 bt_dev.create_param = *param;
3623
3624 bt_dev.create_param.timeout =
3625 (bt_dev.create_param.timeout != 0) ?
3626 bt_dev.create_param.timeout :
3627 (MSEC_PER_SEC / 10) * CONFIG_BT_CREATE_CONN_TIMEOUT;
3628
3629 bt_dev.create_param.interval_coded =
3630 (bt_dev.create_param.interval_coded != 0) ?
3631 bt_dev.create_param.interval_coded :
3632 bt_dev.create_param.interval;
3633
3634 bt_dev.create_param.window_coded =
3635 (bt_dev.create_param.window_coded != 0) ?
3636 bt_dev.create_param.window_coded :
3637 bt_dev.create_param.window;
3638 }
3639
3640 #if defined(CONFIG_BT_FILTER_ACCEPT_LIST)
/* Start auto-connection establishment against the filter accept list.
 *
 * Creates a placeholder connection object bound to BT_ADDR_LE_NONE and
 * moves it into the BT_CONN_INITIATING_FILTER_LIST state; the actual peer
 * is determined by the controller's filter accept list. The reference
 * created here is dropped before returning, so no connection reference is
 * handed to the caller.
 *
 * @return 0 on success; -EAGAIN when the stack is not ready; -EINVAL on
 *         invalid parameters or conflicting scanner/initiator state;
 *         -EALREADY when filtered initiation is already in progress;
 *         -ENOMEM when no free connection object is available; otherwise
 *         the error from bt_le_create_conn().
 */
int bt_conn_le_create_auto(const struct bt_conn_le_create_param *create_param,
			   const struct bt_le_conn_param *param)
{
	struct bt_conn *conn;
	int err;

	if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
		return -EAGAIN;
	}

	if (!bt_le_conn_params_valid(param)) {
		return -EINVAL;
	}

	/* A filtered-initiation placeholder already exists: refuse a second. */
	conn = bt_conn_lookup_state_le(BT_ID_DEFAULT, BT_ADDR_LE_NONE,
				       BT_CONN_INITIATING_FILTER_LIST);
	if (conn) {
		bt_conn_unref(conn);
		return -EALREADY;
	}

	/* Scanning either to connect or explicit scan, either case scanner was
	 * started by application and should not be stopped.
	 */
	if (!BT_LE_STATES_SCAN_INIT(bt_dev.le.states) &&
	    atomic_test_bit(bt_dev.flags, BT_DEV_SCANNING)) {
		return -EINVAL;
	}

	if (atomic_test_bit(bt_dev.flags, BT_DEV_INITIATING)) {
		return -EINVAL;
	}

	if (!bt_id_scan_random_addr_check()) {
		return -EINVAL;
	}

	conn = bt_conn_add_le(BT_ID_DEFAULT, BT_ADDR_LE_NONE);
	if (!conn) {
		return -ENOMEM;
	}

	bt_conn_set_param_le(conn, param);
	create_param_setup(create_param);

	atomic_set_bit(conn->flags, BT_CONN_AUTO_CONNECT);
	bt_conn_set_state(conn, BT_CONN_INITIATING_FILTER_LIST);

	err = bt_le_create_conn(conn);
	if (err) {
		LOG_ERR("Failed to start filtered scan");
		conn->err = 0;
		bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
		bt_conn_unref(conn);
		return err;
	}

	/* Since we don't give the application a reference to manage in
	 * this case, we need to release this reference here.
	 */
	bt_conn_unref(conn);
	return 0;
}
3704
/* Stop an ongoing filter-accept-list auto-connection attempt.
 *
 * Looks up the placeholder connection created by bt_conn_le_create_auto(),
 * releases it, and cancels the controller's initiator.
 *
 * @return 0 on success; -EINVAL when the stack is not ready, no filtered
 *         initiation is in progress, or the initiator is not active;
 *         otherwise the error from bt_le_create_conn_cancel().
 */
int bt_conn_create_auto_stop(void)
{
	struct bt_conn *conn;
	int err;

	if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
		return -EINVAL;
	}

	conn = bt_conn_lookup_state_le(BT_ID_DEFAULT, BT_ADDR_LE_NONE,
				       BT_CONN_INITIATING_FILTER_LIST);
	if (!conn) {
		return -EINVAL;
	}

	if (!atomic_test_bit(bt_dev.flags, BT_DEV_INITIATING)) {
		return -EINVAL;
	}

	/* Drop the placeholder before cancelling; the lookup reference and
	 * the object's own reference are both released via state change +
	 * unref.
	 */
	bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
	bt_conn_unref(conn);

	err = bt_le_create_conn_cancel();
	if (err) {
		LOG_ERR("Failed to stop initiator");
		return err;
	}

	return 0;
}
3735 #endif /* defined(CONFIG_BT_FILTER_ACCEPT_LIST) */
3736
conn_le_create_common_checks(const bt_addr_le_t * peer,const struct bt_le_conn_param * conn_param)3737 static int conn_le_create_common_checks(const bt_addr_le_t *peer,
3738 const struct bt_le_conn_param *conn_param)
3739 {
3740
3741 if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
3742 LOG_DBG("Conn check failed: BT dev not ready.");
3743 return -EAGAIN;
3744 }
3745
3746 if (!bt_le_conn_params_valid(conn_param)) {
3747 LOG_DBG("Conn check failed: invalid parameters.");
3748 return -EINVAL;
3749 }
3750
3751 if (!BT_LE_STATES_SCAN_INIT(bt_dev.le.states) && bt_le_explicit_scanner_running()) {
3752 LOG_DBG("Conn check failed: scanner was explicitly requested.");
3753 return -EAGAIN;
3754 }
3755
3756 if (atomic_test_bit(bt_dev.flags, BT_DEV_INITIATING)) {
3757 LOG_DBG("Conn check failed: device is already initiating.");
3758 return -EALREADY;
3759 }
3760
3761 if (!bt_id_scan_random_addr_check()) {
3762 LOG_DBG("Conn check failed: invalid random address.");
3763 return -EINVAL;
3764 }
3765
3766 if (bt_conn_exists_le(BT_ID_DEFAULT, peer)) {
3767 LOG_DBG("Conn check failed: ACL connection already exists.");
3768 return -EINVAL;
3769 }
3770
3771 return 0;
3772 }
3773
conn_le_create_helper(const bt_addr_le_t * peer,const struct bt_le_conn_param * conn_param)3774 static struct bt_conn *conn_le_create_helper(const bt_addr_le_t *peer,
3775 const struct bt_le_conn_param *conn_param)
3776 {
3777 bt_addr_le_t dst;
3778 struct bt_conn *conn;
3779
3780 if (bt_addr_le_is_resolved(peer)) {
3781 bt_addr_le_copy_resolved(&dst, peer);
3782 } else {
3783 bt_addr_le_copy(&dst, bt_lookup_id_addr(BT_ID_DEFAULT, peer));
3784 }
3785
3786 /* Only default identity supported for now */
3787 conn = bt_conn_add_le(BT_ID_DEFAULT, &dst);
3788 if (!conn) {
3789 return NULL;
3790 }
3791
3792 bt_conn_set_param_le(conn, conn_param);
3793
3794 return conn;
3795 }
3796
/* Initiate an LE connection to @p peer.
 *
 * On success *ret_conn holds a new reference the caller must release with
 * bt_conn_unref(). When host-based identity resolving is needed (more RL
 * entries than the controller can hold), the connection is parked in the
 * SCAN_BEFORE_INITIATING state and established once the peer is seen while
 * scanning.
 *
 * @return 0 on success; -EINVAL/-EAGAIN/-EALREADY from precondition
 *         checks; -ENOMEM when no connection object is free; otherwise an
 *         error from starting the scanner or initiator.
 */
int bt_conn_le_create(const bt_addr_le_t *peer, const struct bt_conn_le_create_param *create_param,
		      const struct bt_le_conn_param *conn_param, struct bt_conn **ret_conn)
{
	struct bt_conn *conn;
	int err;

	CHECKIF(ret_conn == NULL) {
		return -EINVAL;
	}

	CHECKIF(*ret_conn != NULL) {
		/* This rule helps application developers prevent leaks of connection references. If
		 * a bt_conn variable is not null, it presumably holds a reference and must not be
		 * overwritten. To avoid this warning, initialize the variables to null, and set
		 * them to null when moving the reference.
		 */
		LOG_WRN("*conn should be unreferenced and initialized to NULL");

		if (IS_ENABLED(CONFIG_BT_CONN_CHECK_NULL_BEFORE_CREATE)) {
			return -EINVAL;
		}
	}

	err = conn_le_create_common_checks(peer, conn_param);
	if (err) {
		return err;
	}

	conn = conn_le_create_helper(peer, conn_param);
	if (!conn) {
		return -ENOMEM;
	}

	if (BT_LE_STATES_SCAN_INIT(bt_dev.le.states) &&
	    bt_le_explicit_scanner_running() &&
	    !bt_le_explicit_scanner_uses_same_params(create_param)) {
		LOG_WRN("Use same scan and connection create params to obtain best performance");
	}

	create_param_setup(create_param);

#if defined(CONFIG_BT_SMP)
	if (bt_dev.le.rl_entries > bt_dev.le.rl_size) {
		/* Use host-based identity resolving. */
		bt_conn_set_state(conn, BT_CONN_SCAN_BEFORE_INITIATING);

		err = bt_le_scan_user_add(BT_LE_SCAN_USER_CONN);
		if (err) {
			bt_le_scan_user_remove(BT_LE_SCAN_USER_CONN);
			bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
			bt_conn_unref(conn);

			return err;
		}

		*ret_conn = conn;
		return 0;
	}
#endif

	bt_conn_set_state(conn, BT_CONN_INITIATING);

	err = bt_le_create_conn(conn);
	if (err) {
		conn->err = 0;
		bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
		bt_conn_unref(conn);

		/* Best-effort attempt to inform the scanner that the initiator stopped. */
		int scan_check_err = bt_le_scan_user_add(BT_LE_SCAN_USER_NONE);

		if (scan_check_err) {
			LOG_WRN("Error while updating the scanner (%d)", scan_check_err);
		}
		return err;
	}

	*ret_conn = conn;
	return 0;
}
3877
/* Initiate an LE connection to a synced device via PAwR (Periodic
 * Advertising with Responses).
 *
 * Requires @p adv to be an enabled periodic advertising set and the
 * controller to support the PAwR advertiser feature. On success *ret_conn
 * holds a new reference the caller must release with bt_conn_unref().
 *
 * @return 0 on success; -EINVAL on bad arguments, disabled advertiser, or
 *         out-of-range subevent; -ENOTSUP without PAwR support; -ENOMEM
 *         when no connection object is free; otherwise the error from
 *         bt_le_create_conn_synced().
 */
int bt_conn_le_create_synced(const struct bt_le_ext_adv *adv,
			     const struct bt_conn_le_create_synced_param *synced_param,
			     const struct bt_le_conn_param *conn_param, struct bt_conn **ret_conn)
{
	struct bt_conn *conn;
	int err;

	CHECKIF(ret_conn == NULL) {
		return -EINVAL;
	}

	CHECKIF(*ret_conn != NULL) {
		/* This rule helps application developers prevent leaks of connection references. If
		 * a bt_conn variable is not null, it presumably holds a reference and must not be
		 * overwritten. To avoid this warning, initialize the variables to null, and set
		 * them to null when moving the reference.
		 */
		LOG_WRN("*conn should be unreferenced and initialized to NULL");

		if (IS_ENABLED(CONFIG_BT_CONN_CHECK_NULL_BEFORE_CREATE)) {
			return -EINVAL;
		}
	}

	err = conn_le_create_common_checks(synced_param->peer, conn_param);
	if (err) {
		return err;
	}

	if (!atomic_test_bit(adv->flags, BT_PER_ADV_ENABLED)) {
		return -EINVAL;
	}

	if (!BT_FEAT_LE_PAWR_ADVERTISER(bt_dev.le.features)) {
		return -ENOTSUP;
	}

	if (synced_param->subevent >= BT_HCI_PAWR_SUBEVENT_MAX) {
		return -EINVAL;
	}

	conn = conn_le_create_helper(synced_param->peer, conn_param);
	if (!conn) {
		return -ENOMEM;
	}

	/* The connection creation timeout is not really useful for PAwR.
	 * The controller will give a result for the connection attempt
	 * within a periodic interval. We do not know the periodic interval
	 * used, so disable the timeout.
	 */
	bt_dev.create_param.timeout = 0;
	bt_conn_set_state(conn, BT_CONN_INITIATING);

	err = bt_le_create_conn_synced(conn, adv, synced_param->subevent);
	if (err) {
		conn->err = 0;
		bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
		bt_conn_unref(conn);

		return err;
	}

	*ret_conn = conn;
	return 0;
}
3944
3945 #if !defined(CONFIG_BT_FILTER_ACCEPT_LIST)
/* Enable or disable automatic (background-scan) connection establishment
 * to @p addr.
 *
 * With non-NULL @p param, the AUTO_CONNECT flag is set and an extra
 * reference is taken so the connection object persists while auto-connect
 * is armed; a disconnected object is moved to SCAN_BEFORE_INITIATING and
 * the scanner is started. With NULL @p param, auto-connect is disarmed
 * and the extra reference released.
 *
 * @return 0 on success; -EAGAIN when the stack is not ready; -EINVAL on
 *         invalid parameters or random-address conflict; -ENOMEM when no
 *         connection object is free; otherwise the scanner start error.
 */
int bt_le_set_auto_conn(const bt_addr_le_t *addr,
			const struct bt_le_conn_param *param)
{
	struct bt_conn *conn;

	if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
		return -EAGAIN;
	}

	if (param && !bt_le_conn_params_valid(param)) {
		return -EINVAL;
	}

	if (!bt_id_scan_random_addr_check()) {
		return -EINVAL;
	}

	/* Only default identity is supported */
	conn = bt_conn_lookup_addr_le(BT_ID_DEFAULT, addr);
	if (!conn) {
		conn = bt_conn_add_le(BT_ID_DEFAULT, addr);
		if (!conn) {
			return -ENOMEM;
		}
	}

	if (param) {
		bt_conn_set_param_le(conn, param);

		/* Take a reference the first time auto-connect is armed;
		 * it is released when auto-connect is disarmed below.
		 */
		if (!atomic_test_and_set_bit(conn->flags,
					     BT_CONN_AUTO_CONNECT)) {
			bt_conn_ref(conn);
		}
	} else {
		if (atomic_test_and_clear_bit(conn->flags,
					      BT_CONN_AUTO_CONNECT)) {
			bt_conn_unref(conn);
			if (conn->state == BT_CONN_SCAN_BEFORE_INITIATING) {
				bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
			}
		}
	}

	int err = 0;
	if (conn->state == BT_CONN_DISCONNECTED &&
	    atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
		if (param) {
			bt_conn_set_state(conn, BT_CONN_SCAN_BEFORE_INITIATING);
			err = bt_le_scan_user_add(BT_LE_SCAN_USER_CONN);
		}
	}

	/* Drop the lookup/add reference; the AUTO_CONNECT reference (if
	 * armed) keeps the object alive.
	 */
	bt_conn_unref(conn);

	return err;
}
4002 #endif /* !defined(CONFIG_BT_FILTER_ACCEPT_LIST) */
4003 #endif /* CONFIG_BT_CENTRAL */
4004
bt_conn_le_conn_update(struct bt_conn * conn,const struct bt_le_conn_param * param)4005 int bt_conn_le_conn_update(struct bt_conn *conn,
4006 const struct bt_le_conn_param *param)
4007 {
4008 struct hci_cp_le_conn_update *conn_update;
4009 struct net_buf *buf;
4010
4011 buf = bt_hci_cmd_create(BT_HCI_OP_LE_CONN_UPDATE,
4012 sizeof(*conn_update));
4013 if (!buf) {
4014 return -ENOBUFS;
4015 }
4016
4017 conn_update = net_buf_add(buf, sizeof(*conn_update));
4018 (void)memset(conn_update, 0, sizeof(*conn_update));
4019 conn_update->handle = sys_cpu_to_le16(conn->handle);
4020 conn_update->conn_interval_min = sys_cpu_to_le16(param->interval_min);
4021 conn_update->conn_interval_max = sys_cpu_to_le16(param->interval_max);
4022 conn_update->conn_latency = sys_cpu_to_le16(param->latency);
4023 conn_update->supervision_timeout = sys_cpu_to_le16(param->timeout);
4024
4025 return bt_hci_cmd_send_sync(BT_HCI_OP_LE_CONN_UPDATE, buf, NULL);
4026 }
4027
4028 #if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CLASSIC)
/* Register (or clear, with NULL) the global authentication callbacks.
 *
 * Only one set of auth callbacks may be registered at a time.
 *
 * @return 0 on success; -EALREADY when callbacks are already registered;
 *         -EINVAL when interactive callbacks are provided without a
 *         cancel handler.
 */
int bt_conn_auth_cb_register(const struct bt_conn_auth_cb *cb)
{
	if (!cb) {
		bt_auth = NULL;
		return 0;
	}

	if (bt_auth) {
		return -EALREADY;
	}

	/* The cancel callback must always be provided if the app provides
	 * interactive callbacks.
	 */
	if (!cb->cancel &&
	    (cb->passkey_display || cb->passkey_entry || cb->passkey_confirm ||
#if defined(CONFIG_BT_CLASSIC)
	     cb->pincode_entry ||
#endif
	     cb->pairing_confirm)) {
		return -EINVAL;
	}

	bt_auth = cb;
	return 0;
}
4055
4056 #if defined(CONFIG_BT_SMP)
bt_conn_auth_cb_overlay(struct bt_conn * conn,const struct bt_conn_auth_cb * cb)4057 int bt_conn_auth_cb_overlay(struct bt_conn *conn, const struct bt_conn_auth_cb *cb)
4058 {
4059 if (!bt_conn_is_type(conn, BT_CONN_TYPE_LE | BT_CONN_TYPE_BR)) {
4060 LOG_DBG("Invalid connection type: %u for %p", conn->type, conn);
4061 return -EINVAL;
4062 }
4063
4064 /* The cancel callback must always be provided if the app provides
4065 * interactive callbacks.
4066 */
4067 if (cb && !cb->cancel &&
4068 (cb->passkey_display || cb->passkey_entry || cb->passkey_confirm ||
4069 cb->pairing_confirm)) {
4070 return -EINVAL;
4071 }
4072
4073 if (conn->type == BT_CONN_TYPE_LE) {
4074 return bt_smp_auth_cb_overlay(conn, cb);
4075 }
4076
4077 return -ENOTSUP;
4078 }
4079 #endif
4080
bt_conn_auth_info_cb_register(struct bt_conn_auth_info_cb * cb)4081 int bt_conn_auth_info_cb_register(struct bt_conn_auth_info_cb *cb)
4082 {
4083 CHECKIF(cb == NULL) {
4084 return -EINVAL;
4085 }
4086
4087 if (sys_slist_find(&bt_auth_info_cbs, &cb->node, NULL)) {
4088 return -EALREADY;
4089 }
4090
4091 sys_slist_append(&bt_auth_info_cbs, &cb->node);
4092
4093 return 0;
4094 }
4095
bt_conn_auth_info_cb_unregister(struct bt_conn_auth_info_cb * cb)4096 int bt_conn_auth_info_cb_unregister(struct bt_conn_auth_info_cb *cb)
4097 {
4098 CHECKIF(cb == NULL) {
4099 return -EINVAL;
4100 }
4101
4102 if (!sys_slist_find_and_remove(&bt_auth_info_cbs, &cb->node)) {
4103 return -EALREADY;
4104 }
4105
4106 return 0;
4107 }
4108
bt_conn_auth_passkey_entry(struct bt_conn * conn,unsigned int passkey)4109 int bt_conn_auth_passkey_entry(struct bt_conn *conn, unsigned int passkey)
4110 {
4111 if (!bt_conn_is_type(conn, BT_CONN_TYPE_LE | BT_CONN_TYPE_BR)) {
4112 LOG_DBG("Invalid connection type: %u for %p", conn->type, conn);
4113 return -EINVAL;
4114 }
4115
4116 if (IS_ENABLED(CONFIG_BT_SMP) && conn->type == BT_CONN_TYPE_LE) {
4117 return bt_smp_auth_passkey_entry(conn, passkey);
4118 }
4119
4120 if (IS_ENABLED(CONFIG_BT_CLASSIC) && conn->type == BT_CONN_TYPE_BR) {
4121 if (!bt_auth) {
4122 return -EINVAL;
4123 }
4124
4125 return bt_ssp_auth_passkey_entry(conn, passkey);
4126 }
4127
4128 return -EINVAL;
4129 }
4130
4131 #if defined(CONFIG_BT_PASSKEY_KEYPRESS)
bt_conn_auth_keypress_notify(struct bt_conn * conn,enum bt_conn_auth_keypress type)4132 int bt_conn_auth_keypress_notify(struct bt_conn *conn,
4133 enum bt_conn_auth_keypress type)
4134 {
4135 if (!bt_conn_is_type(conn, BT_CONN_TYPE_LE)) {
4136 LOG_DBG("Invalid connection type: %u for %p", conn->type, conn);
4137 return -EINVAL;
4138 }
4139
4140 if (IS_ENABLED(CONFIG_BT_SMP)) {
4141 return bt_smp_auth_keypress_notify(conn, type);
4142 }
4143
4144 LOG_ERR("Not implemented for conn type %d", conn->type);
4145 return -EINVAL;
4146 }
4147 #endif
4148
bt_conn_auth_passkey_confirm(struct bt_conn * conn)4149 int bt_conn_auth_passkey_confirm(struct bt_conn *conn)
4150 {
4151 if (!bt_conn_is_type(conn, BT_CONN_TYPE_LE | BT_CONN_TYPE_BR)) {
4152 LOG_DBG("Invalid connection type: %u for %p", conn->type, conn);
4153 return -EINVAL;
4154 }
4155
4156 if (IS_ENABLED(CONFIG_BT_SMP) && conn->type == BT_CONN_TYPE_LE) {
4157 return bt_smp_auth_passkey_confirm(conn);
4158 }
4159
4160 if (IS_ENABLED(CONFIG_BT_CLASSIC) && conn->type == BT_CONN_TYPE_BR) {
4161 if (!bt_auth) {
4162 return -EINVAL;
4163 }
4164
4165 return bt_ssp_auth_passkey_confirm(conn);
4166 }
4167
4168 return -EINVAL;
4169 }
4170
bt_conn_auth_cancel(struct bt_conn * conn)4171 int bt_conn_auth_cancel(struct bt_conn *conn)
4172 {
4173 if (!bt_conn_is_type(conn, BT_CONN_TYPE_LE | BT_CONN_TYPE_BR)) {
4174 LOG_DBG("Invalid connection type: %u for %p", conn->type, conn);
4175 return -EINVAL;
4176 }
4177
4178 if (IS_ENABLED(CONFIG_BT_SMP) && conn->type == BT_CONN_TYPE_LE) {
4179 return bt_smp_auth_cancel(conn);
4180 }
4181
4182 if (IS_ENABLED(CONFIG_BT_CLASSIC) && conn->type == BT_CONN_TYPE_BR) {
4183 if (!bt_auth) {
4184 return -EINVAL;
4185 }
4186
4187 return bt_ssp_auth_cancel(conn);
4188 }
4189
4190 return -EINVAL;
4191 }
4192
bt_conn_auth_pairing_confirm(struct bt_conn * conn)4193 int bt_conn_auth_pairing_confirm(struct bt_conn *conn)
4194 {
4195 if (!bt_conn_is_type(conn, BT_CONN_TYPE_LE | BT_CONN_TYPE_BR)) {
4196 LOG_DBG("Invalid connection type: %u for %p", conn->type, conn);
4197 return -EINVAL;
4198 }
4199
4200 if (IS_ENABLED(CONFIG_BT_SMP) && conn->type == BT_CONN_TYPE_LE) {
4201 return bt_smp_auth_pairing_confirm(conn);
4202 }
4203
4204 if (IS_ENABLED(CONFIG_BT_CLASSIC) && conn->type == BT_CONN_TYPE_BR) {
4205 if (!bt_auth) {
4206 return -EINVAL;
4207 }
4208
4209 return bt_ssp_auth_pairing_confirm(conn);
4210 }
4211
4212 return -EINVAL;
4213 }
4214 #endif /* CONFIG_BT_SMP || CONFIG_BT_CLASSIC */
4215
/* Look up an ACL connection object by pool index.
 *
 * @return a new reference to the connection object, or NULL when the
 *         index is out of range (or the slot is unused, per
 *         bt_conn_ref() semantics).
 */
struct bt_conn *bt_conn_lookup_index(uint8_t index)
{
	bool in_range = index < ARRAY_SIZE(acl_conns);

	return in_range ? bt_conn_ref(&acl_conns[index]) : NULL;
}
4224
/* One-time initialization of the connection layer.
 *
 * Populates the free TX-context FIFO, initializes ATT, SMP and L2CAP,
 * and re-arms background scanning for any connection objects that were
 * restored with the AUTO_CONNECT flag set (e.g. from settings).
 *
 * @return 0 on success, otherwise the error from bt_smp_init().
 */
int bt_conn_init(void)
{
	int err, i;

	k_fifo_init(&free_tx);
	for (i = 0; i < ARRAY_SIZE(conn_tx); i++) {
		k_fifo_put(&free_tx, &conn_tx[i]);
	}

	bt_att_init();

	err = bt_smp_init();
	if (err) {
		return err;
	}

	bt_l2cap_init();

	/* Initialize background scan */
	if (IS_ENABLED(CONFIG_BT_CENTRAL)) {
		for (i = 0; i < ARRAY_SIZE(acl_conns); i++) {
			struct bt_conn *conn = bt_conn_ref(&acl_conns[i]);

			if (!conn) {
				continue;
			}

#if !defined(CONFIG_BT_FILTER_ACCEPT_LIST)
			if (atomic_test_bit(conn->flags,
					    BT_CONN_AUTO_CONNECT)) {
				/* Only the default identity is supported */
				conn->id = BT_ID_DEFAULT;
				bt_conn_set_state(conn,
						  BT_CONN_SCAN_BEFORE_INITIATING);
			}
#endif /* !defined(CONFIG_BT_FILTER_ACCEPT_LIST) */

			bt_conn_unref(conn);
		}
	}

	return 0;
}
4268
4269 #if defined(CONFIG_BT_DF_CONNECTION_CTE_RX)
/* Common handler for standard and vendor-specific connection IQ report
 * events.
 *
 * Parses the event into a bt_df_conn_iq_samples_report, dispatches it to
 * every registered cte_report_cb (both the runtime list and the static
 * callback section), then releases the connection reference obtained by
 * the prepare helper.
 */
void bt_hci_le_df_connection_iq_report_common(uint8_t event, struct net_buf *buf)
{
	struct bt_df_conn_iq_samples_report iq_report;
	struct bt_conn *conn;
	int err;

	if (event == BT_HCI_EVT_LE_CONNECTION_IQ_REPORT) {
		err = hci_df_prepare_connection_iq_report(buf, &iq_report, &conn);
		if (err) {
			LOG_ERR("Prepare CTE conn IQ report failed %d", err);
			return;
		}
	} else if (IS_ENABLED(CONFIG_BT_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES) &&
		   event == BT_HCI_EVT_VS_LE_CONNECTION_IQ_REPORT) {
		err = hci_df_vs_prepare_connection_iq_report(buf, &iq_report, &conn);
		if (err) {
			LOG_ERR("Prepare CTE conn IQ report failed %d", err);
			return;
		}
	} else {
		LOG_ERR("Unhandled VS connection IQ report");
		return;
	}

	struct bt_conn_cb *callback;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
		if (callback->cte_report_cb) {
			callback->cte_report_cb(conn, &iq_report);
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, cb)
	{
		if (cb->cte_report_cb) {
			cb->cte_report_cb(conn, &iq_report);
		}
	}

	/* Drop the reference taken by the prepare helper. */
	bt_conn_unref(conn);
}
4311
/* HCI event handler: standard LE connection IQ report. */
void bt_hci_le_df_connection_iq_report(struct net_buf *buf)
{
	bt_hci_le_df_connection_iq_report_common(BT_HCI_EVT_LE_CONNECTION_IQ_REPORT, buf);
}
4316
4317 #if defined(CONFIG_BT_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES)
/* Entry point for the vendor-specific LE Connection IQ Report HCI event;
 * forwards to the common dispatcher tagged with the VS event code.
 */
void bt_hci_le_vs_df_connection_iq_report(struct net_buf *buf)
{
	bt_hci_le_df_connection_iq_report_common(BT_HCI_EVT_VS_LE_CONNECTION_IQ_REPORT, buf);
}
4322 #endif /* CONFIG_BT_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES */
4323 #endif /* CONFIG_BT_DF_CONNECTION_CTE_RX */
4324
4325 #if defined(CONFIG_BT_DF_CONNECTION_CTE_REQ)
bt_hci_le_df_cte_req_failed(struct net_buf * buf)4326 void bt_hci_le_df_cte_req_failed(struct net_buf *buf)
4327 {
4328 struct bt_df_conn_iq_samples_report iq_report;
4329 struct bt_conn *conn;
4330 int err;
4331
4332 err = hci_df_prepare_conn_cte_req_failed(buf, &iq_report, &conn);
4333 if (err) {
4334 LOG_ERR("Prepare CTE REQ failed IQ report failed %d", err);
4335 return;
4336 }
4337
4338 struct bt_conn_cb *callback;
4339
4340 SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
4341 if (callback->cte_report_cb) {
4342 callback->cte_report_cb(conn, &iq_report);
4343 }
4344 }
4345
4346 STRUCT_SECTION_FOREACH(bt_conn_cb, cb)
4347 {
4348 if (cb->cte_report_cb) {
4349 cb->cte_report_cb(conn, &iq_report);
4350 }
4351 }
4352
4353 bt_conn_unref(conn);
4354 }
4355 #endif /* CONFIG_BT_DF_CONNECTION_CTE_REQ */
4356
4357 #endif /* CONFIG_BT_CONN */
4358
4359 #if defined(CONFIG_BT_CONN_TX_NOTIFY_WQ)
bt_conn_tx_workq_init(void)4360 static int bt_conn_tx_workq_init(void)
4361 {
4362 const struct k_work_queue_config cfg = {
4363 .name = "BT CONN TX WQ",
4364 .no_yield = false,
4365 .essential = false,
4366 };
4367
4368 k_work_queue_init(&conn_tx_workq);
4369 k_work_queue_start(&conn_tx_workq, conn_tx_workq_thread_stack,
4370 K_THREAD_STACK_SIZEOF(conn_tx_workq_thread_stack),
4371 K_PRIO_COOP(CONFIG_BT_CONN_TX_NOTIFY_WQ_PRIO), &cfg);
4372
4373 return 0;
4374 }
4375
4376 SYS_INIT(bt_conn_tx_workq_init, POST_KERNEL, CONFIG_BT_CONN_TX_NOTIFY_WQ_INIT_PRIORITY);
4377 #endif /* CONFIG_BT_CONN_TX_NOTIFY_WQ */
4378