/* conn.c - Bluetooth connection handling */

/*
 * Copyright (c) 2015-2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <string.h>
#include <errno.h>
#include <stdbool.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/check.h>
#include <zephyr/sys/iterable_sections.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/util_macro.h>
#include <zephyr/sys/slist.h>
#include <zephyr/debug/stack.h>
#include <zephyr/sys/__assert.h>

#include <zephyr/bluetooth/hci.h>
#include <zephyr/bluetooth/bluetooth.h>
#include <zephyr/bluetooth/direction.h>
#include <zephyr/bluetooth/conn.h>
#include <zephyr/bluetooth/hci_vs.h>
#include <zephyr/bluetooth/att.h>

#include "common/assert.h"
#include "common/bt_str.h"

#include "buf_view.h"
#include "addr_internal.h"
#include "hci_core.h"
#include "id.h"
#include "adv.h"
#include "scan.h"
#include "conn_internal.h"
#include "l2cap_internal.h"
#include "keys.h"
#include "smp.h"
#include "classic/ssp.h"
#include "att_internal.h"
#include "iso_internal.h"
#include "direction_internal.h"
#include "classic/sco_internal.h"

#define LOG_LEVEL CONFIG_BT_CONN_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(bt_conn);

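/* FIFO of free TX contexts, shared by ACL and ISO. Contexts are taken with
 * conn_tx_alloc() and returned with tx_free(); the ISO contexts are seeded
 * into it in bt_conn_iso_init().
 */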
K_FIFO_DEFINE(free_tx);

#if defined(CONFIG_BT_CONN_TX_NOTIFY_WQ)
static struct k_work_q conn_tx_workq;
static K_KERNEL_STACK_DEFINE(conn_tx_workq_thread_stack, CONFIG_BT_CONN_TX_NOTIFY_WQ_STACK_SIZE);
#endif /* CONFIG_BT_CONN_TX_NOTIFY_WQ */

static void tx_free(struct bt_conn_tx *tx);

static void conn_tx_destroy(struct bt_conn *conn, struct bt_conn_tx *tx)
{
	__ASSERT_NO_MSG(tx);

	bt_conn_tx_cb_t cb = tx->cb;
	void *user_data = tx->user_data;

	LOG_DBG("conn %p tx %p cb %p ud %p", conn, tx, cb, user_data);

	/* Free up TX metadata before calling callback in case the callback
	 * tries to allocate metadata
	 */
	tx_free(tx);

	if (cb) {
		cb(conn, user_data, -ESHUTDOWN);
	}
}

#if defined(CONFIG_BT_CONN_TX)
static void tx_complete_work(struct k_work *work);
#endif /* CONFIG_BT_CONN_TX */

static void notify_recycled_conn_slot(void);

void bt_tx_irq_raise(void);

/* Group BT_CONN-only code in this section */
#if defined(CONFIG_BT_CONN)
/* Peripheral timeout to initialize Connection Parameter Update procedure */
#define CONN_UPDATE_TIMEOUT K_MSEC(CONFIG_BT_CONN_PARAM_UPDATE_TIMEOUT)

static void deferred_work(struct k_work *work);
static void notify_connected(struct bt_conn *conn);

static struct bt_conn acl_conns[CONFIG_BT_MAX_CONN];
NET_BUF_POOL_DEFINE(acl_tx_pool, CONFIG_BT_L2CAP_TX_BUF_COUNT,
		    BT_L2CAP_BUF_SIZE(CONFIG_BT_L2CAP_TX_MTU),
		    CONFIG_BT_CONN_TX_USER_DATA_SIZE, NULL);

#if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CLASSIC)
const struct bt_conn_auth_cb *bt_auth;
sys_slist_t bt_auth_info_cbs = SYS_SLIST_STATIC_INIT(&bt_auth_info_cbs);
#endif /* CONFIG_BT_SMP || CONFIG_BT_CLASSIC */

static sys_slist_t conn_cbs = SYS_SLIST_STATIC_INIT(&conn_cbs);

static struct bt_conn_tx conn_tx[CONFIG_BT_CONN_TX_MAX];

#if defined(CONFIG_BT_CLASSIC)
static int bt_hci_connect_br_cancel(struct bt_conn *conn);

static struct bt_conn sco_conns[CONFIG_BT_MAX_SCO_CONN];
#endif /* CONFIG_BT_CLASSIC */
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_CONN_TX)
void frag_destroy(struct net_buf *buf);

/* Storage for fragments (views) into the upper layers' PDUs. */
/* TODO: remove user-data requirements */
NET_BUF_POOL_FIXED_DEFINE(fragments, CONFIG_BT_CONN_FRAG_COUNT, 0,
			  CONFIG_BT_CONN_TX_USER_DATA_SIZE, frag_destroy);

struct frag_md {
	struct bt_buf_view_meta view_meta;
};
struct frag_md frag_md_pool[CONFIG_BT_CONN_FRAG_COUNT];

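/* Fragment metadata lives out-of-band: net_buf_id() returns a stable,
 * zero-based index for every buffer in the `fragments` pool, which maps
 * one-to-one onto a slot in frag_md_pool[].
 */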
struct frag_md *get_frag_md(struct net_buf *fragment)
{
	return &frag_md_pool[net_buf_id(fragment)];
}

void frag_destroy(struct net_buf *frag)
{
	/* allow next view to be allocated (and unlock the parent buf) */
	bt_buf_destroy_view(frag, &get_frag_md(frag)->view_meta);

	LOG_DBG("");

	/* Kick the TX processor to send the rest of the frags. */
	bt_tx_irq_raise();
}

static struct net_buf *get_data_frag(struct net_buf *outside, size_t winsize)
{
	struct net_buf *window;

	__ASSERT_NO_MSG(!bt_buf_has_view(outside));

	/* Keeping a ref is the caller's responsibility */
	window = net_buf_alloc_len(&fragments, 0, K_NO_WAIT);
	if (!window) {
		return window;
	}

	window = bt_buf_make_view(window, outside,
				  winsize, &get_frag_md(window)->view_meta);

	LOG_DBG("get-acl-frag: outside %p window %p size %zu", outside, window, winsize);

	return window;
}
#else /* !CONFIG_BT_CONN_TX */
static struct net_buf *get_data_frag(struct net_buf *outside, size_t winsize)
{
	ARG_UNUSED(outside);
	ARG_UNUSED(winsize);

	/* This will never get called. It's only to allow compilation to take
	 * place and the later linker stage to remove this implementation.
	 */

	return NULL;
}
#endif /* CONFIG_BT_CONN_TX */

#if defined(CONFIG_BT_ISO)
extern struct bt_conn iso_conns[CONFIG_BT_ISO_MAX_CHAN];

/* Callback TX buffers for ISO */
static struct bt_conn_tx iso_tx[CONFIG_BT_ISO_TX_BUF_COUNT];

int bt_conn_iso_init(void)
{
	for (size_t i = 0; i < ARRAY_SIZE(iso_tx); i++) {
		k_fifo_put(&free_tx, &iso_tx[i]);
	}

	return 0;
}
#endif /* CONFIG_BT_ISO */

struct k_sem *bt_conn_get_pkts(struct bt_conn *conn)
{
#if defined(CONFIG_BT_CLASSIC)
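	/* A zero LE ACL MTU means the controller reported no dedicated LE
	 * buffers (LE Read Buffer Size), so the BR/EDR buffer pool is shared
	 * with LE traffic as well.
	 */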
	if (conn->type == BT_CONN_TYPE_BR || !bt_dev.le.acl_mtu) {
		return &bt_dev.br.pkts;
	}
#endif /* CONFIG_BT_CLASSIC */

#if defined(CONFIG_BT_ISO)
	/* Use ISO pkts semaphore if LE Read Buffer Size command returned
	 * dedicated ISO buffers.
	 */
	if (conn->type == BT_CONN_TYPE_ISO) {
		if (bt_dev.le.iso_mtu && bt_dev.le.iso_limit != 0) {
			return &bt_dev.le.iso_pkts;
		}

		return NULL;
	}
#endif /* CONFIG_BT_ISO */

#if defined(CONFIG_BT_CONN)
	if (bt_dev.le.acl_mtu) {
		return &bt_dev.le.acl_pkts;
	}
#endif /* CONFIG_BT_CONN */

	return NULL;
}

static inline const char *state2str(bt_conn_state_t state)
{
	switch (state) {
	case BT_CONN_DISCONNECTED:
		return "disconnected";
	case BT_CONN_DISCONNECT_COMPLETE:
		return "disconnect-complete";
	case BT_CONN_INITIATING:
		return "initiating";
	case BT_CONN_SCAN_BEFORE_INITIATING:
		return "scan-before-initiating";
	case BT_CONN_INITIATING_FILTER_LIST:
		return "initiating-filter-list";
	case BT_CONN_ADV_CONNECTABLE:
		return "adv-connectable";
	case BT_CONN_ADV_DIR_CONNECTABLE:
		return "adv-dir-connectable";
	case BT_CONN_CONNECTED:
		return "connected";
	case BT_CONN_DISCONNECTING:
		return "disconnecting";
	default:
		return "(unknown)";
	}
}

static void tx_free(struct bt_conn_tx *tx)
{
	LOG_DBG("%p", tx);
	tx->cb = NULL;
	tx->user_data = NULL;
	k_fifo_put(&free_tx, tx);
}

#if defined(CONFIG_BT_CONN_TX)
static struct k_work_q *tx_notify_workqueue_get(void)
{
#if defined(CONFIG_BT_CONN_TX_NOTIFY_WQ)
	return &conn_tx_workq;
#else
	return &k_sys_work_q;
#endif /* CONFIG_BT_CONN_TX_NOTIFY_WQ */
}

static void tx_notify_process(struct bt_conn *conn)
{
	/* TX notify processing is done only from a single thread. */
	__ASSERT_NO_MSG(k_current_get() == k_work_queue_thread_get(tx_notify_workqueue_get()));

	LOG_DBG("conn %p", (void *)conn);

	while (1) {
		struct bt_conn_tx *tx = NULL;
		unsigned int key;
		bt_conn_tx_cb_t cb;
		void *user_data;

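		/* Pop under an IRQ lock: the tx_complete list is appended to
		 * outside this thread, possibly from interrupt context.
		 */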
		key = irq_lock();
		if (!sys_slist_is_empty(&conn->tx_complete)) {
			const sys_snode_t *node = sys_slist_get_not_empty(&conn->tx_complete);

			tx = CONTAINER_OF(node, struct bt_conn_tx, node);
		}
		irq_unlock(key);

		if (!tx) {
			return;
		}

		LOG_DBG("tx %p cb %p user_data %p", tx, tx->cb, tx->user_data);

		/* Copy over the params */
		cb = tx->cb;
		user_data = tx->user_data;

		/* Free up the TX context first since a user may be waiting for one */
		tx_free(tx);

		/* Run the callback, at this point it should be safe to
		 * allocate new buffers since the TX should have been
		 * unblocked by tx_free.
		 */
		if (cb) {
			cb(conn, user_data, 0);
		}

		LOG_DBG("raise TX IRQ");
		bt_tx_irq_raise();
	}
}
#endif /* CONFIG_BT_CONN_TX */

void bt_conn_tx_notify(struct bt_conn *conn, bool wait_for_completion)
{
#if defined(CONFIG_BT_CONN_TX)
	/* Ensure that function is called only from a single context. */
	if (k_current_get() == k_work_queue_thread_get(tx_notify_workqueue_get())) {
		tx_notify_process(conn);
	} else {
		struct k_work_sync sync;
		int err;

		err = k_work_submit_to_queue(tx_notify_workqueue_get(), &conn->tx_complete_work);
		__ASSERT(err >= 0, "couldn't submit (err %d)", err);

		if (wait_for_completion) {
			(void)k_work_flush(&conn->tx_complete_work, &sync);
		}
	}
#else
	ARG_UNUSED(conn);
	ARG_UNUSED(wait_for_completion);
#endif /* CONFIG_BT_CONN_TX */
}

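/* Claim a free connection object from `conns` by atomically moving its
 * refcount from 0 to 1. Everything that precedes the `ref` member is then
 * zeroed; `ref` itself must survive the reset.
 */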
struct bt_conn *bt_conn_new(struct bt_conn *conns, size_t size)
{
	struct bt_conn *conn = NULL;
	int i;

	for (i = 0; i < size; i++) {
		if (atomic_cas(&conns[i].ref, 0, 1)) {
			conn = &conns[i];
			break;
		}
	}

	if (!conn) {
		return NULL;
	}

	(void)memset(conn, 0, offsetof(struct bt_conn, ref));

#if defined(CONFIG_BT_CONN)
	k_work_init_delayable(&conn->deferred_work, deferred_work);
#endif /* CONFIG_BT_CONN */
#if defined(CONFIG_BT_CONN_TX)
	k_work_init(&conn->tx_complete_work, tx_complete_work);
#endif /* CONFIG_BT_CONN_TX */

	return conn;
}

void bt_conn_reset_rx_state(struct bt_conn *conn)
{
	if (!conn->rx) {
		return;
	}

	net_buf_unref(conn->rx);
	conn->rx = NULL;
}

static void bt_acl_recv(struct bt_conn *conn, struct net_buf *buf, uint8_t flags)
{
	uint16_t acl_total_len;

	bt_acl_set_ncp_sent(buf, false);

	/* Check packet boundary flags */
	switch (flags) {
	case BT_ACL_START:
		if (conn->rx) {
			LOG_ERR("Unexpected first L2CAP frame");
			bt_conn_reset_rx_state(conn);
		}

		LOG_DBG("First, len %u final %u", buf->len,
			(buf->len < sizeof(uint16_t)) ? 0 : sys_get_le16(buf->data));

		conn->rx = net_buf_ref(buf);
		break;
	case BT_ACL_CONT:
		if (!conn->rx) {
			LOG_ERR("Unexpected L2CAP continuation");
			bt_conn_reset_rx_state(conn);
			net_buf_unref(buf);
			return;
		}

		if (!buf->len) {
			LOG_DBG("Empty ACL_CONT");
			net_buf_unref(buf);
			return;
		}

		if (buf->len > net_buf_tailroom(conn->rx)) {
			LOG_ERR("Not enough buffer space for L2CAP data");

			/* The frame is not complete, but still pass it to
			 * L2CAP so it can handle the error at the protocol
			 * level, e.g. by disconnecting the channel.
			 */
			bt_l2cap_recv(conn, conn->rx, false);
			conn->rx = NULL;
			net_buf_unref(buf);
			return;
		}

		net_buf_add_mem(conn->rx, buf->data, buf->len);
		break;
	default:
		/* BT_ACL_START_NO_FLUSH and BT_ACL_COMPLETE are not allowed on
		 * LE-U from Controller to Host.
		 * Only BT_ACL_POINT_TO_POINT is supported.
		 */
		LOG_ERR("Unexpected ACL flags (0x%02x)", flags);
		bt_conn_reset_rx_state(conn);
		net_buf_unref(buf);
		return;
	}

	if (conn->rx->len < sizeof(uint16_t)) {
		/* Still not enough data received to retrieve the L2CAP header
		 * length field.
		 */
		bt_send_one_host_num_completed_packets(conn->handle);
		bt_acl_set_ncp_sent(buf, true);
		net_buf_unref(buf);

		return;
	}

	acl_total_len = sys_get_le16(conn->rx->data) + sizeof(struct bt_l2cap_hdr);

	if (conn->rx->len < acl_total_len) {
		/* L2CAP frame not complete. */
		bt_send_one_host_num_completed_packets(conn->handle);
		bt_acl_set_ncp_sent(buf, true);
		net_buf_unref(buf);

		return;
	}

	net_buf_unref(buf);

	if (conn->rx->len > acl_total_len) {
		LOG_ERR("ACL len mismatch (%u > %u)", conn->rx->len, acl_total_len);
		bt_conn_reset_rx_state(conn);
		return;
	}

	/* L2CAP frame complete. */
	buf = conn->rx;
	conn->rx = NULL;

	__ASSERT(buf->ref == 1, "buf->ref %d", buf->ref);

	LOG_DBG("Successfully parsed %u byte L2CAP packet", buf->len);
	bt_l2cap_recv(conn, buf, true);
}

void bt_conn_recv(struct bt_conn *conn, struct net_buf *buf, uint8_t flags)
{
	/* Make sure we notify any pending TX callbacks before processing
	 * new data for this connection.
	 *
	 * Always do so from the same context for sanity. In this case that will
	 * be either a dedicated Bluetooth connection TX workqueue or system workqueue.
	 */
	bt_conn_tx_notify(conn, true);

	LOG_DBG("handle %u len %u flags %02x", conn->handle, buf->len, flags);

	if (IS_ENABLED(CONFIG_BT_ISO_RX) && conn->type == BT_CONN_TYPE_ISO) {
		bt_iso_recv(conn, buf, flags);
		return;
	} else if (IS_ENABLED(CONFIG_BT_CONN)) {
		bt_acl_recv(conn, buf, flags);
	} else {
		__ASSERT(false, "Invalid connection type %u", conn->type);
	}
}

static bool dont_have_tx_context(struct bt_conn *conn)
{
	ARG_UNUSED(conn);

	return k_fifo_is_empty(&free_tx);
}

static struct bt_conn_tx *conn_tx_alloc(void)
{
	struct bt_conn_tx *ret = k_fifo_get(&free_tx, K_NO_WAIT);

	LOG_DBG("%p", ret);

	return ret;
}

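/* Transport-agnostic fragment positions. send_acl() and send_iso() translate
 * these into the packet-boundary / PB flags that HCI expects.
 */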
enum {
	FRAG_START,
	FRAG_CONT,
	FRAG_SINGLE,
	FRAG_END
};

static int send_acl(struct bt_conn *conn, struct net_buf *buf, uint8_t flags)
{
	struct bt_hci_acl_hdr *hdr;

	switch (flags) {
	case FRAG_START:
	case FRAG_SINGLE:
		flags = BT_ACL_START_NO_FLUSH;
		break;
	case FRAG_CONT:
	case FRAG_END:
		flags = BT_ACL_CONT;
		break;
	default:
		return -EINVAL;
	}

	hdr = net_buf_push(buf, sizeof(*hdr));
	hdr->handle = sys_cpu_to_le16(bt_acl_handle_pack(conn->handle, flags));
	hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));

	bt_buf_set_type(buf, BT_BUF_ACL_OUT);

	return bt_send(buf);
}

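/* Infer whether the SDU carries a timestamped ISO header from the headroom
 * the upper layer reserved: a buffer built around bt_hci_iso_sdu_ts_hdr has
 * exactly sizeof(struct bt_hci_iso_sdu_ts_hdr) less headroom remaining.
 */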
static enum bt_iso_timestamp contains_iso_timestamp(struct net_buf *buf)
{
	enum bt_iso_timestamp ts;

	if (net_buf_headroom(buf) ==
	    (BT_BUF_ISO_SIZE(0) - sizeof(struct bt_hci_iso_sdu_ts_hdr))) {
		ts = BT_ISO_TS_PRESENT;
	} else {
		ts = BT_ISO_TS_ABSENT;
	}

	return ts;
}

static int send_iso(struct bt_conn *conn, struct net_buf *buf, uint8_t flags)
{
	struct bt_hci_iso_hdr *hdr;
	enum bt_iso_timestamp ts;

	switch (flags) {
	case FRAG_START:
		flags = BT_ISO_START;
		break;
	case FRAG_CONT:
		flags = BT_ISO_CONT;
		break;
	case FRAG_SINGLE:
		flags = BT_ISO_SINGLE;
		break;
	case FRAG_END:
		flags = BT_ISO_END;
		break;
	default:
		return -EINVAL;
	}

	/* The TS bit is set by `iso.c:conn_iso_send`. This special byte
	 * prepends the whole SDU, and won't be there for individual fragments.
	 *
	 * Conveniently, it is only legal to set the TS bit on the first HCI
	 * fragment, so we don't have to pass this extra metadata around for
	 * every fragment, only the first one.
	 */
	if (flags == BT_ISO_SINGLE || flags == BT_ISO_START) {
		ts = contains_iso_timestamp(buf);
	} else {
		ts = BT_ISO_TS_ABSENT;
	}

	hdr = net_buf_push(buf, sizeof(*hdr));
	hdr->handle = sys_cpu_to_le16(bt_iso_handle_pack(conn->handle, flags, ts));
	hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));

	bt_buf_set_type(buf, BT_BUF_ISO_OUT);

	return bt_send(buf);
}

static inline uint16_t conn_mtu(struct bt_conn *conn)
{
#if defined(CONFIG_BT_CLASSIC)
	if (conn->type == BT_CONN_TYPE_BR ||
	    (conn->type != BT_CONN_TYPE_ISO && !bt_dev.le.acl_mtu)) {
		return bt_dev.br.mtu;
	}
#endif /* CONFIG_BT_CLASSIC */
#if defined(CONFIG_BT_ISO)
	if (conn->type == BT_CONN_TYPE_ISO) {
		return bt_dev.le.iso_mtu;
	}
#endif /* CONFIG_BT_ISO */
#if defined(CONFIG_BT_CONN)
	return bt_dev.le.acl_mtu;
#else
	return 0;
#endif /* CONFIG_BT_CONN */
}

static bool is_classic_conn(struct bt_conn *conn)
{
	return (IS_ENABLED(CONFIG_BT_CLASSIC) &&
		conn->type == BT_CONN_TYPE_BR);
}

static bool is_iso_tx_conn(struct bt_conn *conn)
{
	return IS_ENABLED(CONFIG_BT_ISO_TX) &&
	       conn->type == BT_CONN_TYPE_ISO;
}

static bool is_le_conn(struct bt_conn *conn)
{
	return IS_ENABLED(CONFIG_BT_CONN) && conn->type == BT_CONN_TYPE_LE;
}

static bool is_acl_conn(struct bt_conn *conn)
{
	return is_le_conn(conn) || is_classic_conn(conn);
}

static int send_buf(struct bt_conn *conn, struct net_buf *buf,
		    size_t len, void *cb, void *ud)
{
	struct net_buf *frag = NULL;
	struct bt_conn_tx *tx = NULL;
	uint8_t flags;
	int err;

	if (buf->len == 0) {
		__ASSERT_NO_MSG(0);

		return -EMSGSIZE;
	}

	if (bt_buf_has_view(buf)) {
		__ASSERT_NO_MSG(0);

		return -EIO;
	}

	LOG_DBG("conn %p buf %p len %zu buf->len %u cb %p ud %p",
		conn, buf, len, buf->len, cb, ud);

	/* Acquire the right to send 1 packet to the controller */
	if (k_sem_take(bt_conn_get_pkts(conn), K_NO_WAIT)) {
		/* This shouldn't happen now that we acquire the resources
		 * before calling `send_buf` (in `get_conn_ready`). We say
		 * "acquire" as `tx_processor()` is not re-entrant and the
		 * thread is non-preemptible. So the sem value shouldn't change.
		 */
		__ASSERT(0, "No controller bufs");

		return -ENOMEM;
	}

	/* Allocate and set the TX context */
	tx = conn_tx_alloc();

	/* See big comment above */
	if (!tx) {
		__ASSERT(0, "No TX context");

		return -ENOMEM;
	}

	tx->cb = cb;
	tx->user_data = ud;

	uint16_t frag_len = MIN(conn_mtu(conn), len);

	__ASSERT_NO_MSG(buf->ref == 1);

	if (buf->len > frag_len) {
		LOG_DBG("keep %p around", buf);
		frag = get_data_frag(net_buf_ref(buf), frag_len);
	} else {
		LOG_DBG("move %p ref in", buf);
		/* Move the ref into `frag` for the last TX. That way `buf` will
		 * get destroyed when `frag` is destroyed.
		 */
		frag = get_data_frag(buf, frag_len);
	}

	/* Caller is supposed to check we have all resources to send */
	__ASSERT_NO_MSG(frag != NULL);

	/* If the current buffer doesn't fit a controller buffer */
	if (len > conn_mtu(conn)) {
		flags = conn->next_is_frag ? FRAG_CONT : FRAG_START;
		conn->next_is_frag = true;
	} else {
		flags = conn->next_is_frag ? FRAG_END : FRAG_SINGLE;
		conn->next_is_frag = false;
	}

	LOG_DBG("send frag: buf %p len %d", buf, frag_len);

	/* At this point, the buffer is either a fragment or a full HCI packet.
	 * The flags are also valid.
	 */
	LOG_DBG("conn %p buf %p len %u flags 0x%02x",
		conn, frag, frag->len, flags);

	/* Keep track of sent buffers. We have to append _before_
	 * sending, as we might get pre-empted if the HCI driver calls
	 * k_yield() before returning.
	 *
	 * In that case, the controller could also send a num-complete-packets
	 * event and our handler will be confused that there is no corresponding
	 * callback node in the `tx_pending` list.
	 */
	atomic_inc(&conn->in_ll);
	sys_slist_append(&conn->tx_pending, &tx->node);

	if (is_iso_tx_conn(conn)) {
		err = send_iso(conn, frag, flags);
	} else if (is_acl_conn(conn)) {
		err = send_acl(conn, frag, flags);
	} else {
		err = -EINVAL; /* Some animals disable asserts (╯°□°)╯︵ ┻━┻ */
		__ASSERT(false, "Invalid connection type %u", conn->type);
	}

	if (!err) {
		return 0;
	}

	/* Remove buf from pending list */
	atomic_dec(&conn->in_ll);
	(void)sys_slist_find_and_remove(&conn->tx_pending, &tx->node);

	LOG_ERR("Unable to send to driver (err %d)", err);

	/* If we get here, something has gone seriously wrong: the `parent` buf
	 * (to which the current fragment belongs) should also be destroyed.
	 */
	net_buf_unref(frag);

	/* `buf` might not get destroyed right away, and its `tx`
	 * pointer will still be reachable. Make sure that we don't try
	 * to use the destroyed context later.
	 */
	conn_tx_destroy(conn, tx);
	k_sem_give(bt_conn_get_pkts(conn));

	/* Merge HCI driver errors */
	return -EIO;
}

static struct k_poll_signal conn_change =
	K_POLL_SIGNAL_INITIALIZER(conn_change);

static void conn_destroy(struct bt_conn *conn, void *data)
{
	if (conn->state == BT_CONN_CONNECTED ||
	    conn->state == BT_CONN_DISCONNECTING) {
		bt_conn_set_state(conn, BT_CONN_DISCONNECT_COMPLETE);
	}

	if (conn->state != BT_CONN_DISCONNECTED) {
		bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
	}
}

void bt_conn_cleanup_all(void)
{
	bt_conn_foreach(BT_CONN_TYPE_ALL, conn_destroy, NULL);
}

#if defined(CONFIG_BT_CONN)
/* Returns true if L2CAP has data to send on this conn */
static bool acl_has_data(struct bt_conn *conn)
{
	return sys_slist_peek_head(&conn->l2cap_data_ready) != NULL;
}
#endif /* defined(CONFIG_BT_CONN) */

/* Connection "Scheduler" of sorts:
 *
 * Will try to get the optimal number of queued buffers for the connection.
 *
 * Partitions the controller's buffers among the connections according to a
 * heuristic. The heuristic is meant to be tunable (fairness, simplicity,
 * throughput, etc.).
 *
 * In the future, this will be a hook exposed to the application.
 */
static bool should_stop_tx(struct bt_conn *conn)
{
	LOG_DBG("%p", conn);

	if (conn->state != BT_CONN_CONNECTED) {
		return true;
	}

	/* TODO: This function should be overridable by the application: they
	 * should be able to provide their own heuristic.
	 */
	if (!conn->has_data(conn)) {
		LOG_DBG("No more data for %p", conn);
		return true;
	}

	/* Queue only 3 buffers per-conn for now */
	if (atomic_get(&conn->in_ll) < 3) {
		/* The goal of this heuristic is to allow the link-layer to
		 * extend an ACL connection event as long as the application
		 * layer can provide data.
		 *
		 * Here we chose three buffers, as some LLs need two enqueued
		 * packets to be able to set the more-data bit, and one more
		 * buffer to allow refilling by the app while one of them is
		 * being sent over-the-air.
		 */
		return false;
	}

	return true;
}

void bt_conn_data_ready(struct bt_conn *conn)
{
	LOG_DBG("DR");

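	/* atomic_set() returns the previous value: 0 means we won the race
	 * and must attach the conn to the ready list exactly once.
	 */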
	/* The TX processor will call the `pull_cb` to get the buf */
	if (!atomic_set(&conn->_conn_ready_lock, 1)) {
		/* Attach a reference to the `bt_dev.le.conn_ready` list.
		 *
		 * This reference will be consumed when the conn is popped off
		 * the list (in `get_conn_ready`).
		 */
		bt_conn_ref(conn);
		sys_slist_append(&bt_dev.le.conn_ready,
				 &conn->_conn_ready);
		LOG_DBG("raised");
	} else {
		LOG_DBG("already in list");
	}

	/* Kick the TX processor */
	bt_tx_irq_raise();
}

static bool cannot_send_to_controller(struct bt_conn *conn)
{
	return k_sem_count_get(bt_conn_get_pkts(conn)) == 0;
}

static bool dont_have_viewbufs(void)
{
#if defined(CONFIG_BT_CONN_TX)
	/* The LIFO only tracks buffers that have been destroyed at least once,
	 * hence the uninit check beforehand.
	 */
	if (fragments.uninit_count > 0) {
		/* If there are uninitialized bufs, we are guaranteed allocation. */
		return false;
	}

	/* In practice k_fifo == k_lifo ABI. */
	return k_fifo_is_empty(&fragments.free);

#else /* !CONFIG_BT_CONN_TX */
	return false;
#endif /* CONFIG_BT_CONN_TX */
}

__maybe_unused static bool dont_have_methods(struct bt_conn *conn)
{
	return (conn->tx_data_pull == NULL) ||
	       (conn->get_and_clear_cb == NULL) ||
	       (conn->has_data == NULL);
}

struct bt_conn *get_conn_ready(void)
{
	/* Here we only peek: we pop the conn (and insert it at the back if it
	 * still has data) after the QoS function returns false.
	 */
	sys_snode_t *node = sys_slist_peek_head(&bt_dev.le.conn_ready);

	if (node == NULL) {
		return NULL;
	}

	/* `conn` borrows from the list node. That node is _not_ popped yet.
	 *
	 * If we end up not popping that conn off the list, we have to make sure
	 * to increase the refcount before returning a pointer to that
	 * connection out of this function.
	 */
	struct bt_conn *conn = CONTAINER_OF(node, struct bt_conn, _conn_ready);

	if (dont_have_viewbufs()) {
		/* We will get scheduled again when the (view) buffers are freed. If you
		 * hit this a lot, try increasing `CONFIG_BT_CONN_FRAG_COUNT`
		 */
		LOG_DBG("no view bufs");
		return NULL;
	}

	if (cannot_send_to_controller(conn)) {
		/* We will get scheduled again when the buffers are freed. */
		LOG_DBG("no LL bufs for %p", conn);
		return NULL;
	}

	if (dont_have_tx_context(conn)) {
		/* We will get scheduled again when TX contexts are available. */
		LOG_DBG("no TX contexts");
		return NULL;
	}

	CHECKIF(dont_have_methods(conn)) {
		LOG_DBG("conn %p (type %d) is missing mandatory methods",
			conn, conn->type);

		return NULL;
	}

	if (should_stop_tx(conn)) {
		/* Move reference off the list and into the `conn` variable. */
		__maybe_unused sys_snode_t *s = sys_slist_get(&bt_dev.le.conn_ready);

		__ASSERT_NO_MSG(s == node);
		(void)atomic_set(&conn->_conn_ready_lock, 0);

		/* Append connection to list if it still has data */
		if (conn->has_data(conn)) {
			LOG_DBG("appending %p to back of TX queue", conn);
			bt_conn_data_ready(conn);
		}

		return conn;
	}

	return bt_conn_ref(conn);
}

/* Crazy that this file is compiled even if this is not true, but here we are. */
#if defined(CONFIG_BT_CONN)
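/* The TX callback and its user data are stored as a "closure" in the
 * buffer's user data area. They are cleared after being read so the same
 * callback cannot be popped twice for one buffer.
 */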
static void acl_get_and_clear_cb(struct bt_conn *conn, struct net_buf *buf,
				 bt_conn_tx_cb_t *cb, void **ud)
{
	__ASSERT_NO_MSG(is_acl_conn(conn));

	*cb = closure_cb(buf->user_data);
	*ud = closure_data(buf->user_data);
	memset(buf->user_data, 0, buf->user_data_size);
}
#endif /* defined(CONFIG_BT_CONN) */

/* Acts as a "null-routed" bt_send(). This fn will decrease the refcount of
 * `buf` and call the user callback with an error code.
 */
static void destroy_and_callback(struct bt_conn *conn,
				 struct net_buf *buf,
				 bt_conn_tx_cb_t cb,
				 void *ud)
{
	if (!cb) {
		conn->get_and_clear_cb(conn, buf, &cb, &ud);
	}

	LOG_DBG("pop: cb %p userdata %p", cb, ud);

	/* bt_send() would've done an unref. Do it here also, so the buffer is
	 * hopefully destroyed and the user callback can allocate a new one.
	 */
	net_buf_unref(buf);

	if (cb) {
		cb(conn, ud, -ESHUTDOWN);
	}
}

static volatile bool _suspend_tx;

#if defined(CONFIG_BT_TESTING)
void bt_conn_suspend_tx(bool suspend)
{
	_suspend_tx = suspend;

	LOG_DBG("%sing all data TX", suspend ? "suspend" : "resum");

	bt_tx_irq_raise();
}
#endif /* CONFIG_BT_TESTING */

void bt_conn_tx_processor(void)
{
	LOG_DBG("start");
	struct bt_conn *conn;
	struct net_buf *buf;
	bt_conn_tx_cb_t cb = NULL;
	size_t buf_len;
	void *ud = NULL;

	if (!IS_ENABLED(CONFIG_BT_CONN_TX)) {
		/* Mom, can we have a real compiler? */
		return;
	}

	if (IS_ENABLED(CONFIG_BT_TESTING) && _suspend_tx) {
		return;
	}

	conn = get_conn_ready();

	if (!conn) {
		LOG_DBG("no connection wants to do stuff");
		return;
	}

	LOG_DBG("processing conn %p", conn);

	if (conn->state != BT_CONN_CONNECTED) {
		LOG_WRN("conn %p: not connected", conn);

		/* Call the user callbacks & destroy (final-unref) the buffers
		 * we were supposed to send.
		 */
		buf = conn->tx_data_pull(conn, SIZE_MAX, &buf_len);
		while (buf) {
			destroy_and_callback(conn, buf, cb, ud);
			buf = conn->tx_data_pull(conn, SIZE_MAX, &buf_len);
		}

		goto exit;
	}

	/* Now that we are guaranteed resources, we can pull data from the upper
	 * layer (L2CAP or ISO).
	 */
	buf = conn->tx_data_pull(conn, conn_mtu(conn), &buf_len);
	if (!buf) {
		/* Either there is no more data, or the buffer is already in-use
		 * by a view on it. In both cases, the TX processor will be
		 * triggered again, either by the view's destroy callback, or by
		 * the upper layer when it has more data.
		 */
		LOG_DBG("no buf returned");

		goto exit;
	}

	bool last_buf = conn_mtu(conn) >= buf_len;

	if (last_buf) {
		/* Only pull the callback info from the last buffer.
		 * We still allocate one TX context per-fragment though.
		 */
		conn->get_and_clear_cb(conn, buf, &cb, &ud);
		LOG_DBG("pop: cb %p userdata %p", cb, ud);
	}

	LOG_DBG("TX process: conn %p buf %p (%s)",
		conn, buf, last_buf ? "last" : "frag");

	int err = send_buf(conn, buf, buf_len, cb, ud);

	if (err) {
		/* -EIO means `unrecoverable error`. It can be an assertion that
		 * failed or an error from the HCI driver.
		 *
		 * -ENOMEM means we thought we had all the resources to send the
		 * buf (ie. TX context + controller buffer) but one of them was
		 * not available. This is likely due to a failure of
		 * assumption, likely that we have been pre-empted somehow and
		 * that `tx_processor()` has been re-entered.
		 *
		 * In both cases, we destroy the buffer and mark the connection
		 * as dead.
		 */
		LOG_ERR("Fatal error (%d). Disconnecting %p", err, conn);
		destroy_and_callback(conn, buf, cb, ud);
		bt_conn_disconnect(conn, BT_HCI_ERR_REMOTE_USER_TERM_CONN);

		goto exit;
	}

	/* Always kick the TX work. It will self-suspend if it doesn't get
	 * resources or there is nothing left to send.
	 */
	bt_tx_irq_raise();

exit:
	/* Give back the ref that `get_conn_ready()` gave us */
	bt_conn_unref(conn);
}

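/* Flush the tx_pending list when a connection dies: each pending TX gets its
 * callback invoked with -ESHUTDOWN, and the controller buffer credit taken
 * in send_buf() is returned.
 */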
static void process_unack_tx(struct bt_conn *conn)
{
	LOG_DBG("%p", conn);

	/* Return any unacknowledged packets */
	while (1) {
		struct bt_conn_tx *tx;
		sys_snode_t *node;

		node = sys_slist_get(&conn->tx_pending);

		if (!node) {
			bt_tx_irq_raise();
			return;
		}

		tx = CONTAINER_OF(node, struct bt_conn_tx, node);

		conn_tx_destroy(conn, tx);
		k_sem_give(bt_conn_get_pkts(conn));
	}
}

struct bt_conn *conn_lookup_handle(struct bt_conn *conns, size_t size,
				   uint16_t handle)
{
	int i;

	for (i = 0; i < size; i++) {
		struct bt_conn *conn = bt_conn_ref(&conns[i]);

		if (!conn) {
			continue;
		}

		/* We only care about connections with a valid handle */
		if (!bt_conn_is_handle_valid(conn)) {
			bt_conn_unref(conn);
			continue;
		}

		if (conn->handle != handle) {
			bt_conn_unref(conn);
			continue;
		}

		return conn;
	}

	return NULL;
}

void bt_conn_set_state(struct bt_conn *conn, bt_conn_state_t state)
{
	bt_conn_state_t old_state;

	LOG_DBG("%s -> %s", state2str(conn->state), state2str(state));

	if (conn->state == state) {
		LOG_WRN("no transition %s", state2str(state));
		return;
	}

	old_state = conn->state;
	conn->state = state;

	/* Actions needed for exiting the old state */
	switch (old_state) {
	case BT_CONN_DISCONNECTED:
		/* Take a reference for the first state transition after
		 * bt_conn_add_le() and keep it until reaching DISCONNECTED
		 * again.
		 */
		if (conn->type != BT_CONN_TYPE_ISO) {
			bt_conn_ref(conn);
		}
		break;
	case BT_CONN_INITIATING:
		if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
		    conn->type == BT_CONN_TYPE_LE) {
			k_work_cancel_delayable(&conn->deferred_work);
		}
		break;
	default:
		break;
	}

	/* Actions needed for entering the new state */
	switch (conn->state) {
	case BT_CONN_CONNECTED:
		if (conn->type == BT_CONN_TYPE_SCO) {
			if (IS_ENABLED(CONFIG_BT_CLASSIC)) {
				bt_sco_connected(conn);
			}
			break;
		}
		k_poll_signal_raise(&conn_change, 0);

		if (IS_ENABLED(CONFIG_BT_ISO) &&
		    conn->type == BT_CONN_TYPE_ISO) {
			bt_iso_connected(conn);
			break;
		}

#if defined(CONFIG_BT_CONN)
		sys_slist_init(&conn->channels);

		if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
		    conn->role == BT_CONN_ROLE_PERIPHERAL) {

#if defined(CONFIG_BT_GAP_AUTO_UPDATE_CONN_PARAMS)
			if (conn->type == BT_CONN_TYPE_LE) {
				conn->le.conn_param_retry_countdown =
					CONFIG_BT_CONN_PARAM_RETRY_COUNT;
			}
#endif /* CONFIG_BT_GAP_AUTO_UPDATE_CONN_PARAMS */

			k_work_schedule(&conn->deferred_work,
					CONN_UPDATE_TIMEOUT);
		}
#endif /* CONFIG_BT_CONN */

		break;
	case BT_CONN_DISCONNECTED:
#if defined(CONFIG_BT_CONN)
		if (conn->type == BT_CONN_TYPE_SCO) {
			if (IS_ENABLED(CONFIG_BT_CLASSIC)) {
				bt_sco_disconnected(conn);
			}
			bt_conn_unref(conn);
			break;
		}

		/* Notify disconnection and queue a dummy buffer to wake
		 * up and stop the tx thread for states where it was
		 * running.
		 */
		switch (old_state) {
		case BT_CONN_DISCONNECT_COMPLETE:
			/* Any previously scheduled deferred work now becomes invalid
			 * so cancel it here, before we yield to tx thread.
			 */
			k_work_cancel_delayable(&conn->deferred_work);

			bt_conn_tx_notify(conn, true);

			bt_conn_reset_rx_state(conn);

			LOG_DBG("trigger disconnect work");
			k_work_reschedule(&conn->deferred_work, K_NO_WAIT);

			/* The last ref will be dropped during cleanup */
			break;
		case BT_CONN_INITIATING:
			/* LE Create Connection command failed. This might be
			 * directly from the API, don't notify application in
			 * this case.
			 */
			if (conn->err) {
				notify_connected(conn);
			}

			bt_conn_unref(conn);
			break;
		case BT_CONN_SCAN_BEFORE_INITIATING:
			/* This indicates that connection establishment
			 * has been stopped. This could either be triggered by
			 * the application through bt_conn_disconnect or by
			 * timeout set by bt_conn_le_create_param.timeout.
			 */
			if (IS_ENABLED(CONFIG_BT_CENTRAL)) {
				int err = bt_le_scan_user_remove(BT_LE_SCAN_USER_CONN);

				if (err) {
					LOG_WRN("Error while removing conn user from scanner (%d)",
						err);
				}

				if (conn->err) {
					notify_connected(conn);
				}
			}
			bt_conn_unref(conn);
			break;
		case BT_CONN_ADV_DIR_CONNECTABLE:
			/* This indicates that directed advertising stopped. */
			if (conn->err) {
				notify_connected(conn);
			}

			bt_conn_unref(conn);
			break;
		case BT_CONN_INITIATING_FILTER_LIST:
			/* This indicates LE Create Connection with filter
			 * policy has been stopped. This can only be triggered
			 * by the application, so don't notify.
			 */
			bt_conn_unref(conn);
			break;
		case BT_CONN_ADV_CONNECTABLE:
			/* This can only happen when application stops the
			 * advertiser, conn->err is never set in this case.
			 */
			bt_conn_unref(conn);
			break;
		case BT_CONN_CONNECTED:
		case BT_CONN_DISCONNECTING:
		case BT_CONN_DISCONNECTED:
			/* Cannot happen. */
			LOG_WRN("Invalid (%u) old state", old_state);
			break;
		}
		break;
	case BT_CONN_INITIATING_FILTER_LIST:
		break;
	case BT_CONN_ADV_CONNECTABLE:
		break;
	case BT_CONN_SCAN_BEFORE_INITIATING:
		break;
	case BT_CONN_ADV_DIR_CONNECTABLE:
		break;
	case BT_CONN_INITIATING:
		if (conn->type == BT_CONN_TYPE_SCO) {
			break;
		}
		/*
		 * Timer is needed only for LE. For other link types controller
		 * will handle connection timeout.
		 */
		if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
		    conn->type == BT_CONN_TYPE_LE &&
		    bt_dev.create_param.timeout != 0) {
			k_work_schedule(&conn->deferred_work,
					K_MSEC(10 * bt_dev.create_param.timeout));
		}

		break;
	case BT_CONN_DISCONNECTING:
		break;
#endif /* CONFIG_BT_CONN */
	case BT_CONN_DISCONNECT_COMPLETE:
		if (conn->err == BT_HCI_ERR_CONN_FAIL_TO_ESTAB) {
			/* No ACK or data was ever received. The peripheral may be
			 * unaware of the connection attempt.
			 *
			 * Beware of confusing higher layer errors. Anything that looks
			 * like it's from the remote is synthetic.
			 */
			LOG_WRN("conn %p failed to establish. RF noise?", conn);
		}

		process_unack_tx(conn);
		break;
	default:
		LOG_WRN("no valid (%u) state was set", state);

		break;
	}
}

struct bt_conn *bt_conn_lookup_handle(uint16_t handle, enum bt_conn_type type)
{
	struct bt_conn *conn;

#if defined(CONFIG_BT_CONN)
	conn = conn_lookup_handle(acl_conns, ARRAY_SIZE(acl_conns), handle);
	if (conn) {
		goto found;
	}
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_ISO)
	conn = conn_lookup_handle(iso_conns, ARRAY_SIZE(iso_conns), handle);
	if (conn) {
		goto found;
	}
#endif

#if defined(CONFIG_BT_CLASSIC)
	conn = conn_lookup_handle(sco_conns, ARRAY_SIZE(sco_conns), handle);
	if (conn) {
		goto found;
	}
#endif

found:
	if (conn) {
		if (type & conn->type) {
			return conn;
		}
		LOG_WRN("incompatible handle %u", handle);
		bt_conn_unref(conn);
	}
	return NULL;
}

struct bt_conn *bt_hci_conn_lookup_handle(uint16_t handle)
{
	return bt_conn_lookup_handle(handle, BT_CONN_TYPE_ALL);
}

void bt_conn_foreach(enum bt_conn_type type,
		     void (*func)(struct bt_conn *conn, void *data),
		     void *data)
{
	int i;

#if defined(CONFIG_BT_CONN)
	for (i = 0; i < ARRAY_SIZE(acl_conns); i++) {
		struct bt_conn *conn = bt_conn_ref(&acl_conns[i]);

		if (!conn) {
			continue;
		}

		if (!(conn->type & type)) {
			bt_conn_unref(conn);
			continue;
		}

		func(conn, data);
		bt_conn_unref(conn);
	}
#if defined(CONFIG_BT_CLASSIC)
	if (type & BT_CONN_TYPE_SCO) {
		for (i = 0; i < ARRAY_SIZE(sco_conns); i++) {
			struct bt_conn *conn = bt_conn_ref(&sco_conns[i]);

			if (!conn) {
				continue;
			}

			func(conn, data);
			bt_conn_unref(conn);
		}
	}
#endif /* defined(CONFIG_BT_CLASSIC) */
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_ISO)
	if (type & BT_CONN_TYPE_ISO) {
		for (i = 0; i < ARRAY_SIZE(iso_conns); i++) {
			struct bt_conn *conn = bt_conn_ref(&iso_conns[i]);

			if (!conn) {
				continue;
			}

			func(conn, data);
			bt_conn_unref(conn);
		}
	}
#endif /* defined(CONFIG_BT_ISO) */
}

struct bt_conn *bt_conn_ref(struct bt_conn *conn)
{
	atomic_val_t old;

	__ASSERT_NO_MSG(conn);

	/* Reference counter must be checked to avoid incrementing ref from
	 * zero, then we should return NULL instead.
	 * Loop on clear-and-set in case someone has modified the reference
	 * count since the read, and start over again when that happens.
	 */
	do {
		old = atomic_get(&conn->ref);

		if (!old) {
			return NULL;
		}
	} while (!atomic_cas(&conn->ref, old, old + 1));

	LOG_DBG("handle %u ref %ld -> %ld", conn->handle, old, old + 1);

	return conn;
}

static K_SEM_DEFINE(pending_recycled_events, 0, K_SEM_MAX_LIMIT);

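/* One semaphore count is given per freed connection slot. The work item
 * notifies one recycled slot per run and resubmits itself until the count
 * is drained, so back-to-back frees are never lost.
 */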
static void recycled_work_handler(struct k_work *work)
{
	if (k_sem_take(&pending_recycled_events, K_NO_WAIT) == 0) {
		notify_recycled_conn_slot();
		k_work_submit(work);
	}
}

static K_WORK_DEFINE(recycled_work, recycled_work_handler);

void bt_conn_unref(struct bt_conn *conn)
{
	atomic_val_t old;
	bool deallocated;
	enum bt_conn_type conn_type;
	uint8_t conn_role;
	uint16_t conn_handle;

	__ASSERT(conn, "Invalid connection reference");

	/* Storing parameters of interest so we don't access the object
	 * after decrementing its ref-count
	 */
	conn_type = conn->type;
	conn_role = conn->role;
	conn_handle = conn->handle;

	old = atomic_dec(&conn->ref);
	/* Prevent from accessing connection object */
	conn = NULL;
	deallocated = (atomic_get(&old) == 1);

	LOG_DBG("handle %u ref %ld -> %ld", conn_handle, old, (old - 1));

	__ASSERT(old > 0, "Conn reference counter is 0");

	/* Slot has been freed and can be taken. No guarantees are made on requests
	 * to claim connection object as only the first claim will be served.
	 */
	if (deallocated) {
		k_sem_give(&pending_recycled_events);
		k_work_submit(&recycled_work);
	}

	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn_type == BT_CONN_TYPE_LE &&
	    conn_role == BT_CONN_ROLE_PERIPHERAL && deallocated) {
		bt_le_adv_resume();
	}
}

uint8_t bt_conn_index(const struct bt_conn *conn)
{
	ptrdiff_t index = 0;

	switch (conn->type) {
#if defined(CONFIG_BT_ISO)
	case BT_CONN_TYPE_ISO:
		index = conn - iso_conns;
		__ASSERT(index >= 0 && index < ARRAY_SIZE(iso_conns),
			 "Invalid bt_conn pointer");
		break;
#endif
#if defined(CONFIG_BT_CLASSIC)
	case BT_CONN_TYPE_SCO:
		index = conn - sco_conns;
		__ASSERT(index >= 0 && index < ARRAY_SIZE(sco_conns),
			 "Invalid bt_conn pointer");
		break;
#endif
	default:
#if defined(CONFIG_BT_CONN)
		index = conn - acl_conns;
		__ASSERT(index >= 0 && index < ARRAY_SIZE(acl_conns),
			 "Invalid bt_conn pointer");
#else
		__ASSERT(false, "Invalid connection type %u", conn->type);
#endif /* CONFIG_BT_CONN */
		break;
	}

	return (uint8_t)index;
}

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *bt_conn_create_pdu_timeout_debug(struct net_buf_pool *pool,
						 size_t reserve,
						 k_timeout_t timeout,
						 const char *func, int line)
#else
struct net_buf *bt_conn_create_pdu_timeout(struct net_buf_pool *pool,
					   size_t reserve, k_timeout_t timeout)
#endif
{
	struct net_buf *buf;

	/*
	 * PDU must not be allocated from ISR as we block with 'K_FOREVER'
	 * during the allocation
	 */
	__ASSERT_NO_MSG(!k_is_in_isr());

	if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT) &&
	    k_current_get() == k_work_queue_thread_get(&k_sys_work_q)) {
		LOG_WRN("Timeout discarded. No blocking in syswq.");
		timeout = K_NO_WAIT;
	}

	if (!pool) {
#if defined(CONFIG_BT_CONN)
		pool = &acl_tx_pool;
#else
		return NULL;
#endif /* CONFIG_BT_CONN */
	}

	if (IS_ENABLED(CONFIG_BT_CONN_LOG_LEVEL_DBG)) {
#if defined(CONFIG_NET_BUF_LOG)
		buf = net_buf_alloc_fixed_debug(pool, K_NO_WAIT, func, line);
#else
		buf = net_buf_alloc(pool, K_NO_WAIT);
#endif
		if (!buf) {
			LOG_WRN("Unable to allocate buffer with K_NO_WAIT");
#if defined(CONFIG_NET_BUF_LOG)
			buf = net_buf_alloc_fixed_debug(pool, timeout, func,
							line);
#else
			buf = net_buf_alloc(pool, timeout);
#endif
		}
	} else {
#if defined(CONFIG_NET_BUF_LOG)
		buf = net_buf_alloc_fixed_debug(pool, timeout, func,
						line);
#else
		buf = net_buf_alloc(pool, timeout);
#endif
	}

	if (!buf) {
		LOG_WRN("Unable to allocate buffer within timeout");
		return NULL;
	}

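	/* Reserve headroom for the HCI ACL header (pushed later in
	 * send_acl()) plus the transport-level reserve (BT_BUF_RESERVE),
	 * on top of what the caller requested.
	 */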
	reserve += sizeof(struct bt_hci_acl_hdr) + BT_BUF_RESERVE;
	net_buf_reserve(buf, reserve);

	return buf;
}

#if defined(CONFIG_BT_CONN_TX)
static void tx_complete_work(struct k_work *work)
{
	struct bt_conn *conn = CONTAINER_OF(work, struct bt_conn, tx_complete_work);

	tx_notify_process(conn);
}
#endif /* CONFIG_BT_CONN_TX */

static void notify_recycled_conn_slot(void)
{
#if defined(CONFIG_BT_CONN)
	struct bt_conn_cb *callback;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
		if (callback->recycled) {
			callback->recycled();
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
		if (cb->recycled) {
			cb->recycled();
		}
	}
#endif
}

#if !defined(CONFIG_BT_CONN)
int bt_conn_disconnect(struct bt_conn *conn, uint8_t reason)
{
	ARG_UNUSED(conn);
	ARG_UNUSED(reason);

	/* Dummy implementation to satisfy the compiler */

	return 0;
}
#endif /* !CONFIG_BT_CONN */

/* Group BT_CONN-only code in this section */
#if defined(CONFIG_BT_CONN)

/* We don't want the application to get a PHY update callback upon connection
 * establishment on 2M PHY. Therefore we must prevent issuing LE Set PHY
 * in this scenario.
 *
 * It is ifdef'd because the struct fields don't exist in some configs.
 */
static bool uses_symmetric_2mbit_phy(struct bt_conn *conn)
{
#if defined(CONFIG_BT_USER_PHY_UPDATE)
	if (IS_ENABLED(CONFIG_BT_EXT_ADV)) {
		if (conn->le.phy.tx_phy == BT_HCI_LE_PHY_2M &&
		    conn->le.phy.rx_phy == BT_HCI_LE_PHY_2M) {
			return true;
		}
	}
#else
	ARG_UNUSED(conn);
#endif

	return false;
}

static bool can_initiate_feature_exchange(struct bt_conn *conn)
{
	/* The spec says both central and peripheral can send the command.
	 * However, peripheral-initiated feature exchange is an optional
	 * feature.
	 *
	 * We provide an optimization if we are in the same image as the
	 * controller, as we then know at compile time whether or not it
	 * supports peripheral-initiated feature exchange.
	 */

	if (IS_ENABLED(CONFIG_BT_CENTRAL) && (conn->role == BT_HCI_ROLE_CENTRAL)) {
		return true;
	}

	if (IS_ENABLED(CONFIG_HAS_BT_CTLR) && IS_ENABLED(CONFIG_BT_CTLR_PER_INIT_FEAT_XCHG)) {
		return true;
	}

	return BT_FEAT_LE_PER_INIT_FEAT_XCHG(bt_dev.le.features);
}

static void perform_auto_initiated_procedures(struct bt_conn *conn, void *unused)
{
	int err;

	ARG_UNUSED(unused);

	LOG_DBG("[%p] Running auto-initiated procedures", conn);

	if (conn->state != BT_CONN_CONNECTED) {
		/* It is possible that connection was disconnected directly from
		 * connected callback so we must check state before doing
		 * connection parameters update.
		 */
		return;
	}

	if (atomic_test_and_set_bit(conn->flags, BT_CONN_AUTO_INIT_PROCEDURES_DONE)) {
		/* We have already run the auto-initiated procedures */
		return;
	}

	if (!atomic_test_bit(conn->flags, BT_CONN_LE_FEATURES_EXCHANGED) &&
	    can_initiate_feature_exchange(conn)) {
		err = bt_hci_le_read_remote_features(conn);
		if (err) {
			LOG_ERR("Failed read remote features (%d)", err);
		}
		if (conn->state != BT_CONN_CONNECTED) {
			return;
		}
	}

	if (IS_ENABLED(CONFIG_BT_REMOTE_VERSION) &&
	    !atomic_test_bit(conn->flags, BT_CONN_AUTO_VERSION_INFO)) {
		err = bt_hci_read_remote_version(conn);
		if (err) {
			LOG_ERR("Failed read remote version (%d)", err);
		}
		if (conn->state != BT_CONN_CONNECTED) {
			return;
		}
	}

	if (IS_ENABLED(CONFIG_BT_AUTO_PHY_UPDATE) && BT_FEAT_LE_PHY_2M(bt_dev.le.features) &&
	    !uses_symmetric_2mbit_phy(conn)) {
		err = bt_le_set_phy(conn, 0U, BT_HCI_LE_PHY_PREFER_2M, BT_HCI_LE_PHY_PREFER_2M,
				    BT_HCI_LE_PHY_CODED_ANY);
		if (err) {
			LOG_ERR("Failed LE Set PHY (%d)", err);
		}
		if (conn->state != BT_CONN_CONNECTED) {
			return;
		}
	}

	/* Data length should be automatically updated to the maximum by the
	 * controller. Not updating it is a quirk and this is the workaround.
	 */
	if (IS_ENABLED(CONFIG_BT_AUTO_DATA_LEN_UPDATE) && BT_FEAT_LE_DLE(bt_dev.le.features) &&
	    bt_drv_quirk_no_auto_dle()) {
		uint16_t tx_octets, tx_time;

		err = bt_hci_le_read_max_data_len(&tx_octets, &tx_time);
		if (!err) {
			err = bt_le_set_data_len(conn, tx_octets, tx_time);
			if (err) {
				LOG_ERR("Failed to set data len (%d)", err);
			}
		}
	}

	LOG_DBG("[%p] Successfully ran auto-initiated procedures", conn);
}

/* Executes procedures after a connection is established:
 * - read remote features
 * - read remote version
 * - update PHY
 * - update data length
 */
static void auto_initiated_procedures(struct k_work *unused)
{
	ARG_UNUSED(unused);

	bt_conn_foreach(BT_CONN_TYPE_LE, perform_auto_initiated_procedures, NULL);
}

static K_WORK_DEFINE(procedures_on_connect, auto_initiated_procedures);

static void schedule_auto_initiated_procedures(struct bt_conn *conn)
{
	LOG_DBG("[%p] Scheduling auto-init procedures", conn);
	k_work_submit(&procedures_on_connect);
}

void bt_conn_connected(struct bt_conn *conn)
{
	schedule_auto_initiated_procedures(conn);
	bt_l2cap_connected(conn);
	notify_connected(conn);
}

static int conn_disconnect(struct bt_conn *conn, uint8_t reason)
{
	int err;

	err = bt_hci_disconnect(conn->handle, reason);
	if (err) {
		return err;
	}

	if (conn->state == BT_CONN_CONNECTED) {
		bt_conn_set_state(conn, BT_CONN_DISCONNECTING);
	}

	return 0;
}

int bt_conn_disconnect(struct bt_conn *conn, uint8_t reason)
{
	switch (conn->state) {
	case BT_CONN_SCAN_BEFORE_INITIATING:
		conn->err = reason;
		bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
		if (IS_ENABLED(CONFIG_BT_CENTRAL)) {
			return bt_le_scan_user_add(BT_LE_SCAN_USER_CONN);
		}
		return 0;
	case BT_CONN_INITIATING:
		if (conn->type == BT_CONN_TYPE_LE) {
			if (IS_ENABLED(CONFIG_BT_CENTRAL)) {
				k_work_cancel_delayable(&conn->deferred_work);
				return bt_le_create_conn_cancel();
			}
		}
#if defined(CONFIG_BT_ISO)
		else if (conn->type == BT_CONN_TYPE_ISO) {
			return conn_disconnect(conn, reason);
		}
#endif /* CONFIG_BT_ISO */
#if defined(CONFIG_BT_CLASSIC)
		else if (conn->type == BT_CONN_TYPE_BR) {
			return bt_hci_connect_br_cancel(conn);
		}
#endif /* CONFIG_BT_CLASSIC */
		else {
			__ASSERT(false, "Invalid conn type %u", conn->type);
		}

		return 0;
	case BT_CONN_CONNECTED:
		return conn_disconnect(conn, reason);
	case BT_CONN_DISCONNECTING:
		return 0;
	case BT_CONN_DISCONNECTED:
	default:
		return -ENOTCONN;
	}
}

static void notify_connected(struct bt_conn *conn)
{
	struct bt_conn_cb *callback;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
		if (callback->connected) {
			callback->connected(conn, conn->err);
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
		if (cb->connected) {
			cb->connected(conn, conn->err);
		}
	}
}

static void notify_disconnected(struct bt_conn *conn)
{
	struct bt_conn_cb *callback;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
		if (callback->disconnected) {
			callback->disconnected(conn, conn->err);
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
		if (cb->disconnected) {
			cb->disconnected(conn, conn->err);
		}
	}
}

#if defined(CONFIG_BT_REMOTE_INFO)
void notify_remote_info(struct bt_conn *conn)
{
	struct bt_conn_remote_info remote_info;
	int err;

	err = bt_conn_get_remote_info(conn, &remote_info);
	if (err) {
		LOG_DBG("Notify remote info failed %d", err);
		return;
	}

	struct bt_conn_cb *callback;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
		if (callback->remote_info_available) {
			callback->remote_info_available(conn, &remote_info);
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
		if (cb->remote_info_available) {
			cb->remote_info_available(conn, &remote_info);
		}
	}
}
#endif /* defined(CONFIG_BT_REMOTE_INFO) */

void notify_le_param_updated(struct bt_conn *conn)
{
	/* If the new connection parameters meet the requirements of the
	 * pending parameters, stop sending the peripheral conn param
	 * request on timeout.
	 */
1964 if (atomic_test_bit(conn->flags, BT_CONN_PERIPHERAL_PARAM_SET) &&
1965 conn->le.interval >= conn->le.interval_min &&
1966 conn->le.interval <= conn->le.interval_max &&
1967 conn->le.latency == conn->le.pending_latency &&
1968 conn->le.timeout == conn->le.pending_timeout) {
1969 atomic_clear_bit(conn->flags, BT_CONN_PERIPHERAL_PARAM_SET);
1970 }
1971
1972 struct bt_conn_cb *callback;
1973
1974 SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
1975 if (callback->le_param_updated) {
1976 callback->le_param_updated(conn, conn->le.interval,
1977 conn->le.latency,
1978 conn->le.timeout);
1979 }
1980 }
1981
1982 STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
1983 if (cb->le_param_updated) {
1984 cb->le_param_updated(conn, conn->le.interval,
1985 conn->le.latency,
1986 conn->le.timeout);
1987 }
1988 }
1989 }
1990
#if defined(CONFIG_BT_USER_DATA_LEN_UPDATE)
void notify_le_data_len_updated(struct bt_conn *conn)
{
	struct bt_conn_cb *callback;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
		if (callback->le_data_len_updated) {
			callback->le_data_len_updated(conn, &conn->le.data_len);
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
		if (cb->le_data_len_updated) {
			cb->le_data_len_updated(conn, &conn->le.data_len);
		}
	}
}
#endif /* CONFIG_BT_USER_DATA_LEN_UPDATE */

#if defined(CONFIG_BT_USER_PHY_UPDATE)
void notify_le_phy_updated(struct bt_conn *conn)
{
	struct bt_conn_cb *callback;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
		if (callback->le_phy_updated) {
			callback->le_phy_updated(conn, &conn->le.phy);
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
		if (cb->le_phy_updated) {
			cb->le_phy_updated(conn, &conn->le.phy);
		}
	}
}
#endif /* CONFIG_BT_USER_PHY_UPDATE */

bool le_param_req(struct bt_conn *conn, struct bt_le_conn_param *param)
{
	if (!bt_le_conn_params_valid(param)) {
		return false;
	}

	struct bt_conn_cb *callback;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
		if (!callback->le_param_req) {
			continue;
		}

		if (!callback->le_param_req(conn, param)) {
			return false;
		}

		/* The callback may modify the parameters so we need to
		 * double-check that it returned valid parameters.
		 */
		if (!bt_le_conn_params_valid(param)) {
			return false;
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
		if (!cb->le_param_req) {
			continue;
		}

		if (!cb->le_param_req(conn, param)) {
			return false;
		}

		/* The callback may modify the parameters so we need to
		 * double-check that it returned valid parameters.
		 */
		if (!bt_le_conn_params_valid(param)) {
			return false;
		}
	}

	/* Default to accepting if there's no app callback */
	return true;
}

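/* Sketch of an application-side le_param_req handler that the dispatcher
 * above would invoke (illustrative only; the 40-unit cap, i.e. 50 ms, is a
 * hypothetical policy). Returning false rejects the request outright; the
 * handler may also tighten the proposed range in place before accepting,
 * which is why the dispatcher re-validates after each callback.
 *
 *	static bool app_le_param_req(struct bt_conn *conn,
 *				     struct bt_le_conn_param *param)
 *	{
 *		if (param->interval_max > 40) {
 *			param->interval_max = 40;
 *		}
 *
 *		return param->interval_min <= param->interval_max;
 *	}
 */
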
static int send_conn_le_param_update(struct bt_conn *conn,
				     const struct bt_le_conn_param *param)
{
	LOG_DBG("conn %p features 0x%02x params (%d-%d %d %d)", conn, conn->le.features[0],
		param->interval_min, param->interval_max, param->latency, param->timeout);

	/* Proceed only if the connection parameters contain valid values */
	if (!bt_le_conn_params_valid(param)) {
		return -EINVAL;
	}

	/* Use the LE connection parameter request procedure if both local and
	 * remote support it, or LE connection update if the local role is
	 * central.
	 */
	if ((BT_FEAT_LE_CONN_PARAM_REQ_PROC(bt_dev.le.features) &&
	     BT_FEAT_LE_CONN_PARAM_REQ_PROC(conn->le.features) &&
	     !atomic_test_bit(conn->flags, BT_CONN_PERIPHERAL_PARAM_L2CAP)) ||
	    (conn->role == BT_HCI_ROLE_CENTRAL)) {
		int rc;

		rc = bt_conn_le_conn_update(conn, param);

		/* store those in case of fallback to L2CAP */
		if (rc == 0) {
			conn->le.interval_min = param->interval_min;
			conn->le.interval_max = param->interval_max;
			conn->le.pending_latency = param->latency;
			conn->le.pending_timeout = param->timeout;
		}

		return rc;
	}

	/* Fall back to L2CAP signaling if the remote central does not support
	 * the LL Connection Parameters Request procedure.
	 */
	return bt_l2cap_update_conn_param(conn, param);
}

#if defined(CONFIG_BT_ISO_UNICAST)
static struct bt_conn *conn_lookup_iso(struct bt_conn *conn)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(iso_conns); i++) {
		struct bt_conn *iso = bt_conn_ref(&iso_conns[i]);

		if (iso == NULL) {
			continue;
		}

		if (iso->iso.acl == conn) {
			return iso;
		}

		bt_conn_unref(iso);
	}

	return NULL;
}
#endif /* CONFIG_BT_ISO_UNICAST */

#if defined(CONFIG_BT_CLASSIC)
static struct bt_conn *conn_lookup_sco(struct bt_conn *conn)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sco_conns); i++) {
		struct bt_conn *sco = bt_conn_ref(&sco_conns[i]);

		if (sco == NULL) {
			continue;
		}

		if (sco->sco.acl == conn) {
			return sco;
		}

		bt_conn_unref(sco);
	}

	return NULL;
}
#endif /* CONFIG_BT_CLASSIC */

static void deferred_work(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct bt_conn *conn = CONTAINER_OF(dwork, struct bt_conn, deferred_work);
	const struct bt_le_conn_param *param;

	LOG_DBG("conn %p", conn);

	if (conn->state == BT_CONN_DISCONNECTED) {
#if defined(CONFIG_BT_ISO_UNICAST)
		struct bt_conn *iso;

		if (conn->type == BT_CONN_TYPE_ISO) {
			/* bt_iso_disconnected is responsible for unref'ing the
			 * connection pointer, as it is conditional on whether
			 * the connection is a central or peripheral.
			 */
			bt_iso_disconnected(conn);
			return;
		}

		/* Mark all ISO channels associated
		 * with ACL conn as not connected, and
		 * remove ACL reference
		 */
		iso = conn_lookup_iso(conn);
		while (iso != NULL) {
			struct bt_iso_chan *chan = iso->iso.chan;

			if (chan != NULL) {
				bt_iso_chan_set_state(chan,
						      BT_ISO_STATE_DISCONNECTING);
			}

			bt_iso_cleanup_acl(iso);

			bt_conn_unref(iso);
			iso = conn_lookup_iso(conn);
		}
#endif /* CONFIG_BT_ISO_UNICAST */
#if defined(CONFIG_BT_CLASSIC)
		struct bt_conn *sco;

		/* Mark all SCO channels associated
		 * with ACL conn as not connected, and
		 * remove ACL reference
		 */
		sco = conn_lookup_sco(conn);
		while (sco != NULL) {
			struct bt_sco_chan *chan = sco->sco.chan;

			if (chan != NULL) {
				bt_sco_chan_set_state(chan,
						      BT_SCO_STATE_DISCONNECTING);
			}

			bt_sco_cleanup_acl(sco);

			bt_conn_unref(sco);
			sco = conn_lookup_sco(conn);
		}
#endif /* CONFIG_BT_CLASSIC */
		bt_l2cap_disconnected(conn);
		notify_disconnected(conn);

		/* Release the reference we took for the very first
		 * state transition.
		 */
		bt_conn_unref(conn);
		return;
	}

	if (conn->type != BT_CONN_TYPE_LE) {
		return;
	}

	if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
	    conn->role == BT_CONN_ROLE_CENTRAL) {
		/* We don't call bt_conn_disconnect() as it would also clear
		 * the auto connect flag if it was set; instead, cancel the
		 * connection directly.
		 */
		bt_le_create_conn_cancel();
		return;
	}

	/* If the application set its own params use those, otherwise use defaults. */
	if (atomic_test_and_clear_bit(conn->flags,
				      BT_CONN_PERIPHERAL_PARAM_SET)) {
		int err;

		param = BT_LE_CONN_PARAM(conn->le.interval_min,
					 conn->le.interval_max,
					 conn->le.pending_latency,
					 conn->le.pending_timeout);

		err = send_conn_le_param_update(conn, param);
		if (!err) {
			atomic_clear_bit(conn->flags,
					 BT_CONN_PERIPHERAL_PARAM_AUTO_UPDATE);
		} else {
			LOG_WRN("Send LE param update failed (err %d)", err);
		}
	} else if (IS_ENABLED(CONFIG_BT_GAP_AUTO_UPDATE_CONN_PARAMS)) {
#if defined(CONFIG_BT_GAP_PERIPHERAL_PREF_PARAMS)
		int err;

		param = BT_LE_CONN_PARAM(
				CONFIG_BT_PERIPHERAL_PREF_MIN_INT,
				CONFIG_BT_PERIPHERAL_PREF_MAX_INT,
				CONFIG_BT_PERIPHERAL_PREF_LATENCY,
				CONFIG_BT_PERIPHERAL_PREF_TIMEOUT);

		err = send_conn_le_param_update(conn, param);
		if (!err) {
			atomic_set_bit(conn->flags,
				       BT_CONN_PERIPHERAL_PARAM_AUTO_UPDATE);
		} else {
			LOG_WRN("Send auto LE param update failed (err %d)",
				err);
		}
#endif /* CONFIG_BT_GAP_PERIPHERAL_PREF_PARAMS */
	}

	atomic_set_bit(conn->flags, BT_CONN_PERIPHERAL_PARAM_UPDATE);
}

static struct bt_conn *acl_conn_new(void)
{
	return bt_conn_new(acl_conns, ARRAY_SIZE(acl_conns));
}

#if defined(CONFIG_BT_CLASSIC)
void bt_sco_cleanup(struct bt_conn *sco_conn)
{
	bt_sco_cleanup_acl(sco_conn);
	bt_conn_unref(sco_conn);
}

static struct bt_conn *sco_conn_new(void)
{
	return bt_conn_new(sco_conns, ARRAY_SIZE(sco_conns));
}

struct bt_conn *bt_conn_create_br(const bt_addr_t *peer,
				  const struct bt_br_conn_param *param)
{
	struct bt_hci_cp_connect *cp;
	struct bt_conn *conn;
	struct net_buf *buf;

	conn = bt_conn_lookup_addr_br(peer);
	if (conn) {
		switch (conn->state) {
		case BT_CONN_INITIATING:
		case BT_CONN_CONNECTED:
			return conn;
		default:
			bt_conn_unref(conn);
			return NULL;
		}
	}

	conn = bt_conn_add_br(peer);
	if (!conn) {
		return NULL;
	}

	buf = bt_hci_cmd_create(BT_HCI_OP_CONNECT, sizeof(*cp));
	if (!buf) {
		bt_conn_unref(conn);
		return NULL;
	}

	cp = net_buf_add(buf, sizeof(*cp));

	(void)memset(cp, 0, sizeof(*cp));

	memcpy(&cp->bdaddr, peer, sizeof(cp->bdaddr));
	cp->packet_type = sys_cpu_to_le16(0xcc18); /* DM1 DH1 DM3 DH3 DM5 DH5 */
	cp->pscan_rep_mode = 0x02; /* R2 */
	cp->allow_role_switch = param->allow_role_switch ? 0x01 : 0x00;
	cp->clock_offset = 0x0000; /* TODO: use cached clock offset */

	if (bt_hci_cmd_send_sync(BT_HCI_OP_CONNECT, buf, NULL) < 0) {
		bt_conn_unref(conn);
		return NULL;
	}

	bt_conn_set_state(conn, BT_CONN_INITIATING);
	conn->role = BT_CONN_ROLE_CENTRAL;

	return conn;
}

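/* Illustrative application-side usage sketch (`peer_bdaddr` is a
 * hypothetical bt_addr_t; BT_BR_CONN_PARAM_DEFAULT is assumed from
 * <zephyr/bluetooth/conn.h>). The returned pointer carries a reference
 * the caller owns:
 *
 *	struct bt_conn *conn = bt_conn_create_br(&peer_bdaddr,
 *						 BT_BR_CONN_PARAM_DEFAULT);
 *	if (!conn) {
 *		LOG_ERR("BR/EDR connection failed");
 *	} else {
 *		bt_conn_unref(conn);
 *	}
 *
 * The connected() callback reports the final result of the attempt.
 */
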
struct bt_conn *bt_conn_lookup_addr_sco(const bt_addr_t *peer)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sco_conns); i++) {
		struct bt_conn *conn = bt_conn_ref(&sco_conns[i]);

		if (!conn) {
			continue;
		}

		if (conn->type != BT_CONN_TYPE_SCO) {
			bt_conn_unref(conn);
			continue;
		}

		if (!bt_addr_eq(peer, &conn->sco.acl->br.dst)) {
			bt_conn_unref(conn);
			continue;
		}

		return conn;
	}

	return NULL;
}

struct bt_conn *bt_conn_lookup_addr_br(const bt_addr_t *peer)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(acl_conns); i++) {
		struct bt_conn *conn = bt_conn_ref(&acl_conns[i]);

		if (!conn) {
			continue;
		}

		if (conn->type != BT_CONN_TYPE_BR) {
			bt_conn_unref(conn);
			continue;
		}

		if (!bt_addr_eq(peer, &conn->br.dst)) {
			bt_conn_unref(conn);
			continue;
		}

		return conn;
	}

	return NULL;
}

struct bt_conn *bt_conn_add_sco(const bt_addr_t *peer, int link_type)
{
	struct bt_conn *sco_conn = sco_conn_new();

	if (!sco_conn) {
		return NULL;
	}

	sco_conn->sco.acl = bt_conn_lookup_addr_br(peer);
	if (!sco_conn->sco.acl) {
		bt_conn_unref(sco_conn);
		return NULL;
	}

	sco_conn->type = BT_CONN_TYPE_SCO;

	if (link_type == BT_HCI_SCO) {
		if (BT_FEAT_LMP_ESCO_CAPABLE(bt_dev.features)) {
			sco_conn->sco.pkt_type = (bt_dev.br.esco_pkt_type &
						  ESCO_PKT_MASK);
		} else {
			sco_conn->sco.pkt_type = (bt_dev.br.esco_pkt_type &
						  SCO_PKT_MASK);
		}
	} else if (link_type == BT_HCI_ESCO) {
		sco_conn->sco.pkt_type = (bt_dev.br.esco_pkt_type &
					  ~EDR_ESCO_PKT_MASK);
	}

	return sco_conn;
}

struct bt_conn *bt_conn_add_br(const bt_addr_t *peer)
{
	struct bt_conn *conn = acl_conn_new();

	if (!conn) {
		return NULL;
	}

	bt_addr_copy(&conn->br.dst, peer);
	conn->type = BT_CONN_TYPE_BR;
	conn->tx_data_pull = l2cap_br_data_pull;
	conn->get_and_clear_cb = acl_get_and_clear_cb;
	conn->has_data = acl_has_data;

	return conn;
}

static int bt_hci_connect_br_cancel(struct bt_conn *conn)
{
	struct bt_hci_cp_connect_cancel *cp;
	struct bt_hci_rp_connect_cancel *rp;
	struct net_buf *buf, *rsp;
	int err;

	buf = bt_hci_cmd_create(BT_HCI_OP_CONNECT_CANCEL, sizeof(*cp));
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	memcpy(&cp->bdaddr, &conn->br.dst, sizeof(cp->bdaddr));

	err = bt_hci_cmd_send_sync(BT_HCI_OP_CONNECT_CANCEL, buf, &rsp);
	if (err) {
		return err;
	}

	rp = (void *)rsp->data;

	err = rp->status ? -EIO : 0;

	net_buf_unref(rsp);

	return err;
}

#endif /* CONFIG_BT_CLASSIC */

#if defined(CONFIG_BT_SMP)
bool bt_conn_ltk_present(const struct bt_conn *conn)
{
	const struct bt_keys *keys = conn->le.keys;

	if (!keys) {
		keys = bt_keys_find_addr(conn->id, &conn->le.dst);
	}

	if (keys) {
		if (conn->role == BT_HCI_ROLE_CENTRAL) {
			return keys->keys & (BT_KEYS_LTK_P256 | BT_KEYS_PERIPH_LTK);
		} else {
			return keys->keys & (BT_KEYS_LTK_P256 | BT_KEYS_LTK);
		}
	}

	return false;
}

void bt_conn_identity_resolved(struct bt_conn *conn)
{
	const bt_addr_le_t *rpa;

	if (conn->role == BT_HCI_ROLE_CENTRAL) {
		rpa = &conn->le.resp_addr;
	} else {
		rpa = &conn->le.init_addr;
	}

	struct bt_conn_cb *callback;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
		if (callback->identity_resolved) {
			callback->identity_resolved(conn, rpa, &conn->le.dst);
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
		if (cb->identity_resolved) {
			cb->identity_resolved(conn, rpa, &conn->le.dst);
		}
	}
}

int bt_conn_le_start_encryption(struct bt_conn *conn, uint8_t rand[8],
				uint8_t ediv[2], const uint8_t *ltk, size_t len)
{
	struct bt_hci_cp_le_start_encryption *cp;
	struct net_buf *buf;

	if (len > sizeof(cp->ltk)) {
		return -EINVAL;
	}

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_START_ENCRYPTION, sizeof(*cp));
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(conn->handle);
	memcpy(&cp->rand, rand, sizeof(cp->rand));
	memcpy(&cp->ediv, ediv, sizeof(cp->ediv));

	memcpy(cp->ltk, ltk, len);
	if (len < sizeof(cp->ltk)) {
		(void)memset(cp->ltk + len, 0, sizeof(cp->ltk) - len);
	}

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_START_ENCRYPTION, buf, NULL);
}
#endif /* CONFIG_BT_SMP */

#if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CLASSIC)
uint8_t bt_conn_enc_key_size(const struct bt_conn *conn)
{
	if (!conn->encrypt) {
		return 0;
	}

	if (IS_ENABLED(CONFIG_BT_CLASSIC) &&
	    conn->type == BT_CONN_TYPE_BR) {
		struct bt_hci_cp_read_encryption_key_size *cp;
		struct bt_hci_rp_read_encryption_key_size *rp;
		struct net_buf *buf;
		struct net_buf *rsp;
		uint8_t key_size;

		buf = bt_hci_cmd_create(BT_HCI_OP_READ_ENCRYPTION_KEY_SIZE,
					sizeof(*cp));
		if (!buf) {
			return 0;
		}

		cp = net_buf_add(buf, sizeof(*cp));
		cp->handle = sys_cpu_to_le16(conn->handle);

		if (bt_hci_cmd_send_sync(BT_HCI_OP_READ_ENCRYPTION_KEY_SIZE,
					 buf, &rsp)) {
			return 0;
		}

		rp = (void *)rsp->data;

		key_size = rp->status ? 0 : rp->key_size;

		net_buf_unref(rsp);

		return key_size;
	}

	if (IS_ENABLED(CONFIG_BT_SMP)) {
		return conn->le.keys ? conn->le.keys->enc_size : 0;
	}

	return 0;
}

static void reset_pairing(struct bt_conn *conn)
{
#if defined(CONFIG_BT_CLASSIC)
	if (conn->type == BT_CONN_TYPE_BR) {
		atomic_clear_bit(conn->flags, BT_CONN_BR_PAIRING);
		atomic_clear_bit(conn->flags, BT_CONN_BR_PAIRED);
		atomic_clear_bit(conn->flags, BT_CONN_BR_PAIRING_INITIATOR);
		atomic_clear_bit(conn->flags, BT_CONN_BR_LEGACY_SECURE);
		atomic_clear_bit(conn->flags, BT_CONN_BR_GENERAL_BONDING);
	}
#endif /* CONFIG_BT_CLASSIC */

	/* Reset required security level to current operational */
	conn->required_sec_level = conn->sec_level;
}

void bt_conn_security_changed(struct bt_conn *conn, uint8_t hci_err,
			      enum bt_security_err err)
{
	reset_pairing(conn);
	bt_l2cap_security_changed(conn, hci_err);
	if (IS_ENABLED(CONFIG_BT_ISO_CENTRAL)) {
		bt_iso_security_changed(conn, hci_err);
	}

	struct bt_conn_cb *callback;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
		if (callback->security_changed) {
			callback->security_changed(conn, conn->sec_level, err);
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
		if (cb->security_changed) {
			cb->security_changed(conn, conn->sec_level, err);
		}
	}

#if defined(CONFIG_BT_KEYS_OVERWRITE_OLDEST)
	if (!err && conn->sec_level >= BT_SECURITY_L2) {
		if (conn->type == BT_CONN_TYPE_LE) {
			bt_keys_update_usage(conn->id, bt_conn_get_dst(conn));
		}

#if defined(CONFIG_BT_CLASSIC)
		if (conn->type == BT_CONN_TYPE_BR) {
			bt_keys_link_key_update_usage(&conn->br.dst);
		}
#endif /* CONFIG_BT_CLASSIC */
	}
#endif /* CONFIG_BT_KEYS_OVERWRITE_OLDEST */
}

static int start_security(struct bt_conn *conn)
{
	if (IS_ENABLED(CONFIG_BT_CLASSIC) && conn->type == BT_CONN_TYPE_BR) {
		return bt_ssp_start_security(conn);
	}

	if (IS_ENABLED(CONFIG_BT_SMP)) {
		return bt_smp_start_security(conn);
	}

	return -EINVAL;
}

int bt_conn_set_security(struct bt_conn *conn, bt_security_t sec)
{
	bool force_pair;
	int err;

	if (conn->state != BT_CONN_CONNECTED) {
		return -ENOTCONN;
	}

	force_pair = sec & BT_SECURITY_FORCE_PAIR;
	sec &= ~BT_SECURITY_FORCE_PAIR;

	if (IS_ENABLED(CONFIG_BT_SMP_SC_ONLY)) {
		sec = BT_SECURITY_L4;
	}

	if (IS_ENABLED(CONFIG_BT_SMP_OOB_LEGACY_PAIR_ONLY)) {
		sec = BT_SECURITY_L3;
	}

	/* nothing to do */
	if (!force_pair && (conn->sec_level >= sec || conn->required_sec_level >= sec)) {
		return 0;
	}

	atomic_set_bit_to(conn->flags, BT_CONN_FORCE_PAIR, force_pair);
	conn->required_sec_level = sec;

	err = start_security(conn);

	/* reset required security level in case of error */
	if (err) {
		conn->required_sec_level = conn->sec_level;
	}

	return err;
}

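/* Illustrative usage sketch: raising link security from application code.
 * On success the change is reported asynchronously through the
 * security_changed() callback.
 *
 *	err = bt_conn_set_security(conn, BT_SECURITY_L2);
 *	if (err) {
 *		LOG_ERR("Failed to set security (err %d)", err);
 *	}
 */
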
bt_security_t bt_conn_get_security(const struct bt_conn *conn)
{
	return conn->sec_level;
}
#else
bt_security_t bt_conn_get_security(const struct bt_conn *conn)
{
	return BT_SECURITY_L1;
}
#endif /* CONFIG_BT_SMP || CONFIG_BT_CLASSIC */

int bt_conn_cb_register(struct bt_conn_cb *cb)
{
	if (sys_slist_find(&conn_cbs, &cb->_node, NULL)) {
		return -EEXIST;
	}

	sys_slist_append(&conn_cbs, &cb->_node);

	return 0;
}

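/* Illustrative registration sketch. A callback struct registered here must
 * stay valid for as long as it is registered (it is linked, not copied).
 * Statically allocated listeners can alternatively use BT_CONN_CB_DEFINE(),
 * which places them in the iterable section walked by the notify helpers
 * above.
 *
 *	static struct bt_conn_cb app_conn_cb = {
 *		.connected = app_connected,
 *		.disconnected = app_disconnected,
 *	};
 *
 *	err = bt_conn_cb_register(&app_conn_cb);
 */
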
int bt_conn_cb_unregister(struct bt_conn_cb *cb)
{
	CHECKIF(cb == NULL) {
		return -EINVAL;
	}

	if (!sys_slist_find_and_remove(&conn_cbs, &cb->_node)) {
		return -ENOENT;
	}

	return 0;
}

bool bt_conn_exists_le(uint8_t id, const bt_addr_le_t *peer)
{
	struct bt_conn *conn = bt_conn_lookup_addr_le(id, peer);

	if (conn) {
		/* Connection object already exists.
		 * If the connection state is not "disconnected", then the
		 * connection was created but has not yet been disconnected.
		 * If the connection state is "disconnected" then the
		 * connection still has valid references. The last reference
		 * of the stack is released after the disconnected callback.
		 */
		LOG_WRN("Found valid connection (%p) with address %s in %s state", conn,
			bt_addr_le_str(peer), state2str(conn->state));
		bt_conn_unref(conn);
		return true;
	}

	return false;
}

struct bt_conn *bt_conn_add_le(uint8_t id, const bt_addr_le_t *peer)
{
	struct bt_conn *conn = acl_conn_new();

	if (!conn) {
		return NULL;
	}

	conn->id = id;
	bt_addr_le_copy(&conn->le.dst, peer);
#if defined(CONFIG_BT_SMP)
	conn->sec_level = BT_SECURITY_L1;
	conn->required_sec_level = BT_SECURITY_L1;
#endif /* CONFIG_BT_SMP */
	conn->type = BT_CONN_TYPE_LE;
	conn->tx_data_pull = l2cap_data_pull;
	conn->get_and_clear_cb = acl_get_and_clear_cb;
	conn->has_data = acl_has_data;
	conn->le.interval_min = BT_GAP_INIT_CONN_INT_MIN;
	conn->le.interval_max = BT_GAP_INIT_CONN_INT_MAX;

	return conn;
}

bool bt_conn_is_peer_addr_le(const struct bt_conn *conn, uint8_t id,
			     const bt_addr_le_t *peer)
{
	if (id != conn->id) {
		return false;
	}

	/* Check against conn dst address as it may be the identity address */
	if (bt_addr_le_eq(peer, &conn->le.dst)) {
		return true;
	}

	/* Check against initial connection address */
	if (conn->role == BT_HCI_ROLE_CENTRAL) {
		return bt_addr_le_eq(peer, &conn->le.resp_addr);
	}

	return bt_addr_le_eq(peer, &conn->le.init_addr);
}

struct bt_conn *bt_conn_lookup_addr_le(uint8_t id, const bt_addr_le_t *peer)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(acl_conns); i++) {
		struct bt_conn *conn = bt_conn_ref(&acl_conns[i]);

		if (!conn) {
			continue;
		}

		if (conn->type != BT_CONN_TYPE_LE) {
			bt_conn_unref(conn);
			continue;
		}

		if (!bt_conn_is_peer_addr_le(conn, id, peer)) {
			bt_conn_unref(conn);
			continue;
		}

		return conn;
	}

	return NULL;
}

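/* Illustrative lookup sketch. On a match the function returns a new
 * reference, so the caller must release it when done:
 *
 *	struct bt_conn *conn = bt_conn_lookup_addr_le(BT_ID_DEFAULT, &peer);
 *	if (conn) {
 *		(use the connection)
 *		bt_conn_unref(conn);
 *	}
 */
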
struct bt_conn *bt_conn_lookup_state_le(uint8_t id, const bt_addr_le_t *peer,
					const bt_conn_state_t state)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(acl_conns); i++) {
		struct bt_conn *conn = bt_conn_ref(&acl_conns[i]);

		if (!conn) {
			continue;
		}

		if (conn->type != BT_CONN_TYPE_LE) {
			bt_conn_unref(conn);
			continue;
		}

		if (peer && !bt_conn_is_peer_addr_le(conn, id, peer)) {
			bt_conn_unref(conn);
			continue;
		}

		if (!(conn->state == state && conn->id == id)) {
			bt_conn_unref(conn);
			continue;
		}

		return conn;
	}

	return NULL;
}

const bt_addr_le_t *bt_conn_get_dst(const struct bt_conn *conn)
{
	return &conn->le.dst;
}

static enum bt_conn_state conn_internal_to_public_state(bt_conn_state_t state)
{
	switch (state) {
	case BT_CONN_DISCONNECTED:
	case BT_CONN_DISCONNECT_COMPLETE:
		return BT_CONN_STATE_DISCONNECTED;
	case BT_CONN_SCAN_BEFORE_INITIATING:
	case BT_CONN_INITIATING_FILTER_LIST:
	case BT_CONN_ADV_CONNECTABLE:
	case BT_CONN_ADV_DIR_CONNECTABLE:
	case BT_CONN_INITIATING:
		return BT_CONN_STATE_CONNECTING;
	case BT_CONN_CONNECTED:
		return BT_CONN_STATE_CONNECTED;
	case BT_CONN_DISCONNECTING:
		return BT_CONN_STATE_DISCONNECTING;
	default:
		__ASSERT(false, "Invalid conn state %u", state);
		return 0;
	}
}

int bt_conn_get_info(const struct bt_conn *conn, struct bt_conn_info *info)
{
	info->type = conn->type;
	info->role = conn->role;
	info->id = conn->id;
	info->state = conn_internal_to_public_state(conn->state);
	info->security.flags = 0;
	info->security.level = bt_conn_get_security(conn);
#if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CLASSIC)
	info->security.enc_key_size = bt_conn_enc_key_size(conn);
#else
	info->security.enc_key_size = 0;
#endif /* CONFIG_BT_SMP || CONFIG_BT_CLASSIC */

	switch (conn->type) {
	case BT_CONN_TYPE_LE:
		info->le.dst = &conn->le.dst;
		info->le.src = &bt_dev.id_addr[conn->id];
		if (conn->role == BT_HCI_ROLE_CENTRAL) {
			info->le.local = &conn->le.init_addr;
			info->le.remote = &conn->le.resp_addr;
		} else {
			info->le.local = &conn->le.resp_addr;
			info->le.remote = &conn->le.init_addr;
		}
		info->le.interval = conn->le.interval;
		info->le.latency = conn->le.latency;
		info->le.timeout = conn->le.timeout;
#if defined(CONFIG_BT_USER_PHY_UPDATE)
		info->le.phy = &conn->le.phy;
#endif /* CONFIG_BT_USER_PHY_UPDATE */
#if defined(CONFIG_BT_USER_DATA_LEN_UPDATE)
		info->le.data_len = &conn->le.data_len;
#endif /* CONFIG_BT_USER_DATA_LEN_UPDATE */
#if defined(CONFIG_BT_SUBRATING)
		info->le.subrate = &conn->le.subrate;
#endif /* CONFIG_BT_SUBRATING */
		if (conn->le.keys && (conn->le.keys->flags & BT_KEYS_SC)) {
			info->security.flags |= BT_SECURITY_FLAG_SC;
		}
		if (conn->le.keys && (conn->le.keys->flags & BT_KEYS_OOB)) {
			info->security.flags |= BT_SECURITY_FLAG_OOB;
		}
		return 0;
#if defined(CONFIG_BT_CLASSIC)
	case BT_CONN_TYPE_BR:
		info->br.dst = &conn->br.dst;
		return 0;
#endif /* CONFIG_BT_CLASSIC */
#if defined(CONFIG_BT_ISO)
	case BT_CONN_TYPE_ISO:
		if (IS_ENABLED(CONFIG_BT_ISO_UNICAST) &&
		    conn->iso.info.type == BT_ISO_CHAN_TYPE_CONNECTED && conn->iso.acl != NULL) {
			info->le.dst = &conn->iso.acl->le.dst;
			info->le.src = &bt_dev.id_addr[conn->iso.acl->id];
		} else {
			info->le.src = BT_ADDR_LE_NONE;
			info->le.dst = BT_ADDR_LE_NONE;
		}
		return 0;
#endif /* CONFIG_BT_ISO */
	default:
		break;
	}

	return -EINVAL;
}

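/* Illustrative usage sketch: reading the parameters of an LE link. The
 * interval is in 1.25 ms units and the timeout in 10 ms units.
 *
 *	struct bt_conn_info info;
 *
 *	if (bt_conn_get_info(conn, &info) == 0 &&
 *	    info.type == BT_CONN_TYPE_LE) {
 *		LOG_INF("interval %u ms", info.le.interval * 125 / 100);
 *	}
 */
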
int bt_conn_get_remote_info(struct bt_conn *conn,
			    struct bt_conn_remote_info *remote_info)
{
	if (!atomic_test_bit(conn->flags, BT_CONN_LE_FEATURES_EXCHANGED) ||
	    (IS_ENABLED(CONFIG_BT_REMOTE_VERSION) &&
	     !atomic_test_bit(conn->flags, BT_CONN_AUTO_VERSION_INFO))) {
		return -EBUSY;
	}

	remote_info->type = conn->type;
#if defined(CONFIG_BT_REMOTE_VERSION)
	/* The conn->rv values will be just zeroes if the operation failed */
	remote_info->version = conn->rv.version;
	remote_info->manufacturer = conn->rv.manufacturer;
	remote_info->subversion = conn->rv.subversion;
#else
	remote_info->version = 0;
	remote_info->manufacturer = 0;
	remote_info->subversion = 0;
#endif /* CONFIG_BT_REMOTE_VERSION */

	switch (conn->type) {
	case BT_CONN_TYPE_LE:
		remote_info->le.features = conn->le.features;
		return 0;
#if defined(CONFIG_BT_CLASSIC)
	case BT_CONN_TYPE_BR:
		/* TODO: Make sure the HCI commands to read BR features and
		 * extended features have finished.
		 */
		return -ENOTSUP;
#endif /* CONFIG_BT_CLASSIC */
	default:
		return -EINVAL;
	}
}

/* Read Transmit Power Level HCI command */
static int bt_conn_get_tx_power_level(struct bt_conn *conn, uint8_t type,
				      int8_t *tx_power_level)
{
	int err;
	struct bt_hci_rp_read_tx_power_level *rp;
	struct net_buf *rsp;
	struct bt_hci_cp_read_tx_power_level *cp;
	struct net_buf *buf;

	buf = bt_hci_cmd_create(BT_HCI_OP_READ_TX_POWER_LEVEL, sizeof(*cp));
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->type = type;
	cp->handle = sys_cpu_to_le16(conn->handle);

	err = bt_hci_cmd_send_sync(BT_HCI_OP_READ_TX_POWER_LEVEL, buf, &rsp);
	if (err) {
		return err;
	}

	rp = (void *)rsp->data;
	*tx_power_level = rp->tx_power_level;
	net_buf_unref(rsp);

	return 0;
}

#if defined(CONFIG_BT_TRANSMIT_POWER_CONTROL)
void notify_tx_power_report(struct bt_conn *conn,
			    struct bt_conn_le_tx_power_report report)
{
	struct bt_conn_cb *callback;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
		if (callback->tx_power_report) {
			callback->tx_power_report(conn, &report);
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
		if (cb->tx_power_report) {
			cb->tx_power_report(conn, &report);
		}
	}
}

int bt_conn_le_enhanced_get_tx_power_level(struct bt_conn *conn,
					   struct bt_conn_le_tx_power *tx_power)
{
	int err;
	struct bt_hci_rp_le_read_tx_power_level *rp;
	struct net_buf *rsp;
	struct bt_hci_cp_le_read_tx_power_level *cp;
	struct net_buf *buf;

	if (!tx_power->phy) {
		return -EINVAL;
	}

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_ENH_READ_TX_POWER_LEVEL, sizeof(*cp));
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(conn->handle);
	cp->phy = tx_power->phy;

	err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_ENH_READ_TX_POWER_LEVEL, buf, &rsp);
	if (err) {
		return err;
	}

	rp = (void *)rsp->data;
	tx_power->phy = rp->phy;
	tx_power->current_level = rp->current_tx_power_level;
	tx_power->max_level = rp->max_tx_power_level;
	net_buf_unref(rsp);

	return 0;
}

int bt_conn_le_get_remote_tx_power_level(struct bt_conn *conn,
					 enum bt_conn_le_tx_power_phy phy)
{
	struct bt_hci_cp_le_read_tx_power_level *cp;
	struct net_buf *buf;

	if (!phy) {
		return -EINVAL;
	}

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_READ_REMOTE_TX_POWER_LEVEL, sizeof(*cp));
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(conn->handle);
	cp->phy = phy;

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_REMOTE_TX_POWER_LEVEL, buf, NULL);
}

int bt_conn_le_set_tx_power_report_enable(struct bt_conn *conn,
					  bool local_enable,
					  bool remote_enable)
{
	struct bt_hci_cp_le_set_tx_power_report_enable *cp;
	struct net_buf *buf;

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_TX_POWER_REPORT_ENABLE, sizeof(*cp));
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(conn->handle);
	cp->local_enable = local_enable ? BT_HCI_LE_TX_POWER_REPORT_ENABLE :
					  BT_HCI_LE_TX_POWER_REPORT_DISABLE;
	cp->remote_enable = remote_enable ? BT_HCI_LE_TX_POWER_REPORT_ENABLE :
					    BT_HCI_LE_TX_POWER_REPORT_DISABLE;

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_TX_POWER_REPORT_ENABLE, buf, NULL);
}
#endif /* CONFIG_BT_TRANSMIT_POWER_CONTROL */

int bt_conn_le_get_tx_power_level(struct bt_conn *conn,
				  struct bt_conn_le_tx_power *tx_power_level)
{
	int err;

	if (tx_power_level->phy != 0) {
		if (IS_ENABLED(CONFIG_BT_TRANSMIT_POWER_CONTROL)) {
			return bt_conn_le_enhanced_get_tx_power_level(conn, tx_power_level);
		} else {
			return -ENOTSUP;
		}
	}

	err = bt_conn_get_tx_power_level(conn, BT_TX_POWER_LEVEL_CURRENT,
					 &tx_power_level->current_level);
	if (err) {
		return err;
	}

	err = bt_conn_get_tx_power_level(conn, BT_TX_POWER_LEVEL_MAX,
					 &tx_power_level->max_level);
	return err;
}

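/* Illustrative usage sketch: with .phy set to 0 the legacy Read Transmit
 * Power Level command is used to fill in both the current and the maximum
 * level; a non-zero .phy selects the enhanced read when
 * CONFIG_BT_TRANSMIT_POWER_CONTROL is enabled.
 *
 *	struct bt_conn_le_tx_power tx_power = { .phy = 0 };
 *
 *	err = bt_conn_le_get_tx_power_level(conn, &tx_power);
 *	if (!err) {
 *		LOG_INF("TX power %d/%d dBm", tx_power.current_level,
 *			tx_power.max_level);
 *	}
 */
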
#if defined(CONFIG_BT_PATH_LOSS_MONITORING)
void notify_path_loss_threshold_report(struct bt_conn *conn,
				       struct bt_conn_le_path_loss_threshold_report report)
{
	struct bt_conn_cb *callback;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
		if (callback->path_loss_threshold_report) {
			callback->path_loss_threshold_report(conn, &report);
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
		if (cb->path_loss_threshold_report) {
			cb->path_loss_threshold_report(conn, &report);
		}
	}
}

int bt_conn_le_set_path_loss_mon_param(struct bt_conn *conn,
				       const struct bt_conn_le_path_loss_reporting_param *params)
{
	struct bt_hci_cp_le_set_path_loss_reporting_parameters *cp;
	struct net_buf *buf;

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_PATH_LOSS_REPORTING_PARAMETERS, sizeof(*cp));
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(conn->handle);
	cp->high_threshold = params->high_threshold;
	cp->high_hysteresis = params->high_hysteresis;
	cp->low_threshold = params->low_threshold;
	cp->low_hysteresis = params->low_hysteresis;
	cp->min_time_spent = sys_cpu_to_le16(params->min_time_spent);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_PATH_LOSS_REPORTING_PARAMETERS, buf, NULL);
}

int bt_conn_le_set_path_loss_mon_enable(struct bt_conn *conn, bool reporting_enable)
{
	struct bt_hci_cp_le_set_path_loss_reporting_enable *cp;
	struct net_buf *buf;

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_PATH_LOSS_REPORTING_ENABLE, sizeof(*cp));
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(conn->handle);
	cp->enable = reporting_enable ? BT_HCI_LE_PATH_LOSS_REPORTING_ENABLE :
					BT_HCI_LE_PATH_LOSS_REPORTING_DISABLE;

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_PATH_LOSS_REPORTING_ENABLE, buf, NULL);
}
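
/* Illustrative usage sketch: configure thresholds first, then enable
 * reporting. Threshold and hysteresis values are in dB and chosen here
 * arbitrarily; min_time_spent is in connection events. Reports arrive via
 * the path_loss_threshold_report() callback dispatched above.
 *
 *	const struct bt_conn_le_path_loss_reporting_param param = {
 *		.high_threshold = 60,
 *		.high_hysteresis = 10,
 *		.low_threshold = 30,
 *		.low_hysteresis = 10,
 *		.min_time_spent = 5,
 *	};
 *
 *	err = bt_conn_le_set_path_loss_mon_param(conn, &param);
 *	if (!err) {
 *		err = bt_conn_le_set_path_loss_mon_enable(conn, true);
 *	}
 */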
#endif /* CONFIG_BT_PATH_LOSS_MONITORING */

#if defined(CONFIG_BT_SUBRATING)
void notify_subrate_change(struct bt_conn *conn,
			   const struct bt_conn_le_subrate_changed params)
{
	struct bt_conn_cb *callback;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
		if (callback->subrate_changed) {
			callback->subrate_changed(conn, &params);
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
		if (cb->subrate_changed) {
			cb->subrate_changed(conn, &params);
		}
	}
}

static bool le_subrate_common_params_valid(const struct bt_conn_le_subrate_param *param)
{
	/* All limits according to BT Core spec 5.4 [Vol 4, Part E, 7.8.123] */

	if (param->subrate_min < 0x0001 || param->subrate_min > 0x01F4 ||
	    param->subrate_max < 0x0001 || param->subrate_max > 0x01F4 ||
	    param->subrate_min > param->subrate_max) {
		return false;
	}

	if (param->max_latency > 0x01F3 ||
	    param->subrate_max * (param->max_latency + 1) > 500) {
		return false;
	}

	if (param->continuation_number > 0x01F3 ||
	    param->continuation_number >= param->subrate_max) {
		return false;
	}

	if (param->supervision_timeout < 0x000A ||
	    param->supervision_timeout > 0x0C80) {
		return false;
	}

	return true;
}

int bt_conn_le_subrate_set_defaults(const struct bt_conn_le_subrate_param *params)
{
	struct bt_hci_cp_le_set_default_subrate *cp;
	struct net_buf *buf;

	if (!IS_ENABLED(CONFIG_BT_CENTRAL)) {
		return -ENOTSUP;
	}

	if (!le_subrate_common_params_valid(params)) {
		return -EINVAL;
	}

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_DEFAULT_SUBRATE, sizeof(*cp));
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->subrate_min = sys_cpu_to_le16(params->subrate_min);
	cp->subrate_max = sys_cpu_to_le16(params->subrate_max);
	cp->max_latency = sys_cpu_to_le16(params->max_latency);
	cp->continuation_number = sys_cpu_to_le16(params->continuation_number);
	cp->supervision_timeout = sys_cpu_to_le16(params->supervision_timeout);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_DEFAULT_SUBRATE, buf, NULL);
}

int bt_conn_le_subrate_request(struct bt_conn *conn,
			       const struct bt_conn_le_subrate_param *params)
{
	struct bt_hci_cp_le_subrate_request *cp;
	struct net_buf *buf;

	if (!le_subrate_common_params_valid(params)) {
		return -EINVAL;
	}

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SUBRATE_REQUEST, sizeof(*cp));
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(conn->handle);
	cp->subrate_min = sys_cpu_to_le16(params->subrate_min);
	cp->subrate_max = sys_cpu_to_le16(params->subrate_max);
	cp->max_latency = sys_cpu_to_le16(params->max_latency);
	cp->continuation_number = sys_cpu_to_le16(params->continuation_number);
	cp->supervision_timeout = sys_cpu_to_le16(params->supervision_timeout);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SUBRATE_REQUEST, buf, NULL);
}
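
/* Illustrative usage sketch (values chosen to satisfy the checks in
 * le_subrate_common_params_valid(); supervision_timeout is in 10 ms units,
 * so 400 means 4 s). The result is reported through subrate_changed().
 *
 *	const struct bt_conn_le_subrate_param param = {
 *		.subrate_min = 1,
 *		.subrate_max = 10,
 *		.max_latency = 0,
 *		.continuation_number = 2,
 *		.supervision_timeout = 400,
 *	};
 *
 *	err = bt_conn_le_subrate_request(conn, &param);
 */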
#endif /* CONFIG_BT_SUBRATING */

#if defined(CONFIG_BT_CHANNEL_SOUNDING)
void notify_remote_cs_capabilities(struct bt_conn *conn, struct bt_conn_le_cs_capabilities params)
{
	struct bt_conn_cb *callback;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
		if (callback->le_cs_remote_capabilities_available) {
			callback->le_cs_remote_capabilities_available(conn, &params);
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
		if (cb->le_cs_remote_capabilities_available) {
			cb->le_cs_remote_capabilities_available(conn, &params);
		}
	}
}

void notify_remote_cs_fae_table(struct bt_conn *conn, struct bt_conn_le_cs_fae_table params)
{
	struct bt_conn_cb *callback;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
		if (callback->le_cs_remote_fae_table_available) {
			callback->le_cs_remote_fae_table_available(conn, &params);
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
		if (cb->le_cs_remote_fae_table_available) {
			cb->le_cs_remote_fae_table_available(conn, &params);
		}
	}
}

void notify_cs_config_created(struct bt_conn *conn, struct bt_conn_le_cs_config *params)
{
	struct bt_conn_cb *callback;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
		if (callback->le_cs_config_created) {
			callback->le_cs_config_created(conn, params);
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
		if (cb->le_cs_config_created) {
			cb->le_cs_config_created(conn, params);
		}
	}
}

void notify_cs_config_removed(struct bt_conn *conn, uint8_t config_id)
{
	struct bt_conn_cb *callback;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
		if (callback->le_cs_config_removed) {
			callback->le_cs_config_removed(conn, config_id);
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
		if (cb->le_cs_config_removed) {
			cb->le_cs_config_removed(conn, config_id);
		}
	}
}

void notify_cs_security_enable_available(struct bt_conn *conn)
{
	struct bt_conn_cb *callback;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
		if (callback->le_cs_security_enabled) {
			callback->le_cs_security_enabled(conn);
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
		if (cb->le_cs_security_enabled) {
			cb->le_cs_security_enabled(conn);
		}
	}
}

void notify_cs_procedure_enable_available(struct bt_conn *conn,
					  struct bt_conn_le_cs_procedure_enable_complete *params)
{
	struct bt_conn_cb *callback;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
		if (callback->le_cs_procedure_enabled) {
			callback->le_cs_procedure_enabled(conn, params);
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
		if (cb->le_cs_procedure_enabled) {
			cb->le_cs_procedure_enabled(conn, params);
		}
	}
}

void notify_cs_subevent_result(struct bt_conn *conn, struct bt_conn_le_cs_subevent_result *result)
{
	struct bt_conn_cb *callback;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
		if (callback->le_cs_subevent_data_available) {
			callback->le_cs_subevent_data_available(conn, result);
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
		if (cb->le_cs_subevent_data_available) {
			cb->le_cs_subevent_data_available(conn, result);
		}
	}
}
#endif /* CONFIG_BT_CHANNEL_SOUNDING */

int bt_conn_le_param_update(struct bt_conn *conn,
			    const struct bt_le_conn_param *param)
{
	LOG_DBG("conn %p features 0x%02x params (%d-%d %d %d)", conn, conn->le.features[0],
		param->interval_min, param->interval_max, param->latency, param->timeout);

	if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
	    conn->role == BT_CONN_ROLE_CENTRAL) {
		return send_conn_le_param_update(conn, param);
	}

	if (IS_ENABLED(CONFIG_BT_PERIPHERAL)) {
		/* If the peripheral conn param update timer has expired,
		 * just send the request.
		 */
		if (atomic_test_bit(conn->flags, BT_CONN_PERIPHERAL_PARAM_UPDATE)) {
			return send_conn_le_param_update(conn, param);
		}

		/* store new conn params to be used by update timer */
		conn->le.interval_min = param->interval_min;
		conn->le.interval_max = param->interval_max;
		conn->le.pending_latency = param->latency;
		conn->le.pending_timeout = param->timeout;
		atomic_set_bit(conn->flags, BT_CONN_PERIPHERAL_PARAM_SET);
	}

	return 0;
}

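/* Illustrative usage sketch: request a 30-50 ms interval (24-40 in 1.25 ms
 * units), no latency, and a 4 s supervision timeout. As a central this
 * triggers the update directly; as a peripheral it is sent immediately or
 * stored for the update timer, as implemented above.
 *
 *	err = bt_conn_le_param_update(conn, BT_LE_CONN_PARAM(24, 40, 0, 400));
 */
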
#if defined(CONFIG_BT_USER_DATA_LEN_UPDATE)
int bt_conn_le_data_len_update(struct bt_conn *conn,
			       const struct bt_conn_le_data_len_param *param)
{
	if (conn->le.data_len.tx_max_len == param->tx_max_len &&
	    conn->le.data_len.tx_max_time == param->tx_max_time) {
		return -EALREADY;
	}

	return bt_le_set_data_len(conn, param->tx_max_len, param->tx_max_time);
}
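
/* Illustrative usage sketch (BT_GAP_DATA_LEN_MAX and BT_GAP_DATA_TIME_MAX
 * are assumed from <zephyr/bluetooth/gap.h>):
 *
 *	struct bt_conn_le_data_len_param param = {
 *		.tx_max_len = BT_GAP_DATA_LEN_MAX,
 *		.tx_max_time = BT_GAP_DATA_TIME_MAX,
 *	};
 *
 *	err = bt_conn_le_data_len_update(conn, &param);
 */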
#endif /* CONFIG_BT_USER_DATA_LEN_UPDATE */

#if defined(CONFIG_BT_USER_PHY_UPDATE)
int bt_conn_le_phy_update(struct bt_conn *conn,
			  const struct bt_conn_le_phy_param *param)
{
	uint8_t phy_opts, all_phys;

	if ((param->options & BT_CONN_LE_PHY_OPT_CODED_S2) &&
	    (param->options & BT_CONN_LE_PHY_OPT_CODED_S8)) {
		phy_opts = BT_HCI_LE_PHY_CODED_ANY;
	} else if (param->options & BT_CONN_LE_PHY_OPT_CODED_S2) {
		phy_opts = BT_HCI_LE_PHY_CODED_S2;
	} else if (param->options & BT_CONN_LE_PHY_OPT_CODED_S8) {
		phy_opts = BT_HCI_LE_PHY_CODED_S8;
	} else {
		phy_opts = BT_HCI_LE_PHY_CODED_ANY;
	}

	all_phys = 0U;
	if (param->pref_tx_phy == BT_GAP_LE_PHY_NONE) {
		all_phys |= BT_HCI_LE_PHY_TX_ANY;
	}

	if (param->pref_rx_phy == BT_GAP_LE_PHY_NONE) {
		all_phys |= BT_HCI_LE_PHY_RX_ANY;
	}

	return bt_le_set_phy(conn, all_phys, param->pref_tx_phy,
			     param->pref_rx_phy, phy_opts);
}
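
/* Illustrative usage sketch (BT_CONN_LE_PHY_PARAM_2M is assumed from
 * <zephyr/bluetooth/conn.h>; the result is reported through the
 * le_phy_updated() callback):
 *
 *	err = bt_conn_le_phy_update(conn, BT_CONN_LE_PHY_PARAM_2M);
 */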
#endif /* CONFIG_BT_USER_PHY_UPDATE */

#if defined(CONFIG_BT_CENTRAL)
static void bt_conn_set_param_le(struct bt_conn *conn,
				 const struct bt_le_conn_param *param)
{
	conn->le.interval_min = param->interval_min;
	conn->le.interval_max = param->interval_max;
	conn->le.latency = param->latency;
	conn->le.timeout = param->timeout;
}

static void create_param_setup(const struct bt_conn_le_create_param *param)
{
	bt_dev.create_param = *param;

	bt_dev.create_param.timeout =
		(bt_dev.create_param.timeout != 0) ?
		bt_dev.create_param.timeout :
		(MSEC_PER_SEC / 10) * CONFIG_BT_CREATE_CONN_TIMEOUT;

	bt_dev.create_param.interval_coded =
		(bt_dev.create_param.interval_coded != 0) ?
		bt_dev.create_param.interval_coded :
		bt_dev.create_param.interval;

	bt_dev.create_param.window_coded =
		(bt_dev.create_param.window_coded != 0) ?
		bt_dev.create_param.window_coded :
		bt_dev.create_param.window;
}

#if defined(CONFIG_BT_FILTER_ACCEPT_LIST)
int bt_conn_le_create_auto(const struct bt_conn_le_create_param *create_param,
			   const struct bt_le_conn_param *param)
{
	struct bt_conn *conn;
	int err;

	if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
		return -EAGAIN;
	}

	if (!bt_le_conn_params_valid(param)) {
		return -EINVAL;
	}

	conn = bt_conn_lookup_state_le(BT_ID_DEFAULT, BT_ADDR_LE_NONE,
				       BT_CONN_INITIATING_FILTER_LIST);
	if (conn) {
		bt_conn_unref(conn);
		return -EALREADY;
	}

	/* Scanning either to connect or for an explicit scan; in either case
	 * the scanner was started by the application and should not be
	 * stopped.
	 */
	if (!BT_LE_STATES_SCAN_INIT(bt_dev.le.states) &&
	    atomic_test_bit(bt_dev.flags, BT_DEV_SCANNING)) {
		return -EINVAL;
	}

	if (atomic_test_bit(bt_dev.flags, BT_DEV_INITIATING)) {
		return -EINVAL;
	}

	if (!bt_id_scan_random_addr_check()) {
		return -EINVAL;
	}

	conn = bt_conn_add_le(BT_ID_DEFAULT, BT_ADDR_LE_NONE);
	if (!conn) {
		return -ENOMEM;
	}

	bt_conn_set_param_le(conn, param);
	create_param_setup(create_param);

	atomic_set_bit(conn->flags, BT_CONN_AUTO_CONNECT);
	bt_conn_set_state(conn, BT_CONN_INITIATING_FILTER_LIST);

	err = bt_le_create_conn(conn);
	if (err) {
		LOG_ERR("Failed to start filtered scan");
		conn->err = 0;
		bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
		bt_conn_unref(conn);
		return err;
	}

	/* Since we don't give the application a reference to manage in
	 * this case, we need to release this reference here.
	 */
	bt_conn_unref(conn);
	return 0;
}

int bt_conn_create_auto_stop(void)
{
	struct bt_conn *conn;
	int err;

	if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
		return -EINVAL;
	}

	conn = bt_conn_lookup_state_le(BT_ID_DEFAULT, BT_ADDR_LE_NONE,
				       BT_CONN_INITIATING_FILTER_LIST);
	if (!conn) {
		return -EINVAL;
	}

	if (!atomic_test_bit(bt_dev.flags, BT_DEV_INITIATING)) {
		return -EINVAL;
	}

	bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
	bt_conn_unref(conn);

	err = bt_le_create_conn_cancel();
	if (err) {
		LOG_ERR("Failed to stop initiator");
		return err;
	}

	return 0;
}
#endif /* defined(CONFIG_BT_FILTER_ACCEPT_LIST) */

static int conn_le_create_common_checks(const bt_addr_le_t *peer,
					const struct bt_le_conn_param *conn_param)
{
	if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
		LOG_DBG("Conn check failed: BT dev not ready.");
		return -EAGAIN;
	}

	if (!bt_le_conn_params_valid(conn_param)) {
		LOG_DBG("Conn check failed: invalid parameters.");
		return -EINVAL;
	}

	if (!BT_LE_STATES_SCAN_INIT(bt_dev.le.states) && bt_le_explicit_scanner_running()) {
		LOG_DBG("Conn check failed: scanner was explicitly requested.");
		return -EAGAIN;
	}

	if (atomic_test_bit(bt_dev.flags, BT_DEV_INITIATING)) {
		LOG_DBG("Conn check failed: device is already initiating.");
		return -EALREADY;
	}

	if (!bt_id_scan_random_addr_check()) {
		LOG_DBG("Conn check failed: invalid random address.");
		return -EINVAL;
	}

	if (bt_conn_exists_le(BT_ID_DEFAULT, peer)) {
		LOG_DBG("Conn check failed: ACL connection already exists.");
		return -EINVAL;
	}

	return 0;
}

static struct bt_conn *conn_le_create_helper(const bt_addr_le_t *peer,
					     const struct bt_le_conn_param *conn_param)
{
	bt_addr_le_t dst;
	struct bt_conn *conn;

	if (bt_addr_le_is_resolved(peer)) {
		bt_addr_le_copy_resolved(&dst, peer);
	} else {
		bt_addr_le_copy(&dst, bt_lookup_id_addr(BT_ID_DEFAULT, peer));
	}

	/* Only default identity supported for now */
	conn = bt_conn_add_le(BT_ID_DEFAULT, &dst);
	if (!conn) {
		return NULL;
	}

	bt_conn_set_param_le(conn, conn_param);

	return conn;
}

int bt_conn_le_create(const bt_addr_le_t *peer, const struct bt_conn_le_create_param *create_param,
		      const struct bt_le_conn_param *conn_param, struct bt_conn **ret_conn)
{
	struct bt_conn *conn;
	int err;

	CHECKIF(ret_conn == NULL) {
		return -EINVAL;
	}

	CHECKIF(*ret_conn != NULL) {
		/* This rule helps application developers prevent leaks of connection references. If
		 * a bt_conn variable is not null, it presumably holds a reference and must not be
		 * overwritten. To avoid this warning, initialize the variables to null, and set
		 * them to null when moving the reference.
		 */
		LOG_WRN("*conn should be unreferenced and initialized to NULL");

		if (IS_ENABLED(CONFIG_BT_CONN_CHECK_NULL_BEFORE_CREATE)) {
			return -EINVAL;
		}
	}

	err = conn_le_create_common_checks(peer, conn_param);
	if (err) {
		return err;
	}

	conn = conn_le_create_helper(peer, conn_param);
	if (!conn) {
		return -ENOMEM;
	}

	if (BT_LE_STATES_SCAN_INIT(bt_dev.le.states) &&
	    bt_le_explicit_scanner_running() &&
	    !bt_le_explicit_scanner_uses_same_params(create_param)) {
		LOG_WRN("Use same scan and connection create params to obtain best performance");
	}

	create_param_setup(create_param);

#if defined(CONFIG_BT_SMP)
	if (bt_dev.le.rl_entries > bt_dev.le.rl_size) {
		/* Use host-based identity resolving. */
		bt_conn_set_state(conn, BT_CONN_SCAN_BEFORE_INITIATING);

		err = bt_le_scan_user_add(BT_LE_SCAN_USER_CONN);
		if (err) {
			bt_le_scan_user_remove(BT_LE_SCAN_USER_CONN);
			bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
			bt_conn_unref(conn);

			return err;
		}

		*ret_conn = conn;
		return 0;
	}
#endif /* CONFIG_BT_SMP */

	bt_conn_set_state(conn, BT_CONN_INITIATING);

	err = bt_le_create_conn(conn);
	if (err) {
		conn->err = 0;
		bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
		bt_conn_unref(conn);

		/* Best-effort attempt to inform the scanner that the initiator stopped. */
		int scan_check_err = bt_le_scan_user_add(BT_LE_SCAN_USER_NONE);

		if (scan_check_err) {
			LOG_WRN("Error while updating the scanner (%d)", scan_check_err);
		}
		return err;
	}

	*ret_conn = conn;
	return 0;
}

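/* Illustrative usage sketch. The output pointer must start out NULL (see
 * the check above); on success the application receives one reference to
 * manage:
 *
 *	struct bt_conn *conn = NULL;
 *
 *	err = bt_conn_le_create(&peer, BT_CONN_LE_CREATE_CONN,
 *				BT_LE_CONN_PARAM_DEFAULT, &conn);
 *	if (!err) {
 *		(wait for the connected() callback; unref when done)
 *	}
 */
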
int bt_conn_le_create_synced(const struct bt_le_ext_adv *adv,
			     const struct bt_conn_le_create_synced_param *synced_param,
			     const struct bt_le_conn_param *conn_param, struct bt_conn **ret_conn)
{
	struct bt_conn *conn;
	int err;

	CHECKIF(ret_conn == NULL) {
		return -EINVAL;
	}

	CHECKIF(*ret_conn != NULL) {
		/* This rule helps application developers prevent leaks of connection references. If
		 * a bt_conn variable is not null, it presumably holds a reference and must not be
		 * overwritten. To avoid this warning, initialize the variables to null, and set
		 * them to null when moving the reference.
		 */
		LOG_WRN("*conn should be unreferenced and initialized to NULL");

		if (IS_ENABLED(CONFIG_BT_CONN_CHECK_NULL_BEFORE_CREATE)) {
			return -EINVAL;
		}
	}

	err = conn_le_create_common_checks(synced_param->peer, conn_param);
	if (err) {
		return err;
	}

	if (!atomic_test_bit(adv->flags, BT_PER_ADV_ENABLED)) {
		return -EINVAL;
	}

	if (!BT_FEAT_LE_PAWR_ADVERTISER(bt_dev.le.features)) {
		return -ENOTSUP;
	}

	if (synced_param->subevent >= BT_HCI_PAWR_SUBEVENT_MAX) {
		return -EINVAL;
	}

	conn = conn_le_create_helper(synced_param->peer, conn_param);
	if (!conn) {
		return -ENOMEM;
	}

	/* The connection creation timeout is not really useful for PAwR.
	 * The controller will give a result for the connection attempt
	 * within a periodic interval. We do not know the periodic interval
	 * used, so disable the timeout.
	 */
	bt_dev.create_param.timeout = 0;
	bt_conn_set_state(conn, BT_CONN_INITIATING);

	err = bt_le_create_conn_synced(conn, adv, synced_param->subevent);
	if (err) {
		conn->err = 0;
		bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
		bt_conn_unref(conn);

		return err;
	}

	*ret_conn = conn;
	return 0;
}

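/* Illustrative sketch (hypothetical, compiled out): creating a connection
 * to a device that responds to Periodic Advertising with Responses.
 * pawr_adv is assumed to be an advertising set with PAwR already enabled;
 * subevent 0 is a placeholder value.
 */
#if 0
static int app_connect_synced(struct bt_le_ext_adv *pawr_adv,
			      const bt_addr_le_t *peer)
{
	const struct bt_conn_le_create_synced_param synced_param = {
		.peer = peer,
		.subevent = 0,
	};
	struct bt_conn *conn = NULL;

	/* No creation timeout applies here; see the comment above about
	 * bt_dev.create_param.timeout being disabled for PAwR.
	 */
	return bt_conn_le_create_synced(pawr_adv, &synced_param,
					BT_LE_CONN_PARAM_DEFAULT, &conn);
}
#endif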
#if !defined(CONFIG_BT_FILTER_ACCEPT_LIST)
int bt_le_set_auto_conn(const bt_addr_le_t *addr,
			const struct bt_le_conn_param *param)
{
	struct bt_conn *conn;

	if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
		return -EAGAIN;
	}

	if (param && !bt_le_conn_params_valid(param)) {
		return -EINVAL;
	}

	if (!bt_id_scan_random_addr_check()) {
		return -EINVAL;
	}

	/* Only the default identity is supported */
	conn = bt_conn_lookup_addr_le(BT_ID_DEFAULT, addr);
	if (!conn) {
		conn = bt_conn_add_le(BT_ID_DEFAULT, addr);
		if (!conn) {
			return -ENOMEM;
		}
	}

	if (param) {
		bt_conn_set_param_le(conn, param);

		if (!atomic_test_and_set_bit(conn->flags,
					     BT_CONN_AUTO_CONNECT)) {
			bt_conn_ref(conn);
		}
	} else {
		if (atomic_test_and_clear_bit(conn->flags,
					      BT_CONN_AUTO_CONNECT)) {
			bt_conn_unref(conn);
			if (conn->state == BT_CONN_SCAN_BEFORE_INITIATING) {
				bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
			}
		}
	}

	int err = 0;

	if (conn->state == BT_CONN_DISCONNECTED &&
	    atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
		if (param) {
			bt_conn_set_state(conn, BT_CONN_SCAN_BEFORE_INITIATING);
			err = bt_le_scan_user_add(BT_LE_SCAN_USER_CONN);
		}
	}

	bt_conn_unref(conn);

	return err;
}
#endif /* !defined(CONFIG_BT_FILTER_ACCEPT_LIST) */
#endif /* CONFIG_BT_CENTRAL */

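/* Illustrative sketch (hypothetical, compiled out): arming and disarming
 * host-based auto-connection for a peer when the controller filter accept
 * list is not in use. The helper name app_auto_conn is an assumption.
 */
#if 0
static int app_auto_conn(const bt_addr_le_t *peer, bool enable)
{
	/* Passing parameters arms reconnection (and takes a reference on
	 * the conn object as seen above); passing NULL disarms it again.
	 */
	return bt_le_set_auto_conn(peer, enable ? BT_LE_CONN_PARAM_DEFAULT
						: NULL);
}
#endif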
int bt_conn_le_conn_update(struct bt_conn *conn,
			   const struct bt_le_conn_param *param)
{
	struct hci_cp_le_conn_update *conn_update;
	struct net_buf *buf;

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_CONN_UPDATE,
				sizeof(*conn_update));
	if (!buf) {
		return -ENOBUFS;
	}

	conn_update = net_buf_add(buf, sizeof(*conn_update));
	(void)memset(conn_update, 0, sizeof(*conn_update));
	conn_update->handle = sys_cpu_to_le16(conn->handle);
	conn_update->conn_interval_min = sys_cpu_to_le16(param->interval_min);
	conn_update->conn_interval_max = sys_cpu_to_le16(param->interval_max);
	conn_update->conn_latency = sys_cpu_to_le16(param->latency);
	conn_update->supervision_timeout = sys_cpu_to_le16(param->timeout);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_CONN_UPDATE, buf, NULL);
}

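/* Illustrative sketch (hypothetical, compiled out): requesting new
 * connection parameters. Note the HCI units used by the command built
 * above: connection interval in 1.25 ms steps, supervision timeout in
 * 10 ms steps, latency as a plain event count. 24/40/0/400 therefore
 * means a 30-50 ms interval, no latency, and a 4 s timeout.
 */
#if 0
static int app_request_conn_update(struct bt_conn *conn)
{
	return bt_conn_le_conn_update(conn, BT_LE_CONN_PARAM(24, 40, 0, 400));
}
#endif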
#if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CLASSIC)
int bt_conn_auth_cb_register(const struct bt_conn_auth_cb *cb)
{
	if (!cb) {
		bt_auth = NULL;
		return 0;
	}

	if (bt_auth) {
		return -EALREADY;
	}

	/* The cancel callback must always be provided if the app provides
	 * interactive callbacks.
	 */
	if (!cb->cancel &&
	    (cb->passkey_display || cb->passkey_entry || cb->passkey_confirm ||
#if defined(CONFIG_BT_CLASSIC)
	     cb->pincode_entry ||
#endif
	     cb->pairing_confirm)) {
		return -EINVAL;
	}

	bt_auth = cb;
	return 0;
}

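/* Illustrative sketch (hypothetical, compiled out): a minimal callback set
 * satisfying the rule enforced above. Because an interactive callback
 * (passkey_display) is provided, cancel must be set too, otherwise
 * registration fails with -EINVAL. Callback names are placeholders.
 */
#if 0
static void app_passkey_display(struct bt_conn *conn, unsigned int passkey)
{
	printk("Passkey for %p: %06u\n", (void *)conn, passkey);
}

static void app_auth_cancel(struct bt_conn *conn)
{
	printk("Pairing cancelled\n");
}

static const struct bt_conn_auth_cb app_auth_cb = {
	.passkey_display = app_passkey_display,
	.cancel = app_auth_cancel,
};

/* bt_conn_auth_cb_register(&app_auth_cb) installs the set;
 * bt_conn_auth_cb_register(NULL) clears it again.
 */
#endif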
#if defined(CONFIG_BT_SMP)
int bt_conn_auth_cb_overlay(struct bt_conn *conn, const struct bt_conn_auth_cb *cb)
{
	CHECKIF(conn == NULL) {
		return -EINVAL;
	}

	/* The cancel callback must always be provided if the app provides
	 * interactive callbacks.
	 */
	if (cb && !cb->cancel &&
	    (cb->passkey_display || cb->passkey_entry || cb->passkey_confirm ||
	     cb->pairing_confirm)) {
		return -EINVAL;
	}

	if (conn->type == BT_CONN_TYPE_LE) {
		return bt_smp_auth_cb_overlay(conn, cb);
	}

	return -ENOTSUP;
}
#endif

int bt_conn_auth_info_cb_register(struct bt_conn_auth_info_cb *cb)
{
	CHECKIF(cb == NULL) {
		return -EINVAL;
	}

	if (sys_slist_find(&bt_auth_info_cbs, &cb->node, NULL)) {
		return -EALREADY;
	}

	sys_slist_append(&bt_auth_info_cbs, &cb->node);

	return 0;
}

int bt_conn_auth_info_cb_unregister(struct bt_conn_auth_info_cb *cb)
{
	CHECKIF(cb == NULL) {
		return -EINVAL;
	}

	if (!sys_slist_find_and_remove(&bt_auth_info_cbs, &cb->node)) {
		return -EALREADY;
	}

	return 0;
}

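/* Illustrative sketch (hypothetical, compiled out): tracking pairing
 * results through the info callback list managed above. Unlike the single
 * bt_auth callback set, several of these may be registered at once.
 */
#if 0
static void app_pairing_complete(struct bt_conn *conn, bool bonded)
{
	printk("Pairing complete, bonded: %d\n", bonded);
}

static struct bt_conn_auth_info_cb app_auth_info_cb = {
	.pairing_complete = app_pairing_complete,
};

/* bt_conn_auth_info_cb_register(&app_auth_info_cb) appends the node;
 * registering the same node twice returns -EALREADY, as does
 * unregistering a node that was never added.
 */
#endif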
int bt_conn_auth_passkey_entry(struct bt_conn *conn, unsigned int passkey)
{
	if (IS_ENABLED(CONFIG_BT_SMP) && conn->type == BT_CONN_TYPE_LE) {
		return bt_smp_auth_passkey_entry(conn, passkey);
	}

	if (IS_ENABLED(CONFIG_BT_CLASSIC) && conn->type == BT_CONN_TYPE_BR) {
		if (!bt_auth) {
			return -EINVAL;
		}

		return bt_ssp_auth_passkey_entry(conn, passkey);
	}

	return -EINVAL;
}

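/* Illustrative sketch (hypothetical, compiled out): feeding user input
 * back after the passkey_entry callback fired. The passkey would come
 * from the application's UI; 123456 is a placeholder.
 */
#if 0
static void app_on_user_passkey(struct bt_conn *conn)
{
	int err = bt_conn_auth_passkey_entry(conn, 123456);

	if (err) {
		/* Wrong connection type or no pairing procedure waiting
		 * for input; abort the procedure instead.
		 */
		(void)bt_conn_auth_cancel(conn);
	}
}
#endif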
#if defined(CONFIG_BT_PASSKEY_KEYPRESS)
int bt_conn_auth_keypress_notify(struct bt_conn *conn,
				 enum bt_conn_auth_keypress type)
{
	if (IS_ENABLED(CONFIG_BT_SMP) && conn->type == BT_CONN_TYPE_LE) {
		return bt_smp_auth_keypress_notify(conn, type);
	}

	LOG_ERR("Not implemented for conn type %d", conn->type);
	return -EINVAL;
}
#endif

int bt_conn_auth_passkey_confirm(struct bt_conn *conn)
{
	if (IS_ENABLED(CONFIG_BT_SMP) && conn->type == BT_CONN_TYPE_LE) {
		return bt_smp_auth_passkey_confirm(conn);
	}

	if (IS_ENABLED(CONFIG_BT_CLASSIC) && conn->type == BT_CONN_TYPE_BR) {
		if (!bt_auth) {
			return -EINVAL;
		}

		return bt_ssp_auth_passkey_confirm(conn);
	}

	return -EINVAL;
}

int bt_conn_auth_cancel(struct bt_conn *conn)
{
	if (IS_ENABLED(CONFIG_BT_SMP) && conn->type == BT_CONN_TYPE_LE) {
		return bt_smp_auth_cancel(conn);
	}

	if (IS_ENABLED(CONFIG_BT_CLASSIC) && conn->type == BT_CONN_TYPE_BR) {
		if (!bt_auth) {
			return -EINVAL;
		}

		return bt_ssp_auth_cancel(conn);
	}

	return -EINVAL;
}

int bt_conn_auth_pairing_confirm(struct bt_conn *conn)
{
	if (IS_ENABLED(CONFIG_BT_SMP) && conn->type == BT_CONN_TYPE_LE) {
		return bt_smp_auth_pairing_confirm(conn);
	}

	if (IS_ENABLED(CONFIG_BT_CLASSIC) && conn->type == BT_CONN_TYPE_BR) {
		if (!bt_auth) {
			return -EINVAL;
		}

		return bt_ssp_auth_pairing_confirm(conn);
	}

	return -EINVAL;
}
#endif /* CONFIG_BT_SMP || CONFIG_BT_CLASSIC */

struct bt_conn *bt_conn_lookup_index(uint8_t index)
{
	if (index >= ARRAY_SIZE(acl_conns)) {
		return NULL;
	}

	return bt_conn_ref(&acl_conns[index]);
}

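/* Illustrative sketch (hypothetical, compiled out): walking every ACL
 * slot via this internal helper. Each non-NULL return carries a
 * reference that the caller owns and must release.
 */
#if 0
static void app_for_each_acl(void)
{
	for (uint8_t i = 0; i < CONFIG_BT_MAX_CONN; i++) {
		struct bt_conn *conn = bt_conn_lookup_index(i);

		if (!conn) {
			continue; /* slot unused */
		}

		/* ... inspect conn ... */

		bt_conn_unref(conn);
	}
}
#endif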
int bt_conn_init(void)
{
	int err, i;

	k_fifo_init(&free_tx);
	for (i = 0; i < ARRAY_SIZE(conn_tx); i++) {
		k_fifo_put(&free_tx, &conn_tx[i]);
	}

	bt_att_init();

	err = bt_smp_init();
	if (err) {
		return err;
	}

	bt_l2cap_init();

	/* Initialize background scan */
	if (IS_ENABLED(CONFIG_BT_CENTRAL)) {
		for (i = 0; i < ARRAY_SIZE(acl_conns); i++) {
			struct bt_conn *conn = bt_conn_ref(&acl_conns[i]);

			if (!conn) {
				continue;
			}

#if !defined(CONFIG_BT_FILTER_ACCEPT_LIST)
			if (atomic_test_bit(conn->flags,
					    BT_CONN_AUTO_CONNECT)) {
				/* Only the default identity is supported */
				conn->id = BT_ID_DEFAULT;
				bt_conn_set_state(conn,
						  BT_CONN_SCAN_BEFORE_INITIATING);
			}
#endif /* !defined(CONFIG_BT_FILTER_ACCEPT_LIST) */

			bt_conn_unref(conn);
		}
	}

	return 0;
}

#if defined(CONFIG_BT_DF_CONNECTION_CTE_RX)
void bt_hci_le_df_connection_iq_report_common(uint8_t event, struct net_buf *buf)
{
	struct bt_df_conn_iq_samples_report iq_report;
	struct bt_conn *conn;
	int err;

	if (event == BT_HCI_EVT_LE_CONNECTION_IQ_REPORT) {
		err = hci_df_prepare_connection_iq_report(buf, &iq_report, &conn);
		if (err) {
			LOG_ERR("Prepare CTE conn IQ report failed %d", err);
			return;
		}
	} else if (IS_ENABLED(CONFIG_BT_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES) &&
		   event == BT_HCI_EVT_VS_LE_CONNECTION_IQ_REPORT) {
		err = hci_df_vs_prepare_connection_iq_report(buf, &iq_report, &conn);
		if (err) {
			LOG_ERR("Prepare CTE conn IQ report failed %d", err);
			return;
		}
	} else {
		LOG_ERR("Unhandled connection IQ report");
		return;
	}

	struct bt_conn_cb *callback;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
		if (callback->cte_report_cb) {
			callback->cte_report_cb(conn, &iq_report);
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
		if (cb->cte_report_cb) {
			cb->cte_report_cb(conn, &iq_report);
		}
	}

	bt_conn_unref(conn);
}

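/* Illustrative sketch (hypothetical, compiled out): consuming the IQ
 * reports dispatched above. BT_CONN_CB_DEFINE() feeds the
 * STRUCT_SECTION_FOREACH path; bt_conn_cb_register() feeds the runtime
 * list. Callback and variable names are placeholders.
 */
#if 0
static void app_cte_report(struct bt_conn *conn,
			   const struct bt_df_conn_iq_samples_report *report)
{
	printk("IQ report with %u samples\n", report->sample_count);
}

BT_CONN_CB_DEFINE(app_conn_cb) = {
	.cte_report_cb = app_cte_report,
};
#endif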
void bt_hci_le_df_connection_iq_report(struct net_buf *buf)
{
	bt_hci_le_df_connection_iq_report_common(BT_HCI_EVT_LE_CONNECTION_IQ_REPORT, buf);
}

#if defined(CONFIG_BT_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES)
void bt_hci_le_vs_df_connection_iq_report(struct net_buf *buf)
{
	bt_hci_le_df_connection_iq_report_common(BT_HCI_EVT_VS_LE_CONNECTION_IQ_REPORT, buf);
}
#endif /* CONFIG_BT_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES */
#endif /* CONFIG_BT_DF_CONNECTION_CTE_RX */

#if defined(CONFIG_BT_DF_CONNECTION_CTE_REQ)
void bt_hci_le_df_cte_req_failed(struct net_buf *buf)
{
	struct bt_df_conn_iq_samples_report iq_report;
	struct bt_conn *conn;
	int err;

	err = hci_df_prepare_conn_cte_req_failed(buf, &iq_report, &conn);
	if (err) {
		LOG_ERR("Failed to prepare IQ report for failed CTE request (err %d)", err);
		return;
	}

	struct bt_conn_cb *callback;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
		if (callback->cte_report_cb) {
			callback->cte_report_cb(conn, &iq_report);
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
		if (cb->cte_report_cb) {
			cb->cte_report_cb(conn, &iq_report);
		}
	}

	bt_conn_unref(conn);
}
#endif /* CONFIG_BT_DF_CONNECTION_CTE_REQ */

#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_CONN_TX_NOTIFY_WQ)
static int bt_conn_tx_workq_init(void)
{
	const struct k_work_queue_config cfg = {
		.name = "BT CONN TX WQ",
		.no_yield = false,
		.essential = false,
	};

	k_work_queue_init(&conn_tx_workq);
	k_work_queue_start(&conn_tx_workq, conn_tx_workq_thread_stack,
			   K_THREAD_STACK_SIZEOF(conn_tx_workq_thread_stack),
			   K_PRIO_COOP(CONFIG_BT_CONN_TX_NOTIFY_WQ_PRIO), &cfg);

	return 0;
}

SYS_INIT(bt_conn_tx_workq_init, POST_KERNEL, CONFIG_BT_CONN_TX_NOTIFY_WQ_INIT_PRIORITY);
#endif /* CONFIG_BT_CONN_TX_NOTIFY_WQ */