1 /* conn.c - Bluetooth connection handling */
2 
3 /*
4  * Copyright (c) 2015-2016 Intel Corporation
5  *
6  * SPDX-License-Identifier: Apache-2.0
7  */
8 
9 #include <zephyr/kernel.h>
10 #include <string.h>
11 #include <errno.h>
12 #include <stdbool.h>
13 #include <zephyr/sys/atomic.h>
14 #include <zephyr/sys/byteorder.h>
15 #include <zephyr/sys/check.h>
16 #include <zephyr/sys/iterable_sections.h>
17 #include <zephyr/sys/util.h>
18 #include <zephyr/sys/util_macro.h>
19 #include <zephyr/sys/slist.h>
20 #include <zephyr/debug/stack.h>
21 #include <zephyr/sys/__assert.h>
22 
23 #include <zephyr/bluetooth/hci.h>
24 #include <zephyr/bluetooth/bluetooth.h>
25 #include <zephyr/bluetooth/direction.h>
26 #include <zephyr/bluetooth/conn.h>
27 #include <zephyr/drivers/bluetooth/hci_driver.h>
28 #include <zephyr/bluetooth/att.h>
29 
30 #include "common/assert.h"
31 #include "common/bt_str.h"
32 
33 #include "buf_view.h"
34 #include "addr_internal.h"
35 #include "hci_core.h"
36 #include "id.h"
37 #include "adv.h"
38 #include "conn_internal.h"
39 #include "l2cap_internal.h"
40 #include "keys.h"
41 #include "smp.h"
42 #include "classic/ssp.h"
43 #include "att_internal.h"
44 #include "iso_internal.h"
45 #include "direction_internal.h"
46 #include "classic/sco_internal.h"
47 
48 #define LOG_LEVEL CONFIG_BT_CONN_LOG_LEVEL
49 #include <zephyr/logging/log.h>
50 LOG_MODULE_REGISTER(bt_conn);
51 
52 K_FIFO_DEFINE(free_tx);
53 
54 static void tx_free(struct bt_conn_tx *tx);
55 
conn_tx_destroy(struct bt_conn * conn,struct bt_conn_tx * tx)56 static void conn_tx_destroy(struct bt_conn *conn, struct bt_conn_tx *tx)
57 {
58 	__ASSERT_NO_MSG(tx);
59 
60 	bt_conn_tx_cb_t cb = tx->cb;
61 	void *user_data = tx->user_data;
62 
63 	LOG_DBG("conn %p tx %p cb %p ud %p", conn, tx, cb, user_data);
64 
65 	/* Free up TX metadata before calling callback in case the callback
66 	 * tries to allocate metadata
67 	 */
68 	tx_free(tx);
69 
70 	if (cb) {
71 		cb(conn, user_data, -ESHUTDOWN);
72 	}
73 }
74 
75 #if defined(CONFIG_BT_CONN_TX)
76 static void tx_complete_work(struct k_work *work);
77 #endif /* CONFIG_BT_CONN_TX */
78 
79 static void notify_recycled_conn_slot(void);
80 
81 void bt_tx_irq_raise(void);
82 
83 /* Group Connected BT_CONN only in this */
84 #if defined(CONFIG_BT_CONN)
85 /* Peripheral timeout to initialize Connection Parameter Update procedure */
86 #define CONN_UPDATE_TIMEOUT  K_MSEC(CONFIG_BT_CONN_PARAM_UPDATE_TIMEOUT)
87 
88 static void deferred_work(struct k_work *work);
89 static void notify_connected(struct bt_conn *conn);
90 
91 static struct bt_conn acl_conns[CONFIG_BT_MAX_CONN];
92 NET_BUF_POOL_DEFINE(acl_tx_pool, CONFIG_BT_L2CAP_TX_BUF_COUNT,
93 		    BT_L2CAP_BUF_SIZE(CONFIG_BT_L2CAP_TX_MTU),
94 		    CONFIG_BT_CONN_TX_USER_DATA_SIZE, NULL);
95 
96 #if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CLASSIC)
97 const struct bt_conn_auth_cb *bt_auth;
98 sys_slist_t bt_auth_info_cbs = SYS_SLIST_STATIC_INIT(&bt_auth_info_cbs);
99 #endif /* CONFIG_BT_SMP || CONFIG_BT_CLASSIC */
100 
101 
102 static sys_slist_t conn_cbs = SYS_SLIST_STATIC_INIT(&conn_cbs);
103 
104 static struct bt_conn_tx conn_tx[CONFIG_BT_CONN_TX_MAX];
105 
106 #if defined(CONFIG_BT_CLASSIC)
107 static int bt_hci_connect_br_cancel(struct bt_conn *conn);
108 
109 static struct bt_conn sco_conns[CONFIG_BT_MAX_SCO_CONN];
110 #endif /* CONFIG_BT_CLASSIC */
111 #endif /* CONFIG_BT_CONN */
112 
113 #if defined(CONFIG_BT_CONN_TX)
114 void frag_destroy(struct net_buf *buf);
115 
116 /* Storage for fragments (views) into the upper layers' PDUs. */
117 /* TODO: remove user-data requirements */
118 NET_BUF_POOL_FIXED_DEFINE(fragments, CONFIG_BT_CONN_FRAG_COUNT, 0,
119 			  CONFIG_BT_CONN_TX_USER_DATA_SIZE, frag_destroy);
120 
121 struct frag_md {
122 	struct bt_buf_view_meta view_meta;
123 };
124 struct frag_md frag_md_pool[CONFIG_BT_CONN_FRAG_COUNT];
125 
get_frag_md(struct net_buf * fragment)126 struct frag_md *get_frag_md(struct net_buf *fragment)
127 {
128 	return &frag_md_pool[net_buf_id(fragment)];
129 }
130 
frag_destroy(struct net_buf * frag)131 void frag_destroy(struct net_buf *frag)
132 {
133 	/* allow next view to be allocated (and unlock the parent buf) */
134 	bt_buf_destroy_view(frag, &get_frag_md(frag)->view_meta);
135 
136 	LOG_DBG("");
137 
138 	/* Kick the TX processor to send the rest of the frags. */
139 	bt_tx_irq_raise();
140 }
141 
/* Allocate a view ("window") of @p winsize bytes into @p outside.
 *
 * @return the view buffer, or NULL if no view buffer is free.
 * Keeping a reference to @p outside is the caller's responsibility.
 */
static struct net_buf *get_data_frag(struct net_buf *outside, size_t winsize)
{
	struct net_buf *window;

	__ASSERT_NO_MSG(!bt_buf_has_view(outside));

	/* Keeping a ref is the caller's responsibility */
	window = net_buf_alloc_len(&fragments, 0, K_NO_WAIT);
	if (!window) {
		return window;
	}

	window = bt_buf_make_view(window, outside,
				  winsize, &get_frag_md(window)->view_meta);

	/* %zu: `winsize` is a size_t; printing it with %d is a
	 * format-specifier mismatch (undefined behavior).
	 */
	LOG_DBG("get-acl-frag: outside %p window %p size %zu", outside, window, winsize);

	return window;
}
161 #else /* !CONFIG_BT_CONN_TX */
/* Stub used when CONFIG_BT_CONN_TX is disabled. */
static struct net_buf *get_data_frag(struct net_buf *outside, size_t winsize)
{
	ARG_UNUSED(outside);
	ARG_UNUSED(winsize);

	/* Never executed: it exists only so the code compiles; the
	 * linker later discards this implementation.
	 */

	return NULL;
}
173 #endif /* CONFIG_BT_CONN_TX */
174 
175 #if defined(CONFIG_BT_ISO)
176 extern struct bt_conn iso_conns[CONFIG_BT_ISO_MAX_CHAN];
177 
178 /* Callback TX buffers for ISO */
179 static struct bt_conn_tx iso_tx[CONFIG_BT_ISO_TX_BUF_COUNT];
180 
bt_conn_iso_init(void)181 int bt_conn_iso_init(void)
182 {
183 	for (size_t i = 0; i < ARRAY_SIZE(iso_tx); i++) {
184 		k_fifo_put(&free_tx, &iso_tx[i]);
185 	}
186 
187 	return 0;
188 }
189 #endif /* CONFIG_BT_ISO */
190 
/* Select the semaphore that tracks free controller buffers for this
 * connection's transport (BR/EDR, LE ISO or LE ACL).
 *
 * Returns NULL when the controller reported no suitable buffer pool.
 */
struct k_sem *bt_conn_get_pkts(struct bt_conn *conn)
{
#if defined(CONFIG_BT_CLASSIC)
	/* BR/EDR links always use the BR pool; LE links fall back to it
	 * when the controller has no dedicated LE ACL buffers
	 * (le.acl_mtu == 0).
	 */
	if (conn->type == BT_CONN_TYPE_BR || !bt_dev.le.acl_mtu) {
		return &bt_dev.br.pkts;
	}
#endif /* CONFIG_BT_CLASSIC */

#if defined(CONFIG_BT_ISO)
	/* Use ISO pkts semaphore if LE Read Buffer Size command returned
	 * dedicated ISO buffers.
	 */
	if (conn->type == BT_CONN_TYPE_ISO) {
		if (bt_dev.le.iso_mtu && bt_dev.le.iso_limit != 0) {
			return &bt_dev.le.iso_pkts;
		}

		/* No dedicated ISO buffers: ISO data cannot be sent. */
		return NULL;
	}
#endif /* CONFIG_BT_ISO */

#if defined(CONFIG_BT_CONN)
	if (bt_dev.le.acl_mtu) {
		return &bt_dev.le.acl_pkts;
	}
#endif /* CONFIG_BT_CONN */

	return NULL;
}
220 
/* Human-readable name of a connection state, for logging. */
static inline const char *state2str(bt_conn_state_t state)
{
	const char *str;

	switch (state) {
	case BT_CONN_DISCONNECTED:
		str = "disconnected";
		break;
	case BT_CONN_DISCONNECT_COMPLETE:
		str = "disconnect-complete";
		break;
	case BT_CONN_INITIATING:
		str = "initiating";
		break;
	case BT_CONN_SCAN_BEFORE_INITIATING:
		str = "scan-before-initiating";
		break;
	case BT_CONN_INITIATING_FILTER_LIST:
		str = "initiating-filter-list";
		break;
	case BT_CONN_ADV_CONNECTABLE:
		str = "adv-connectable";
		break;
	case BT_CONN_ADV_DIR_CONNECTABLE:
		str = "adv-dir-connectable";
		break;
	case BT_CONN_CONNECTED:
		str = "connected";
		break;
	case BT_CONN_DISCONNECTING:
		str = "disconnecting";
		break;
	default:
		str = "(unknown)";
		break;
	}

	return str;
}
246 
tx_free(struct bt_conn_tx * tx)247 static void tx_free(struct bt_conn_tx *tx)
248 {
249 	LOG_DBG("%p", tx);
250 	tx->cb = NULL;
251 	tx->user_data = NULL;
252 	k_fifo_put(&free_tx, tx);
253 }
254 
255 #if defined(CONFIG_BT_CONN_TX)
/* Drain `conn->tx_complete` and invoke each pending TX callback with
 * success (0). Must run on the system workqueue (asserted below) so
 * callbacks are always delivered from a single, known context.
 */
static void tx_notify(struct bt_conn *conn)
{
	__ASSERT_NO_MSG(k_current_get() ==
			k_work_queue_thread_get(&k_sys_work_q));

	LOG_DBG("conn %p", conn);

	while (1) {
		struct bt_conn_tx *tx = NULL;
		unsigned int key;
		bt_conn_tx_cb_t cb;
		void *user_data;

		/* NOTE(review): the irq lock suggests tx_complete can be
		 * appended to from interrupt context — confirm before
		 * relaxing this to a scheduler lock.
		 */
		key = irq_lock();
		if (!sys_slist_is_empty(&conn->tx_complete)) {
			const sys_snode_t *node = sys_slist_get_not_empty(&conn->tx_complete);

			tx = CONTAINER_OF(node, struct bt_conn_tx, node);
		}
		irq_unlock(key);

		/* List drained: we are done. */
		if (!tx) {
			return;
		}

		LOG_DBG("tx %p cb %p user_data %p", tx, tx->cb, tx->user_data);

		/* Copy over the params */
		cb = tx->cb;
		user_data = tx->user_data;

		/* Free up TX notify since there may be user waiting */
		tx_free(tx);

		/* Run the callback, at this point it should be safe to
		 * allocate new buffers since the TX should have been
		 * unblocked by tx_free.
		 */
		if (cb) {
			cb(conn, user_data, 0);
		}

		LOG_DBG("raise TX IRQ");
		bt_tx_irq_raise();
	}
}
302 #endif	/* CONFIG_BT_CONN_TX */
303 
bt_conn_new(struct bt_conn * conns,size_t size)304 struct bt_conn *bt_conn_new(struct bt_conn *conns, size_t size)
305 {
306 	struct bt_conn *conn = NULL;
307 	int i;
308 
309 	for (i = 0; i < size; i++) {
310 		if (atomic_cas(&conns[i].ref, 0, 1)) {
311 			conn = &conns[i];
312 			break;
313 		}
314 	}
315 
316 	if (!conn) {
317 		return NULL;
318 	}
319 
320 	(void)memset(conn, 0, offsetof(struct bt_conn, ref));
321 
322 #if defined(CONFIG_BT_CONN)
323 	k_work_init_delayable(&conn->deferred_work, deferred_work);
324 #endif /* CONFIG_BT_CONN */
325 #if defined(CONFIG_BT_CONN_TX)
326 	k_work_init(&conn->tx_complete_work, tx_complete_work);
327 #endif /* CONFIG_BT_CONN_TX */
328 
329 	return conn;
330 }
331 
bt_conn_reset_rx_state(struct bt_conn * conn)332 void bt_conn_reset_rx_state(struct bt_conn *conn)
333 {
334 	if (!conn->rx) {
335 		return;
336 	}
337 
338 	net_buf_unref(conn->rx);
339 	conn->rx = NULL;
340 }
341 
/* Re-assemble inbound ACL fragments into a complete L2CAP PDU.
 *
 * Partial PDUs accumulate in conn->rx (which holds its own reference);
 * once the full L2CAP length has arrived the PDU is handed to
 * bt_l2cap_recv(). For incomplete fragments a Host-Number-of-Completed-
 * Packets credit is returned to the controller immediately.
 */
static void bt_acl_recv(struct bt_conn *conn, struct net_buf *buf, uint8_t flags)
{
	uint16_t acl_total_len;

	bt_acl_set_ncp_sent(buf, false);

	/* Check packet boundary flags */
	switch (flags) {
	case BT_ACL_START:
		/* A new PDU while one is in flight: discard the stale one. */
		if (conn->rx) {
			LOG_ERR("Unexpected first L2CAP frame");
			bt_conn_reset_rx_state(conn);
		}

		LOG_DBG("First, len %u final %u", buf->len,
			(buf->len < sizeof(uint16_t)) ? 0 : sys_get_le16(buf->data));

		/* conn->rx keeps its own reference until re-assembly ends. */
		conn->rx = net_buf_ref(buf);
		break;
	case BT_ACL_CONT:
		if (!conn->rx) {
			LOG_ERR("Unexpected L2CAP continuation");
			bt_conn_reset_rx_state(conn);
			net_buf_unref(buf);
			return;
		}

		if (!buf->len) {
			LOG_DBG("Empty ACL_CONT");
			net_buf_unref(buf);
			return;
		}

		if (buf->len > net_buf_tailroom(conn->rx)) {
			LOG_ERR("Not enough buffer space for L2CAP data");

			/* Frame is not complete but we still pass it to L2CAP
			 * so that it may handle error on protocol level
			 * eg disconnect channel.
			 */
			bt_l2cap_recv(conn, conn->rx, false);
			conn->rx = NULL;
			net_buf_unref(buf);
			return;
		}

		net_buf_add_mem(conn->rx, buf->data, buf->len);
		break;
	default:
		/* BT_ACL_START_NO_FLUSH and BT_ACL_COMPLETE are not allowed on
		 * LE-U from Controller to Host.
		 * Only BT_ACL_POINT_TO_POINT is supported.
		 */
		LOG_ERR("Unexpected ACL flags (0x%02x)", flags);
		bt_conn_reset_rx_state(conn);
		net_buf_unref(buf);
		return;
	}

	if (conn->rx->len < sizeof(uint16_t)) {
		/* Still not enough data received to retrieve the L2CAP header
		 * length field.
		 */
		bt_send_one_host_num_completed_packets(conn->handle);
		bt_acl_set_ncp_sent(buf, true);
		net_buf_unref(buf);

		return;
	}

	/* Full PDU length = L2CAP payload length field + L2CAP header. */
	acl_total_len = sys_get_le16(conn->rx->data) + sizeof(struct bt_l2cap_hdr);

	if (conn->rx->len < acl_total_len) {
		/* L2CAP frame not complete. */
		bt_send_one_host_num_completed_packets(conn->handle);
		bt_acl_set_ncp_sent(buf, true);
		net_buf_unref(buf);

		return;
	}

	/* Done with the fragment buffer; conn->rx holds the assembled PDU. */
	net_buf_unref(buf);

	if (conn->rx->len > acl_total_len) {
		LOG_ERR("ACL len mismatch (%u > %u)", conn->rx->len, acl_total_len);
		bt_conn_reset_rx_state(conn);
		return;
	}

	/* L2CAP frame complete. */
	buf = conn->rx;
	conn->rx = NULL;

	__ASSERT(buf->ref == 1, "buf->ref %d", buf->ref);

	LOG_DBG("Successfully parsed %u byte L2CAP packet", buf->len);
	bt_l2cap_recv(conn, buf, true);
}
440 
/* Make sure every completed-TX callback for @p conn has run before
 * returning.
 *
 * When already on the system workqueue (or when RX runs there anyway)
 * the callbacks are invoked inline; otherwise the conn's TX-complete
 * work item is submitted to the system workqueue and flushed.
 */
static void wait_for_tx_work(struct bt_conn *conn)
{
#if defined(CONFIG_BT_CONN_TX)
	LOG_DBG("conn %p", conn);

	if (IS_ENABLED(CONFIG_BT_RECV_WORKQ_SYS) ||
	    k_current_get() == k_work_queue_thread_get(&k_sys_work_q)) {
		tx_notify(conn);
	} else {
		struct k_work_sync sync;
		int err;

		err = k_work_submit(&conn->tx_complete_work);
		__ASSERT(err >= 0, "couldn't submit (err %d)", err);

		/* Block until the work item has executed. */
		k_work_flush(&conn->tx_complete_work, &sync);
	}
	LOG_DBG("done");
#else
	ARG_UNUSED(conn);
#endif	/* CONFIG_BT_CONN_TX */
}
463 
/* Entry point for inbound data: dispatch to the ISO or ACL receive
 * path after flushing pending TX callbacks for this connection.
 */
void bt_conn_recv(struct bt_conn *conn, struct net_buf *buf, uint8_t flags)
{
	/* Deliver pending TX callbacks before touching new RX data, and
	 * always from the same context (the system workqueue) for sanity.
	 */
	wait_for_tx_work(conn);

	LOG_DBG("handle %u len %u flags %02x", conn->handle, buf->len, flags);

	if (IS_ENABLED(CONFIG_BT_ISO_RX) && conn->type == BT_CONN_TYPE_ISO) {
		bt_iso_recv(conn, buf, flags);
		return;
	}

	if (IS_ENABLED(CONFIG_BT_CONN)) {
		bt_acl_recv(conn, buf, flags);
	} else {
		__ASSERT(false, "Invalid connection type %u", conn->type);
	}
}
485 
dont_have_tx_context(struct bt_conn * conn)486 static bool dont_have_tx_context(struct bt_conn *conn)
487 {
488 	return k_fifo_is_empty(&free_tx);
489 }
490 
conn_tx_alloc(void)491 static struct bt_conn_tx *conn_tx_alloc(void)
492 {
493 	struct bt_conn_tx *ret = k_fifo_get(&free_tx, K_NO_WAIT);
494 
495 	LOG_DBG("%p", ret);
496 
497 	return ret;
498 }
499 
/* Position of an HCI fragment within the upper-layer PDU. Mapped to
 * on-air packet-boundary flags by send_acl() / send_iso().
 */
enum {
	FRAG_START,	/* first fragment, more to follow */
	FRAG_CONT,	/* intermediate fragment */
	FRAG_SINGLE,	/* PDU fits in a single fragment */
	FRAG_END	/* last fragment of the PDU */
};
506 
/* Prepend an HCI ACL header to @p buf and hand it to the driver. */
static int send_acl(struct bt_conn *conn, struct net_buf *buf, uint8_t flags)
{
	struct bt_hci_acl_hdr *hdr;

	/* Translate the fragment position into HCI boundary flags. */
	switch (flags) {
	case FRAG_START:
	case FRAG_SINGLE:
		flags = BT_ACL_START_NO_FLUSH;
		break;
	case FRAG_CONT:
	case FRAG_END:
		flags = BT_ACL_CONT;
		break;
	default:
		return -EINVAL;
	}

	hdr = net_buf_push(buf, sizeof(*hdr));
	hdr->handle = sys_cpu_to_le16(bt_acl_handle_pack(conn->handle, flags));
	hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));

	bt_buf_set_type(buf, BT_BUF_ACL_OUT);

	return bt_send(buf);
}
532 
/* Prepend an HCI ISO header to @p buf and hand it to the driver. */
static int send_iso(struct bt_conn *conn, struct net_buf *buf, uint8_t flags)
{
	struct bt_hci_iso_hdr *hdr;
	enum bt_iso_timestamp ts;

	/* Translate the fragment position into ISO boundary flags. */
	switch (flags) {
	case FRAG_START:
		flags = BT_ISO_START;
		break;
	case FRAG_CONT:
		flags = BT_ISO_CONT;
		break;
	case FRAG_SINGLE:
		flags = BT_ISO_SINGLE;
		break;
	case FRAG_END:
		flags = BT_ISO_END;
		break;
	default:
		return -EINVAL;
	}

	/* The TS bit is set by `iso.c:conn_iso_send`. This special byte
	 * prepends the whole SDU, and won't be there for individual fragments.
	 *
	 * Conveniently, it is only legal to set the TS bit on the first HCI
	 * fragment, so we don't have to pass this extra metadata around for
	 * every fragment, only the first one.
	 */
	if (flags == BT_ISO_SINGLE || flags == BT_ISO_START) {
		ts = (enum bt_iso_timestamp)net_buf_pull_u8(buf);
	} else {
		ts = BT_ISO_TS_ABSENT;
	}

	hdr = net_buf_push(buf, sizeof(*hdr));
	hdr->handle = sys_cpu_to_le16(bt_iso_handle_pack(conn->handle, flags, ts));
	hdr->len = sys_cpu_to_le16(buf->len - sizeof(*hdr));

	bt_buf_set_type(buf, BT_BUF_ISO_OUT);

	return bt_send(buf);
}
576 
/* Maximum HCI payload, in bytes, of one fragment on this connection.
 *
 * Non-ISO links fall back to the BR/EDR MTU when the controller has no
 * dedicated LE ACL buffers (le.acl_mtu == 0).
 */
static inline uint16_t conn_mtu(struct bt_conn *conn)
{
#if defined(CONFIG_BT_CLASSIC)
	if (conn->type == BT_CONN_TYPE_BR ||
	    (conn->type != BT_CONN_TYPE_ISO && !bt_dev.le.acl_mtu)) {
		return bt_dev.br.mtu;
	}
#endif /* CONFIG_BT_CLASSIC */
#if defined(CONFIG_BT_ISO)
	if (conn->type == BT_CONN_TYPE_ISO) {
		return bt_dev.le.iso_mtu;
	}
#endif /* CONFIG_BT_ISO */
#if defined(CONFIG_BT_CONN)
	return bt_dev.le.acl_mtu;
#else
	/* No transport compiled in: nothing can be sent. */
	return 0;
#endif /* CONFIG_BT_CONN */
}
596 
is_classic_conn(struct bt_conn * conn)597 static bool is_classic_conn(struct bt_conn *conn)
598 {
599 	return (IS_ENABLED(CONFIG_BT_CLASSIC) &&
600 		conn->type == BT_CONN_TYPE_BR);
601 }
602 
is_iso_tx_conn(struct bt_conn * conn)603 static bool is_iso_tx_conn(struct bt_conn *conn)
604 {
605 	return IS_ENABLED(CONFIG_BT_ISO_TX) &&
606 		conn->type == BT_CONN_TYPE_ISO;
607 }
608 
is_le_conn(struct bt_conn * conn)609 static bool is_le_conn(struct bt_conn *conn)
610 {
611 	return IS_ENABLED(CONFIG_BT_CONN) && conn->type == BT_CONN_TYPE_LE;
612 }
613 
is_acl_conn(struct bt_conn * conn)614 static bool is_acl_conn(struct bt_conn *conn)
615 {
616 	return is_le_conn(conn) || is_classic_conn(conn);
617 }
618 
/* Send one HCI fragment of @p buf (an upper-layer PDU of total length
 * @p len) to the controller.
 *
 * The caller (`get_conn_ready`) must have verified that a controller
 * buffer, a TX context and a view buffer are all available. On success
 * the TX context (carrying @p cb / @p ud) is appended to
 * `conn->tx_pending` until the controller acknowledges the packet.
 *
 * @return 0 on success, -EMSGSIZE/-EIO/-ENOMEM on (asserted) internal
 * errors; any HCI-driver failure is merged into -EIO.
 */
static int send_buf(struct bt_conn *conn, struct net_buf *buf,
		    size_t len, void *cb, void *ud)
{
	struct net_buf *frag = NULL;
	struct bt_conn_tx *tx = NULL;
	uint8_t flags;
	int err;

	if (buf->len == 0) {
		__ASSERT_NO_MSG(0);

		return -EMSGSIZE;
	}

	if (bt_buf_has_view(buf)) {
		__ASSERT_NO_MSG(0);

		return -EIO;
	}

	/* %zu: `len` is a size_t; %u is a format-specifier mismatch. */
	LOG_DBG("conn %p buf %p len %zu buf->len %u cb %p ud %p",
		conn, buf, len, buf->len, cb, ud);

	/* Acquire the right to send 1 packet to the controller */
	if (k_sem_take(bt_conn_get_pkts(conn), K_NO_WAIT)) {
		/* This shouldn't happen now that we acquire the resources
		 * before calling `send_buf` (in `get_conn_ready`). We say
		 * "acquire" as `tx_processor()` is not re-entrant and the
		 * thread is non-preemptible. So the sem value shouldn't change.
		 */
		__ASSERT(0, "No controller bufs");

		return -ENOMEM;
	}

	/* Allocate and set the TX context */
	tx = conn_tx_alloc();

	/* See big comment above */
	if (!tx) {
		__ASSERT(0, "No TX context");

		/* Return the controller-buffer credit taken above so it
		 * is not leaked on this error path (the later error path
		 * below does the same).
		 */
		k_sem_give(bt_conn_get_pkts(conn));

		return -ENOMEM;
	}

	tx->cb = cb;
	tx->user_data = ud;

	uint16_t frag_len = MIN(conn_mtu(conn), len);

	__ASSERT_NO_MSG(buf->ref == 1);

	if (buf->len > frag_len) {
		LOG_DBG("keep %p around", buf);
		frag = get_data_frag(net_buf_ref(buf), frag_len);
	} else {
		LOG_DBG("move %p ref in", buf);
		/* Move the ref into `frag` for the last TX. That way `buf` will
		 * get destroyed when `frag` is destroyed.
		 */
		frag = get_data_frag(buf, frag_len);
	}

	/* Caller is supposed to check we have all resources to send */
	__ASSERT_NO_MSG(frag != NULL);

	/* If the current buffer doesn't fit a controller buffer */
	if (len > conn_mtu(conn)) {
		flags = conn->next_is_frag ? FRAG_CONT : FRAG_START;
		conn->next_is_frag = true;
	} else {
		flags = conn->next_is_frag ? FRAG_END : FRAG_SINGLE;
		conn->next_is_frag = false;
	}

	LOG_DBG("send frag: buf %p len %d", buf, frag_len);

	/* At this point, the buffer is either a fragment or a full HCI packet.
	 * The flags are also valid.
	 */
	LOG_DBG("conn %p buf %p len %u flags 0x%02x",
		conn, frag, frag->len, flags);

	/* Keep track of sent buffers. We have to append _before_
	 * sending, as we might get pre-empted if the HCI driver calls
	 * k_yield() before returning.
	 *
	 * In that case, the controller could also send a num-complete-packets
	 * event and our handler will be confused that there is no corresponding
	 * callback node in the `tx_pending` list.
	 */
	atomic_inc(&conn->in_ll);
	sys_slist_append(&conn->tx_pending, &tx->node);

	if (is_iso_tx_conn(conn)) {
		err = send_iso(conn, frag, flags);
	} else if (is_acl_conn(conn)) {
		err = send_acl(conn, frag, flags);
	} else {
		err = -EINVAL;	/* Some animals disable asserts (╯°□°)╯︵ ┻━┻ */
		__ASSERT(false, "Invalid connection type %u", conn->type);
	}

	if (!err) {
		return 0;
	}

	/* Remove buf from pending list */
	atomic_dec(&conn->in_ll);
	(void)sys_slist_find_and_remove(&conn->tx_pending, &tx->node);

	LOG_ERR("Unable to send to driver (err %d)", err);

	/* If we get here, something has seriously gone wrong: the `parent` buf
	 * (of which the current fragment belongs) should also be destroyed.
	 */
	net_buf_unref(frag);

	/* `buf` might not get destroyed right away, and its `tx`
	 * pointer will still be reachable. Make sure that we don't try
	 * to use the destroyed context later.
	 */
	conn_tx_destroy(conn, tx);
	k_sem_give(bt_conn_get_pkts(conn));

	/* Merge HCI driver errors */
	return -EIO;
}
747 
748 static struct k_poll_signal conn_change =
749 		K_POLL_SIGNAL_INITIALIZER(conn_change);
750 
conn_destroy(struct bt_conn * conn,void * data)751 static void conn_destroy(struct bt_conn *conn, void *data)
752 {
753 	if (conn->state == BT_CONN_CONNECTED ||
754 	    conn->state == BT_CONN_DISCONNECTING) {
755 		bt_conn_set_state(conn, BT_CONN_DISCONNECT_COMPLETE);
756 	}
757 
758 	if (conn->state != BT_CONN_DISCONNECTED) {
759 		bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
760 	}
761 }
762 
bt_conn_cleanup_all(void)763 void bt_conn_cleanup_all(void)
764 {
765 	bt_conn_foreach(BT_CONN_TYPE_ALL, conn_destroy, NULL);
766 }
767 
768 #if defined(CONFIG_BT_CONN)
769 /* Returns true if L2CAP has data to send on this conn */
acl_has_data(struct bt_conn * conn)770 static bool acl_has_data(struct bt_conn *conn)
771 {
772 	return sys_slist_peek_head(&conn->l2cap_data_ready) != NULL;
773 }
774 #endif	/* defined(CONFIG_BT_CONN) */
775 
776 /* Connection "Scheduler" of sorts:
777  *
778  * Will try to get the optimal number of queued buffers for the connection.
779  *
780  * Partitions the controller's buffers to each connection according to some
781  * heuristic. This is made to be tunable, fairness, simplicity, throughput etc.
782  *
783  * In the future, this will be a hook exposed to the application.
784  */
should_stop_tx(struct bt_conn * conn)785 static bool should_stop_tx(struct bt_conn *conn)
786 {
787 	LOG_DBG("%p", conn);
788 
789 	if (conn->state != BT_CONN_CONNECTED) {
790 		return true;
791 	}
792 
793 	/* TODO: This function should be overridable by the application: they
794 	 * should be able to provide their own heuristic.
795 	 */
796 	if (!conn->has_data(conn)) {
797 		LOG_DBG("No more data for %p", conn);
798 		return true;
799 	}
800 
801 	/* Queue only 3 buffers per-conn for now */
802 	if (atomic_get(&conn->in_ll) < 3) {
803 		/* The goal of this heuristic is to allow the link-layer to
804 		 * extend an ACL connection event as long as the application
805 		 * layer can provide data.
806 		 *
807 		 * Here we chose three buffers, as some LLs need two enqueued
808 		 * packets to be able to set the more-data bit, and one more
809 		 * buffer to allow refilling by the app while one of them is
810 		 * being sent over-the-air.
811 		 */
812 		return false;
813 	}
814 
815 	return true;
816 }
817 
bt_conn_data_ready(struct bt_conn * conn)818 void bt_conn_data_ready(struct bt_conn *conn)
819 {
820 	LOG_DBG("DR");
821 
822 	/* The TX processor will call the `pull_cb` to get the buf */
823 	if (!atomic_set(&conn->_conn_ready_lock, 1)) {
824 		/* Attach a reference to the `bt_dev.le.conn_ready` list.
825 		 *
826 		 * This reference will be consumed when the conn is popped off
827 		 * the list (in `get_conn_ready`).
828 		 */
829 		bt_conn_ref(conn);
830 		sys_slist_append(&bt_dev.le.conn_ready,
831 				 &conn->_conn_ready);
832 		LOG_DBG("raised");
833 	} else {
834 		LOG_DBG("already in list");
835 	}
836 
837 	/* Kick the TX processor */
838 	bt_tx_irq_raise();
839 }
840 
cannot_send_to_controller(struct bt_conn * conn)841 static bool cannot_send_to_controller(struct bt_conn *conn)
842 {
843 	return k_sem_count_get(bt_conn_get_pkts(conn)) == 0;
844 }
845 
/* True when no view ("fragment") buffer can be allocated right now. */
static bool dont_have_viewbufs(void)
{
#if defined(CONFIG_BT_CONN_TX)
	/* The LIFO only tracks buffers that have been destroyed at least once,
	 * hence the uninit check beforehand.
	 */
	if (fragments.uninit_count > 0) {
		/* If there are uninitialized bufs, we are guaranteed allocation. */
		return false;
	}

	/* In practice k_fifo == k_lifo ABI. */
	return k_fifo_is_empty(&fragments.free);

#else  /* !CONFIG_BT_CONN_TX */
	return false;
#endif	/* CONFIG_BT_CONN_TX */
}
864 
dont_have_methods(struct bt_conn * conn)865 __maybe_unused static bool dont_have_methods(struct bt_conn *conn)
866 {
867 	return (conn->tx_data_pull == NULL) ||
868 		(conn->get_and_clear_cb == NULL) ||
869 		(conn->has_data == NULL);
870 }
871 
/* Pick the next connection the TX processor should service.
 *
 * Returns a connection with data to send and all TX resources
 * available (controller buffer, TX context, view buffer), or NULL.
 * The returned pointer always carries a reference the caller must
 * drop with bt_conn_unref().
 */
struct bt_conn *get_conn_ready(void)
{
	/* Here we only peek: we pop the conn (and insert it at the back if it
	 * still has data) after the QoS function returns false.
	 */
	sys_snode_t *node  = sys_slist_peek_head(&bt_dev.le.conn_ready);

	if (node == NULL) {
		return NULL;
	}

	/* `conn` borrows from the list node. That node is _not_ popped yet.
	 *
	 * If we end up not popping that conn off the list, we have to make sure
	 * to increase the refcount before returning a pointer to that
	 * connection out of this function.
	 */
	struct bt_conn *conn = CONTAINER_OF(node, struct bt_conn, _conn_ready);

	if (dont_have_viewbufs()) {
		/* We will get scheduled again when the (view) buffers are freed. If you
		 * hit this a lot, try increasing `CONFIG_BT_CONN_FRAG_COUNT`
		 */
		LOG_DBG("no view bufs");
		return NULL;
	}

	if (cannot_send_to_controller(conn)) {
		/* We will get scheduled again when the buffers are freed. */
		LOG_DBG("no LL bufs for %p", conn);
		return NULL;
	}

	if (dont_have_tx_context(conn)) {
		/* We will get scheduled again when TX contexts are available. */
		LOG_DBG("no TX contexts");
		return NULL;
	}

	CHECKIF(dont_have_methods(conn)) {
		LOG_DBG("conn %p (type %d) is missing mandatory methods",
			conn, conn->type);

		return NULL;
	}

	if (should_stop_tx(conn)) {
		/* Move reference off the list and into the `conn` variable. */
		__maybe_unused sys_snode_t *s = sys_slist_get(&bt_dev.le.conn_ready);

		__ASSERT_NO_MSG(s == node);

		/* Allow the conn to be re-queued from now on. */
		(void)atomic_set(&conn->_conn_ready_lock, 0);
		/* Note: we can't assert `old` is non-NULL here, as the
		 * connection might have been marked ready by an l2cap channel
		 * that cancelled its request to send.
		 */

		/* Append connection to list if it still has data */
		if (conn->has_data(conn)) {
			LOG_DBG("appending %p to back of TX queue", conn);
			bt_conn_data_ready(conn);
		}

		return conn;
	}

	/* Conn stays at the head of the list: hand out a fresh ref. */
	return bt_conn_ref(conn);
}
941 
942 /* Crazy that this file is compiled even if this is not true, but here we are. */
943 #if defined(CONFIG_BT_CONN)
/* Extract the (callback, user-data) closure stashed in the buffer's
 * user data, then scrub it so it cannot be reused by accident.
 */
static void acl_get_and_clear_cb(struct bt_conn *conn, struct net_buf *buf,
				 bt_conn_tx_cb_t *cb, void **ud)
{
	__ASSERT_NO_MSG(is_acl_conn(conn));

	*cb = closure_cb(buf->user_data);
	*ud = closure_data(buf->user_data);

	(void)memset(buf->user_data, 0, buf->user_data_size);
}
953 #endif	/* defined(CONFIG_BT_CONN) */
954 
955 /* Acts as a "null-routed" bt_send(). This fn will decrease the refcount of
956  * `buf` and call the user callback with an error code.
957  */
/* Acts as a "null-routed" bt_send(): drop one reference on `buf` and
 * invoke the user callback with -ESHUTDOWN.
 */
static void destroy_and_callback(struct bt_conn *conn,
				 struct net_buf *buf,
				 bt_conn_tx_cb_t cb,
				 void *ud)
{
	if (cb == NULL) {
		conn->get_and_clear_cb(conn, buf, &cb, &ud);
	}

	LOG_DBG("pop: cb %p userdata %p", cb, ud);

	/* Mirror the unref bt_send() would have done, so the buffer is
	 * hopefully destroyed and the user callback can allocate a new one.
	 */
	net_buf_unref(buf);

	if (cb != NULL) {
		cb(conn, ud, -ESHUTDOWN);
	}
}
978 
979 static volatile bool _suspend_tx;
980 
981 #if defined(CONFIG_BT_TESTING)
/* Test hook: pause or resume all data TX, then kick the TX processor
 * so the new setting takes effect immediately.
 */
void bt_conn_suspend_tx(bool suspend)
{
	_suspend_tx = suspend;

	LOG_DBG("%sing all data TX", suspend ? "suspend" : "resum");

	bt_tx_irq_raise();
}
990 #endif	/* CONFIG_BT_TESTING */
991 
/* TX processor: pull one PDU (or fragment) from the next ready
 * connection and hand it to the controller via send_buf().
 *
 * Runs to completion once per invocation and re-raises the TX "IRQ"
 * to schedule itself again while work remains.
 */
void bt_conn_tx_processor(void)
{
	LOG_DBG("start");
	struct bt_conn *conn;
	struct net_buf *buf;
	bt_conn_tx_cb_t cb = NULL;
	size_t buf_len;
	void *ud = NULL;

	if (!IS_ENABLED(CONFIG_BT_CONN_TX)) {
		/* Mom, can we have a real compiler? */
		return;
	}

	/* Test hook: TX can be suspended wholesale (bt_conn_suspend_tx). */
	if (IS_ENABLED(CONFIG_BT_TESTING) && _suspend_tx) {
		return;
	}

	/* Comes back with a reference we must release on every path (exit). */
	conn = get_conn_ready();

	if (!conn) {
		LOG_DBG("no connection wants to do stuff");
		return;
	}

	LOG_DBG("processing conn %p", conn);

	if (conn->state != BT_CONN_CONNECTED) {
		LOG_WRN("conn %p: not connected", conn);

		/* Call the user callbacks & destroy (final-unref) the buffers
		 * we were supposed to send.
		 */
		buf = conn->tx_data_pull(conn, SIZE_MAX, &buf_len);
		while (buf) {
			destroy_and_callback(conn, buf, cb, ud);
			buf = conn->tx_data_pull(conn, SIZE_MAX, &buf_len);
		}

		goto exit;
	}

	/* now that we are guaranteed resources, we can pull data from the upper
	 * layer (L2CAP or ISO).
	 */
	buf = conn->tx_data_pull(conn, conn_mtu(conn), &buf_len);
	if (!buf) {
		/* Either there is no more data, or the buffer is already in-use
		 * by a view on it. In both cases, the TX processor will be
		 * triggered again, either by the view's destroy callback, or by
		 * the upper layer when it has more data.
		 */
		LOG_DBG("no buf returned");

		goto exit;
	}

	/* Last fragment iff the remaining PDU fits in one controller buffer. */
	bool last_buf = conn_mtu(conn) >= buf_len;

	if (last_buf) {
		/* Only pull the callback info from the last buffer.
		 * We still allocate one TX context per-fragment though.
		 */
		conn->get_and_clear_cb(conn, buf, &cb, &ud);
		LOG_DBG("pop: cb %p userdata %p", cb, ud);
	}

	LOG_DBG("TX process: conn %p buf %p (%s)",
		conn, buf, last_buf ? "last" : "frag");

	int err = send_buf(conn, buf, buf_len, cb, ud);

	if (err) {
		/* -EIO means `unrecoverable error`. It can be an assertion that
		 *  failed or an error from the HCI driver.
		 *
		 * -ENOMEM means we thought we had all the resources to send the
		 *  buf (ie. TX context + controller buffer) but one of them was
		 *  not available. This is likely due to a failure of
		 *  assumption, likely that we have been pre-empted somehow and
		 *  that `tx_processor()` has been re-entered.
		 *
		 *  In both cases, we destroy the buffer and mark the connection
		 *  as dead.
		 */
		LOG_ERR("Fatal error (%d). Disconnecting %p", err, conn);
		destroy_and_callback(conn, buf, cb, ud);
		bt_conn_disconnect(conn, BT_HCI_ERR_REMOTE_USER_TERM_CONN);

		goto exit;
	}

	/* Always kick the TX work. It will self-suspend if it doesn't get
	 * resources or there is nothing left to send.
	 */
	bt_tx_irq_raise();

exit:
	/* Give back the ref that `get_conn_ready()` gave us */
	bt_conn_unref(conn);
}
1093 
process_unack_tx(struct bt_conn * conn)1094 static void process_unack_tx(struct bt_conn *conn)
1095 {
1096 	LOG_DBG("%p", conn);
1097 
1098 	/* Return any unacknowledged packets */
1099 	while (1) {
1100 		struct bt_conn_tx *tx;
1101 		sys_snode_t *node;
1102 
1103 		node = sys_slist_get(&conn->tx_pending);
1104 
1105 		if (!node) {
1106 			return;
1107 		}
1108 
1109 		tx = CONTAINER_OF(node, struct bt_conn_tx, node);
1110 
1111 		conn_tx_destroy(conn, tx);
1112 		k_sem_give(bt_conn_get_pkts(conn));
1113 	}
1114 }
1115 
/* Search the connection array @a conns (of @a size elements) for an entry
 * whose HCI handle matches @a handle.
 *
 * Returns a new reference that the caller must release with bt_conn_unref(),
 * or NULL if no match is found.
 */
struct bt_conn *conn_lookup_handle(struct bt_conn *conns, size_t size,
				   uint16_t handle)
{
	/* Use size_t for the index: `size` is size_t, and a signed `int`
	 * loop counter would trigger signed/unsigned comparison issues.
	 */
	for (size_t i = 0; i < size; i++) {
		struct bt_conn *conn = bt_conn_ref(&conns[i]);

		/* Unused slot: ref count was already zero */
		if (!conn) {
			continue;
		}

		/* We only care about connections with a valid handle */
		if (!bt_conn_is_handle_valid(conn)) {
			bt_conn_unref(conn);
			continue;
		}

		if (conn->handle != handle) {
			bt_conn_unref(conn);
			continue;
		}

		return conn;
	}

	return NULL;
}
1144 
/* Transition @a conn to @a state, running the exit actions of the old state
 * and the entry actions of the new one.
 *
 * The connection's life-cycle reference is taken on the first transition out
 * of DISCONNECTED and released again on the paths back into DISCONNECTED.
 * A request to "transition" into the current state is ignored with a warning.
 */
void bt_conn_set_state(struct bt_conn *conn, bt_conn_state_t state)
{
	bt_conn_state_t old_state;

	LOG_DBG("%s -> %s", state2str(conn->state), state2str(state));

	if (conn->state == state) {
		LOG_WRN("no transition %s", state2str(state));
		return;
	}

	old_state = conn->state;
	conn->state = state;

	/* Actions needed for exiting the old state */
	switch (old_state) {
	case BT_CONN_DISCONNECTED:
		/* Take a reference for the first state transition after
		 * bt_conn_add_le() and keep it until reaching DISCONNECTED
		 * again.
		 */
		if (conn->type != BT_CONN_TYPE_ISO) {
			bt_conn_ref(conn);
		}
		break;
	case BT_CONN_INITIATING:
		if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
		    conn->type == BT_CONN_TYPE_LE) {
			k_work_cancel_delayable(&conn->deferred_work);
		}
		break;
	default:
		break;
	}

	/* Actions needed for entering the new state */
	switch (conn->state) {
	case BT_CONN_CONNECTED:
		if (conn->type == BT_CONN_TYPE_SCO) {
			if (IS_ENABLED(CONFIG_BT_CLASSIC)) {
				bt_sco_connected(conn);
			}
			break;
		}
		k_poll_signal_raise(&conn_change, 0);

		if (IS_ENABLED(CONFIG_BT_ISO) &&
		    conn->type == BT_CONN_TYPE_ISO) {
			bt_iso_connected(conn);
			break;
		}

#if defined(CONFIG_BT_CONN)
		sys_slist_init(&conn->channels);

		if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
		    conn->role == BT_CONN_ROLE_PERIPHERAL) {

#if defined(CONFIG_BT_GAP_AUTO_UPDATE_CONN_PARAMS)
			if (conn->type == BT_CONN_TYPE_LE) {
				conn->le.conn_param_retry_countdown =
					CONFIG_BT_CONN_PARAM_RETRY_COUNT;
			}
#endif /* CONFIG_BT_GAP_AUTO_UPDATE_CONN_PARAMS */

			k_work_schedule(&conn->deferred_work,
					CONN_UPDATE_TIMEOUT);
		}
#endif /* CONFIG_BT_CONN */

		break;
	case BT_CONN_DISCONNECTED:
#if defined(CONFIG_BT_CONN)
		if (conn->type == BT_CONN_TYPE_SCO) {
			if (IS_ENABLED(CONFIG_BT_CLASSIC)) {
				bt_sco_disconnected(conn);
			}
			bt_conn_unref(conn);
			break;
		}

		/* Notify disconnection and queue a dummy buffer to wake
		 * up and stop the tx thread for states where it was
		 * running.
		 */
		switch (old_state) {
		case BT_CONN_DISCONNECT_COMPLETE:
			wait_for_tx_work(conn);

			LOG_DBG("trigger disconnect work");
			k_work_reschedule(&conn->deferred_work, K_NO_WAIT);

			/* The last ref will be dropped during cleanup */
			break;
		case BT_CONN_INITIATING:
			/* LE Create Connection command failed. This might be
			 * directly from the API, don't notify application in
			 * this case.
			 */
			if (conn->err) {
				notify_connected(conn);
			}

			bt_conn_unref(conn);
			break;
		case BT_CONN_SCAN_BEFORE_INITIATING:
			/* This indicates that connection establishment
			 * has been stopped. This could either be triggered by
			 * the application through bt_conn_disconnect or by
			 * timeout set by bt_conn_le_create_param.timeout.
			 */
			if (conn->err) {
				notify_connected(conn);
			}

			bt_conn_unref(conn);
			break;
		case BT_CONN_ADV_DIR_CONNECTABLE:
			/* this indicate Directed advertising stopped */
			if (conn->err) {
				notify_connected(conn);
			}

			bt_conn_unref(conn);
			break;
		case BT_CONN_INITIATING_FILTER_LIST:
			/* this indicates LE Create Connection with filter
			 * policy has been stopped. This can only be triggered
			 * by the application, so don't notify.
			 */
			bt_conn_unref(conn);
			break;
		case BT_CONN_ADV_CONNECTABLE:
			/* This can only happen when application stops the
			 * advertiser, conn->err is never set in this case.
			 */
			bt_conn_unref(conn);
			break;
		case BT_CONN_CONNECTED:
		case BT_CONN_DISCONNECTING:
		case BT_CONN_DISCONNECTED:
			/* Cannot happen. */
			/* Log the state we actually came from, not the one we
			 * just entered.
			 */
			LOG_WRN("Invalid (%u) old state", old_state);
			break;
		}
		break;
	case BT_CONN_INITIATING_FILTER_LIST:
		break;
	case BT_CONN_ADV_CONNECTABLE:
		break;
	case BT_CONN_SCAN_BEFORE_INITIATING:
		break;
	case BT_CONN_ADV_DIR_CONNECTABLE:
		break;
	case BT_CONN_INITIATING:
		if (conn->type == BT_CONN_TYPE_SCO) {
			break;
		}
		/*
		 * Timer is needed only for LE. For other link types controller
		 * will handle connection timeout.
		 */
		if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
		    conn->type == BT_CONN_TYPE_LE &&
		    bt_dev.create_param.timeout != 0) {
			k_work_schedule(&conn->deferred_work,
					K_MSEC(10 * bt_dev.create_param.timeout));
		}

		break;
	case BT_CONN_DISCONNECTING:
		break;
#endif /* CONFIG_BT_CONN */
	case BT_CONN_DISCONNECT_COMPLETE:
		if (conn->err == BT_HCI_ERR_CONN_FAIL_TO_ESTAB) {
			/* No ACK or data was ever received. The peripheral may be
			 * unaware of the connection attempt.
			 *
			 * Beware of confusing higher layer errors. Anything that looks
			 * like it's from the remote is synthetic.
			 */
			LOG_WRN("conn %p failed to establish. RF noise?", conn);
		}

		process_unack_tx(conn);
		break;
	default:
		LOG_WRN("no valid (%u) state was set", state);

		break;
	}
}
1337 
/* Look up a connection by HCI handle across all connection pools (ACL, ISO,
 * SCO, depending on configuration), filtered by @a type (a bitmask).
 *
 * Returns a new reference that the caller must release with bt_conn_unref(),
 * or NULL if no connection with that handle (and compatible type) exists.
 */
struct bt_conn *bt_conn_lookup_handle(uint16_t handle, enum bt_conn_type type)
{
	/* Initialize so `conn` is well-defined at `found:` even when every
	 * pool below is compiled out.
	 */
	struct bt_conn *conn = NULL;

#if defined(CONFIG_BT_CONN)
	conn = conn_lookup_handle(acl_conns, ARRAY_SIZE(acl_conns), handle);
	if (conn) {
		goto found;
	}
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_ISO)
	conn = conn_lookup_handle(iso_conns, ARRAY_SIZE(iso_conns), handle);
	if (conn) {
		goto found;
	}
#endif

#if defined(CONFIG_BT_CLASSIC)
	conn = conn_lookup_handle(sco_conns, ARRAY_SIZE(sco_conns), handle);
	if (conn) {
		goto found;
	}
#endif

found:
	if (conn) {
		/* Drop the reference again if the caller asked for a
		 * different connection type.
		 */
		if (type & conn->type) {
			return conn;
		}
		LOG_WRN("incompatible handle %u", handle);
		bt_conn_unref(conn);
	}
	return NULL;
}
1373 
/* HCI-layer convenience wrapper: look up a connection of any type by handle.
 * Returns a new reference (caller must unref) or NULL.
 */
struct bt_conn *bt_hci_conn_lookup_handle(uint16_t handle)
{
	return bt_conn_lookup_handle(handle, BT_CONN_TYPE_ALL);
}
1378 
/* Invoke @a func(conn, data) for every allocated connection whose type is in
 * the @a type bitmask. A reference is held around each callback invocation,
 * so @a func may safely use the connection but must not keep it afterwards
 * without taking its own reference.
 */
void bt_conn_foreach(enum bt_conn_type type,
		     void (*func)(struct bt_conn *conn, void *data),
		     void *data)
{
	/* Use for-scoped size_t indices: ARRAY_SIZE() is of type size_t and
	 * a shared signed `int` counter would cause sign-compare warnings.
	 */
#if defined(CONFIG_BT_CONN)
	for (size_t i = 0; i < ARRAY_SIZE(acl_conns); i++) {
		struct bt_conn *conn = bt_conn_ref(&acl_conns[i]);

		if (!conn) {
			continue;
		}

		if (!(conn->type & type)) {
			bt_conn_unref(conn);
			continue;
		}

		func(conn, data);
		bt_conn_unref(conn);
	}
#if defined(CONFIG_BT_CLASSIC)
	if (type & BT_CONN_TYPE_SCO) {
		for (size_t i = 0; i < ARRAY_SIZE(sco_conns); i++) {
			struct bt_conn *conn = bt_conn_ref(&sco_conns[i]);

			if (!conn) {
				continue;
			}

			func(conn, data);
			bt_conn_unref(conn);
		}
	}
#endif /* defined(CONFIG_BT_CLASSIC) */
#endif /* CONFIG_BT_CONN */

#if defined(CONFIG_BT_ISO)
	if (type & BT_CONN_TYPE_ISO) {
		for (size_t i = 0; i < ARRAY_SIZE(iso_conns); i++) {
			struct bt_conn *conn = bt_conn_ref(&iso_conns[i]);

			if (!conn) {
				continue;
			}

			func(conn, data);
			bt_conn_unref(conn);
		}
	}
#endif /* defined(CONFIG_BT_ISO) */
}
1432 
bt_conn_ref(struct bt_conn * conn)1433 struct bt_conn *bt_conn_ref(struct bt_conn *conn)
1434 {
1435 	atomic_val_t old;
1436 
1437 	__ASSERT_NO_MSG(conn);
1438 
1439 	/* Reference counter must be checked to avoid incrementing ref from
1440 	 * zero, then we should return NULL instead.
1441 	 * Loop on clear-and-set in case someone has modified the reference
1442 	 * count since the read, and start over again when that happens.
1443 	 */
1444 	do {
1445 		old = atomic_get(&conn->ref);
1446 
1447 		if (!old) {
1448 			return NULL;
1449 		}
1450 	} while (!atomic_cas(&conn->ref, old, old + 1));
1451 
1452 	LOG_DBG("handle %u ref %ld -> %ld", conn->handle, old, old + 1);
1453 
1454 	return conn;
1455 }
1456 
bt_conn_unref(struct bt_conn * conn)1457 void bt_conn_unref(struct bt_conn *conn)
1458 {
1459 	atomic_val_t old;
1460 	bool deallocated;
1461 	enum bt_conn_type conn_type;
1462 	uint8_t conn_role;
1463 	uint16_t conn_handle;
1464 
1465 	__ASSERT(conn, "Invalid connection reference");
1466 
1467 	/* Storing parameters of interest so we don't access the object
1468 	 * after decrementing its ref-count
1469 	 */
1470 	conn_type = conn->type;
1471 	conn_role = conn->role;
1472 	conn_handle = conn->handle;
1473 
1474 	old = atomic_dec(&conn->ref);
1475 	/* Prevent from accessing connection object */
1476 	conn = NULL;
1477 	deallocated = (atomic_get(&old) == 1);
1478 
1479 	LOG_DBG("handle %u ref %ld -> %ld", conn_handle, old, (old - 1));
1480 
1481 	__ASSERT(old > 0, "Conn reference counter is 0");
1482 
1483 	/* Slot has been freed and can be taken. No guarantees are made on requests
1484 	 * to claim connection object as only the first claim will be served.
1485 	 */
1486 	if (deallocated) {
1487 		notify_recycled_conn_slot();
1488 	}
1489 
1490 	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && conn_type == BT_CONN_TYPE_LE &&
1491 	    conn_role == BT_CONN_ROLE_PERIPHERAL && deallocated) {
1492 		bt_le_adv_resume();
1493 	}
1494 }
1495 
/* Return the index of @a conn within its backing pool (acl_conns, iso_conns
 * or sco_conns, selected by conn->type). The pointer must point into one of
 * those static arrays; out-of-range pointers trip the assertions.
 */
uint8_t bt_conn_index(const struct bt_conn *conn)
{
	ptrdiff_t index = 0;

	switch (conn->type) {
#if defined(CONFIG_BT_ISO)
	case BT_CONN_TYPE_ISO:
		index = conn - iso_conns;
		__ASSERT(index >= 0 && index < ARRAY_SIZE(iso_conns),
			"Invalid bt_conn pointer");
		break;
#endif
#if defined(CONFIG_BT_CLASSIC)
	case BT_CONN_TYPE_SCO:
		index = conn - sco_conns;
		__ASSERT(index >= 0 && index < ARRAY_SIZE(sco_conns),
			"Invalid bt_conn pointer");
		break;
#endif
	default:
		/* All remaining types (LE / BR ACL) live in acl_conns */
#if defined(CONFIG_BT_CONN)
		index = conn - acl_conns;
		__ASSERT(index >= 0 && index < ARRAY_SIZE(acl_conns),
			 "Invalid bt_conn pointer");
#else
		__ASSERT(false, "Invalid connection type %u", conn->type);
#endif /* CONFIG_BT_CONN */
		break;
	}

	/* Pool sizes fit in 8 bits, so the narrowing cast is safe */
	return (uint8_t)index;
}
1528 
1529 
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *bt_conn_create_pdu_timeout_debug(struct net_buf_pool *pool,
						 size_t reserve,
						 k_timeout_t timeout,
						 const char *func, int line)
#else
/* Allocate an outgoing PDU buffer from @a pool (or the default ACL TX pool
 * when @a pool is NULL), reserving @a reserve bytes of headroom on top of
 * the HCI ACL header. Returns NULL on allocation failure or when no pool is
 * available. Must not be called from ISR context.
 */
struct net_buf *bt_conn_create_pdu_timeout(struct net_buf_pool *pool,
					   size_t reserve, k_timeout_t timeout)
#endif
{
	struct net_buf *buf;

	/*
	 * PDU must not be allocated from ISR as we block with 'K_FOREVER'
	 * during the allocation
	 */
	__ASSERT_NO_MSG(!k_is_in_isr());

	/* Blocking on the system workqueue would stall unrelated work items,
	 * so downgrade any blocking timeout to K_NO_WAIT there.
	 */
	if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT) &&
	    k_current_get() == k_work_queue_thread_get(&k_sys_work_q)) {
		LOG_DBG("Timeout discarded. No blocking in syswq.");
		timeout = K_NO_WAIT;
	}

	if (!pool) {
#if defined(CONFIG_BT_CONN)
		pool = &acl_tx_pool;
#else
		return NULL;
#endif /* CONFIG_BT_CONN */
	}

	if (IS_ENABLED(CONFIG_BT_CONN_LOG_LEVEL_DBG)) {
		/* In debug builds, first try a non-blocking allocation so we
		 * can log when the pool is exhausted, then retry with the
		 * caller's timeout.
		 */
#if defined(CONFIG_NET_BUF_LOG)
		buf = net_buf_alloc_fixed_debug(pool, K_NO_WAIT, func, line);
#else
		buf = net_buf_alloc(pool, K_NO_WAIT);
#endif
		if (!buf) {
			LOG_WRN("Unable to allocate buffer with K_NO_WAIT");
#if defined(CONFIG_NET_BUF_LOG)
			buf = net_buf_alloc_fixed_debug(pool, timeout, func,
							line);
#else
			buf = net_buf_alloc(pool, timeout);
#endif
		}
	} else {
#if defined(CONFIG_NET_BUF_LOG)
		buf = net_buf_alloc_fixed_debug(pool, timeout, func,
							line);
#else
		buf = net_buf_alloc(pool, timeout);
#endif
	}

	if (!buf) {
		LOG_WRN("Unable to allocate buffer within timeout");
		return NULL;
	}

	/* Headroom for the HCI ACL header plus driver-specific reserve */
	reserve += sizeof(struct bt_hci_acl_hdr) + BT_BUF_RESERVE;
	net_buf_reserve(buf, reserve);

	return buf;
}
1596 
1597 #if defined(CONFIG_BT_CONN_TX)
/* Work handler: deliver pending TX-complete notifications for the
 * connection embedding this work item.
 */
static void tx_complete_work(struct k_work *work)
{
	struct bt_conn *conn = CONTAINER_OF(work, struct bt_conn,
					    tx_complete_work);

	LOG_DBG("conn %p", conn);

	tx_notify(conn);
}
1607 #endif /* CONFIG_BT_CONN_TX */
1608 
/* Tell all registered listeners that a connection slot has been freed and
 * may now be claimed. Both runtime-registered callbacks (conn_cbs list) and
 * statically registered ones (iterable section) are invoked.
 */
static void notify_recycled_conn_slot(void)
{
#if defined(CONFIG_BT_CONN)
	struct bt_conn_cb *callback;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
		if (callback->recycled) {
			callback->recycled();
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
		if (cb->recycled) {
			cb->recycled();
		}
	}
#endif
}
1627 
1628 #if !defined(CONFIG_BT_CONN)
/* Stub used when CONFIG_BT_CONN is disabled: callers (e.g. the TX processor
 * error path) can still link against bt_conn_disconnect(), which then does
 * nothing and reports success.
 */
int bt_conn_disconnect(struct bt_conn *conn, uint8_t reason)
{
	ARG_UNUSED(conn);
	ARG_UNUSED(reason);

	/* Dummy implementation to satisfy the compiler */

	return 0;
}
1638 #endif	/* !CONFIG_BT_CONN */
1639 
1640 /* Group Connected BT_CONN only in this */
1641 #if defined(CONFIG_BT_CONN)
1642 
/* Run connection-established processing: bring up L2CAP on the link, then
 * notify the application's connected callbacks.
 */
void bt_conn_connected(struct bt_conn *conn)
{
	bt_l2cap_connected(conn);
	notify_connected(conn);
}
1648 
/* Request the controller to terminate the link identified by conn->handle
 * with the given HCI @a reason. On success, a CONNECTED link is moved to
 * DISCONNECTING; other states are left as-is.
 */
static int conn_disconnect(struct bt_conn *conn, uint8_t reason)
{
	int err = bt_hci_disconnect(conn->handle, reason);

	if (err) {
		return err;
	}

	if (conn->state == BT_CONN_CONNECTED) {
		bt_conn_set_state(conn, BT_CONN_DISCONNECTING);
	}

	return 0;
}
1664 
/* Public API: tear down (or abort establishment of) @a conn with HCI reason
 * @a reason. The action depends on the current state: pending scans and
 * create-connection attempts are cancelled, live links are disconnected,
 * and an already-disconnected connection yields -ENOTCONN.
 */
int bt_conn_disconnect(struct bt_conn *conn, uint8_t reason)
{
	/* Disconnection is initiated by us, so auto connection shall
	 * be disabled. Otherwise the passive scan would be enabled
	 * and we could send LE Create Connection as soon as the remote
	 * starts advertising.
	 */
#if !defined(CONFIG_BT_FILTER_ACCEPT_LIST)
	if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
	    conn->type == BT_CONN_TYPE_LE) {
		bt_le_set_auto_conn(&conn->le.dst, NULL);
	}
#endif /* !defined(CONFIG_BT_FILTER_ACCEPT_LIST) */

	switch (conn->state) {
	case BT_CONN_SCAN_BEFORE_INITIATING:
		/* Connection not started yet: just record the reason and go
		 * straight to DISCONNECTED, updating the scanner state.
		 */
		conn->err = reason;
		bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
		if (IS_ENABLED(CONFIG_BT_CENTRAL)) {
			bt_le_scan_update(false);
		}
		return 0;
	case BT_CONN_INITIATING:
		/* Abort the in-progress connection attempt; the cancel
		 * mechanism depends on the link type.
		 */
		if (conn->type == BT_CONN_TYPE_LE) {
			if (IS_ENABLED(CONFIG_BT_CENTRAL)) {
				k_work_cancel_delayable(&conn->deferred_work);
				return bt_le_create_conn_cancel();
			}
		}
#if defined(CONFIG_BT_ISO)
		else if (conn->type == BT_CONN_TYPE_ISO) {
			return conn_disconnect(conn, reason);
		}
#endif /* CONFIG_BT_ISO */
#if defined(CONFIG_BT_CLASSIC)
		else if (conn->type == BT_CONN_TYPE_BR) {
			return bt_hci_connect_br_cancel(conn);
		}
#endif /* CONFIG_BT_CLASSIC */
		else {
			__ASSERT(false, "Invalid conn type %u", conn->type);
		}

		return 0;
	case BT_CONN_CONNECTED:
		return conn_disconnect(conn, reason);
	case BT_CONN_DISCONNECTING:
		/* Already on its way down; nothing more to do */
		return 0;
	case BT_CONN_DISCONNECTED:
	default:
		return -ENOTCONN;
	}
}
1718 
notify_connected(struct bt_conn * conn)1719 static void notify_connected(struct bt_conn *conn)
1720 {
1721 	struct bt_conn_cb *callback;
1722 
1723 	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
1724 
1725 		if (callback->connected) {
1726 			callback->connected(conn, conn->err);
1727 		}
1728 	}
1729 
1730 	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
1731 		if (cb->connected) {
1732 			cb->connected(conn, conn->err);
1733 		}
1734 	}
1735 }
1736 
notify_disconnected(struct bt_conn * conn)1737 static void notify_disconnected(struct bt_conn *conn)
1738 {
1739 	struct bt_conn_cb *callback;
1740 
1741 	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
1742 		if (callback->disconnected) {
1743 			callback->disconnected(conn, conn->err);
1744 		}
1745 	}
1746 
1747 	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
1748 		if (cb->disconnected) {
1749 			cb->disconnected(conn, conn->err);
1750 		}
1751 	}
1752 }
1753 
1754 #if defined(CONFIG_BT_REMOTE_INFO)
notify_remote_info(struct bt_conn * conn)1755 void notify_remote_info(struct bt_conn *conn)
1756 {
1757 	struct bt_conn_remote_info remote_info;
1758 	int err;
1759 
1760 	err = bt_conn_get_remote_info(conn, &remote_info);
1761 	if (err) {
1762 		LOG_DBG("Notify remote info failed %d", err);
1763 		return;
1764 	}
1765 
1766 	struct bt_conn_cb *callback;
1767 
1768 	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
1769 		if (callback->remote_info_available) {
1770 			callback->remote_info_available(conn, &remote_info);
1771 		}
1772 	}
1773 
1774 	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
1775 		if (cb->remote_info_available) {
1776 			cb->remote_info_available(conn, &remote_info);
1777 		}
1778 	}
1779 }
1780 #endif /* defined(CONFIG_BT_REMOTE_INFO) */
1781 
notify_le_param_updated(struct bt_conn * conn)1782 void notify_le_param_updated(struct bt_conn *conn)
1783 {
1784 	/* If new connection parameters meet requirement of pending
1785 	 * parameters don't send peripheral conn param request anymore on timeout
1786 	 */
1787 	if (atomic_test_bit(conn->flags, BT_CONN_PERIPHERAL_PARAM_SET) &&
1788 	    conn->le.interval >= conn->le.interval_min &&
1789 	    conn->le.interval <= conn->le.interval_max &&
1790 	    conn->le.latency == conn->le.pending_latency &&
1791 	    conn->le.timeout == conn->le.pending_timeout) {
1792 		atomic_clear_bit(conn->flags, BT_CONN_PERIPHERAL_PARAM_SET);
1793 	}
1794 
1795 	struct bt_conn_cb *callback;
1796 
1797 	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
1798 		if (callback->le_param_updated) {
1799 			callback->le_param_updated(conn, conn->le.interval,
1800 						   conn->le.latency,
1801 						   conn->le.timeout);
1802 		}
1803 	}
1804 
1805 	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
1806 		if (cb->le_param_updated) {
1807 			cb->le_param_updated(conn, conn->le.interval,
1808 					     conn->le.latency,
1809 					     conn->le.timeout);
1810 		}
1811 	}
1812 }
1813 
1814 #if defined(CONFIG_BT_USER_DATA_LEN_UPDATE)
notify_le_data_len_updated(struct bt_conn * conn)1815 void notify_le_data_len_updated(struct bt_conn *conn)
1816 {
1817 	struct bt_conn_cb *callback;
1818 
1819 	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
1820 		if (callback->le_data_len_updated) {
1821 			callback->le_data_len_updated(conn, &conn->le.data_len);
1822 		}
1823 	}
1824 
1825 	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
1826 		if (cb->le_data_len_updated) {
1827 			cb->le_data_len_updated(conn, &conn->le.data_len);
1828 		}
1829 	}
1830 }
1831 #endif
1832 
1833 #if defined(CONFIG_BT_USER_PHY_UPDATE)
notify_le_phy_updated(struct bt_conn * conn)1834 void notify_le_phy_updated(struct bt_conn *conn)
1835 {
1836 	struct bt_conn_cb *callback;
1837 
1838 	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
1839 		if (callback->le_phy_updated) {
1840 			callback->le_phy_updated(conn, &conn->le.phy);
1841 		}
1842 	}
1843 
1844 	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
1845 		if (cb->le_phy_updated) {
1846 			cb->le_phy_updated(conn, &conn->le.phy);
1847 		}
1848 	}
1849 }
1850 #endif
1851 
le_param_req(struct bt_conn * conn,struct bt_le_conn_param * param)1852 bool le_param_req(struct bt_conn *conn, struct bt_le_conn_param *param)
1853 {
1854 	if (!bt_le_conn_params_valid(param)) {
1855 		return false;
1856 	}
1857 
1858 	struct bt_conn_cb *callback;
1859 
1860 	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
1861 		if (!callback->le_param_req) {
1862 			continue;
1863 		}
1864 
1865 		if (!callback->le_param_req(conn, param)) {
1866 			return false;
1867 		}
1868 
1869 		/* The callback may modify the parameters so we need to
1870 		 * double-check that it returned valid parameters.
1871 		 */
1872 		if (!bt_le_conn_params_valid(param)) {
1873 			return false;
1874 		}
1875 	}
1876 
1877 	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
1878 		if (!cb->le_param_req) {
1879 			continue;
1880 		}
1881 
1882 		if (!cb->le_param_req(conn, param)) {
1883 			return false;
1884 		}
1885 
1886 		/* The callback may modify the parameters so we need to
1887 		 * double-check that it returned valid parameters.
1888 		 */
1889 		if (!bt_le_conn_params_valid(param)) {
1890 			return false;
1891 		}
1892 	}
1893 
1894 	/* Default to accepting if there's no app callback */
1895 	return true;
1896 }
1897 
send_conn_le_param_update(struct bt_conn * conn,const struct bt_le_conn_param * param)1898 static int send_conn_le_param_update(struct bt_conn *conn,
1899 				const struct bt_le_conn_param *param)
1900 {
1901 	LOG_DBG("conn %p features 0x%02x params (%d-%d %d %d)", conn, conn->le.features[0],
1902 		param->interval_min, param->interval_max, param->latency, param->timeout);
1903 
1904 	/* Proceed only if connection parameters contains valid values*/
1905 	if (!bt_le_conn_params_valid(param)) {
1906 		return -EINVAL;
1907 	}
1908 
1909 	/* Use LE connection parameter request if both local and remote support
1910 	 * it; or if local role is central then use LE connection update.
1911 	 */
1912 	if ((BT_FEAT_LE_CONN_PARAM_REQ_PROC(bt_dev.le.features) &&
1913 	     BT_FEAT_LE_CONN_PARAM_REQ_PROC(conn->le.features) &&
1914 	     !atomic_test_bit(conn->flags, BT_CONN_PERIPHERAL_PARAM_L2CAP)) ||
1915 	     (conn->role == BT_HCI_ROLE_CENTRAL)) {
1916 		int rc;
1917 
1918 		rc = bt_conn_le_conn_update(conn, param);
1919 
1920 		/* store those in case of fallback to L2CAP */
1921 		if (rc == 0) {
1922 			conn->le.interval_min = param->interval_min;
1923 			conn->le.interval_max = param->interval_max;
1924 			conn->le.pending_latency = param->latency;
1925 			conn->le.pending_timeout = param->timeout;
1926 		}
1927 
1928 		return rc;
1929 	}
1930 
1931 	/* If remote central does not support LL Connection Parameters Request
1932 	 * Procedure
1933 	 */
1934 	return bt_l2cap_update_conn_param(conn, param);
1935 }
1936 
1937 #if defined(CONFIG_BT_ISO_UNICAST)
conn_lookup_iso(struct bt_conn * conn)1938 static struct bt_conn *conn_lookup_iso(struct bt_conn *conn)
1939 {
1940 	int i;
1941 
1942 	for (i = 0; i < ARRAY_SIZE(iso_conns); i++) {
1943 		struct bt_conn *iso = bt_conn_ref(&iso_conns[i]);
1944 
1945 		if (iso == NULL) {
1946 			continue;
1947 		}
1948 
1949 		if (iso->iso.acl == conn) {
1950 			return iso;
1951 		}
1952 
1953 		bt_conn_unref(iso);
1954 	}
1955 
1956 	return NULL;
1957 }
1958 #endif /* CONFIG_BT_ISO */
1959 
1960 #if defined(CONFIG_BT_CLASSIC)
conn_lookup_sco(struct bt_conn * conn)1961 static struct bt_conn *conn_lookup_sco(struct bt_conn *conn)
1962 {
1963 	int i;
1964 
1965 	for (i = 0; i < ARRAY_SIZE(sco_conns); i++) {
1966 		struct bt_conn *sco = bt_conn_ref(&sco_conns[i]);
1967 
1968 		if (sco == NULL) {
1969 			continue;
1970 		}
1971 
1972 		if (sco->sco.acl == conn) {
1973 			return sco;
1974 		}
1975 
1976 		bt_conn_unref(sco);
1977 	}
1978 
1979 	return NULL;
1980 }
1981 #endif /* CONFIG_BT_CLASSIC */
1982 
/* Delayable work handler shared by several connection timers:
 *  - after disconnection: final cleanup (ISO/SCO children, L2CAP teardown,
 *    application notification, dropping the life-cycle reference);
 *  - LE central: create-connection timeout -> cancel the attempt;
 *  - LE peripheral: send the (application-provided or default) connection
 *    parameter update after the initial CONN_UPDATE_TIMEOUT.
 */
static void deferred_work(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct bt_conn *conn = CONTAINER_OF(dwork, struct bt_conn, deferred_work);
	const struct bt_le_conn_param *param;

	LOG_DBG("conn %p", conn);

	if (conn->state == BT_CONN_DISCONNECTED) {
#if defined(CONFIG_BT_ISO_UNICAST)
		struct bt_conn *iso;

		if (conn->type == BT_CONN_TYPE_ISO) {
			/* bt_iso_disconnected is responsible for unref'ing the
			 * connection pointer, as it is conditional on whether
			 * the connection is a central or peripheral.
			 */
			bt_iso_disconnected(conn);
			return;
		}

		/* Mark all ISO channels associated
		 * with ACL conn as not connected, and
		 * remove ACL reference
		 */
		iso = conn_lookup_iso(conn);
		while (iso != NULL) {
			struct bt_iso_chan *chan = iso->iso.chan;

			if (chan != NULL) {
				bt_iso_chan_set_state(chan,
						      BT_ISO_STATE_DISCONNECTING);
			}

			bt_iso_cleanup_acl(iso);

			bt_conn_unref(iso);
			iso = conn_lookup_iso(conn);
		}
#endif
#if defined(CONFIG_BT_CLASSIC)
		struct bt_conn *sco;

		/* Mark all SCO channels associated
		 * with ACL conn as not connected, and
		 * remove ACL reference
		 */
		sco = conn_lookup_sco(conn);
		while (sco != NULL) {
			struct bt_sco_chan *chan = sco->sco.chan;

			if (chan != NULL) {
				bt_sco_chan_set_state(chan,
						      BT_SCO_STATE_DISCONNECTING);
			}

			bt_sco_cleanup_acl(sco);

			bt_conn_unref(sco);
			sco = conn_lookup_sco(conn);
		}
#endif /* CONFIG_BT_CLASSIC */
		bt_l2cap_disconnected(conn);
		notify_disconnected(conn);

		/* Release the reference we took for the very first
		 * state transition.
		 */
		bt_conn_unref(conn);
		return;
	}

	/* The timer paths below only apply to LE links */
	if (conn->type != BT_CONN_TYPE_LE) {
		return;
	}

	if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
	    conn->role == BT_CONN_ROLE_CENTRAL) {
		/* we don't call bt_conn_disconnect as it would also clear
		 * auto connect flag if it was set, instead just cancel
		 * connection directly
		 */
		bt_le_create_conn_cancel();
		return;
	}

	/* if application set own params use those, otherwise use defaults. */
	if (atomic_test_and_clear_bit(conn->flags,
				      BT_CONN_PERIPHERAL_PARAM_SET)) {
		int err;

		param = BT_LE_CONN_PARAM(conn->le.interval_min,
					 conn->le.interval_max,
					 conn->le.pending_latency,
					 conn->le.pending_timeout);

		err = send_conn_le_param_update(conn, param);
		if (!err) {
			atomic_clear_bit(conn->flags,
					 BT_CONN_PERIPHERAL_PARAM_AUTO_UPDATE);
		} else {
			LOG_WRN("Send LE param update failed (err %d)", err);
		}
	} else if (IS_ENABLED(CONFIG_BT_GAP_AUTO_UPDATE_CONN_PARAMS)) {
#if defined(CONFIG_BT_GAP_PERIPHERAL_PREF_PARAMS)
		int err;

		param = BT_LE_CONN_PARAM(
				CONFIG_BT_PERIPHERAL_PREF_MIN_INT,
				CONFIG_BT_PERIPHERAL_PREF_MAX_INT,
				CONFIG_BT_PERIPHERAL_PREF_LATENCY,
				CONFIG_BT_PERIPHERAL_PREF_TIMEOUT);

		err = send_conn_le_param_update(conn, param);
		if (!err) {
			atomic_set_bit(conn->flags,
				       BT_CONN_PERIPHERAL_PARAM_AUTO_UPDATE);
		} else {
			LOG_WRN("Send auto LE param update failed (err %d)",
				err);
		}
#endif
	}

	atomic_set_bit(conn->flags, BT_CONN_PERIPHERAL_PARAM_UPDATE);
}
2109 
/* Allocate a fresh ACL connection object from the static pool, or NULL when
 * the pool is exhausted.
 */
static struct bt_conn *acl_conn_new(void)
{
	return bt_conn_new(acl_conns, ARRAY_SIZE(acl_conns));
}
2114 
2115 #if defined(CONFIG_BT_CLASSIC)
/* Tear down a SCO connection object: detach it from its parent ACL and drop
 * its reference.
 */
void bt_sco_cleanup(struct bt_conn *sco_conn)
{
	bt_sco_cleanup_acl(sco_conn);
	bt_conn_unref(sco_conn);
}
2121 
/* Allocate a fresh SCO connection object from the static pool, or NULL when
 * the pool is exhausted.
 */
static struct bt_conn *sco_conn_new(void)
{
	return bt_conn_new(sco_conns, ARRAY_SIZE(sco_conns));
}
2126 
/* Initiate a BR/EDR (Classic) connection to @a peer by sending HCI Create
 * Connection. If a connection to the peer already exists and is usable
 * (initiating or connected) its reference is returned instead of starting a
 * new one. Returns a referenced connection object, or NULL on failure.
 */
struct bt_conn *bt_conn_create_br(const bt_addr_t *peer,
				  const struct bt_br_conn_param *param)
{
	struct bt_hci_cp_connect *cp;
	struct bt_conn *conn;
	struct net_buf *buf;

	conn = bt_conn_lookup_addr_br(peer);
	if (conn) {
		switch (conn->state) {
		case BT_CONN_INITIATING:
		case BT_CONN_CONNECTED:
			/* Reuse the existing (pending or live) connection */
			return conn;
		default:
			/* Connection exists but is in an unusable state */
			bt_conn_unref(conn);
			return NULL;
		}
	}

	conn = bt_conn_add_br(peer);
	if (!conn) {
		return NULL;
	}

	buf = bt_hci_cmd_create(BT_HCI_OP_CONNECT, sizeof(*cp));
	if (!buf) {
		bt_conn_unref(conn);
		return NULL;
	}

	cp = net_buf_add(buf, sizeof(*cp));

	(void)memset(cp, 0, sizeof(*cp));

	memcpy(&cp->bdaddr, peer, sizeof(cp->bdaddr));
	cp->packet_type = sys_cpu_to_le16(0xcc18); /* DM1 DH1 DM3 DH3 DM5 DH5 */
	cp->pscan_rep_mode = 0x02; /* R2 */
	cp->allow_role_switch = param->allow_role_switch ? 0x01 : 0x00;
	cp->clock_offset = 0x0000; /* TODO used cached clock offset */

	if (bt_hci_cmd_send_sync(BT_HCI_OP_CONNECT, buf, NULL) < 0) {
		bt_conn_unref(conn);
		return NULL;
	}

	bt_conn_set_state(conn, BT_CONN_INITIATING);
	conn->role = BT_CONN_ROLE_CENTRAL;

	return conn;
}
2177 
bt_conn_lookup_addr_sco(const bt_addr_t * peer)2178 struct bt_conn *bt_conn_lookup_addr_sco(const bt_addr_t *peer)
2179 {
2180 	int i;
2181 
2182 	for (i = 0; i < ARRAY_SIZE(sco_conns); i++) {
2183 		struct bt_conn *conn = bt_conn_ref(&sco_conns[i]);
2184 
2185 		if (!conn) {
2186 			continue;
2187 		}
2188 
2189 		if (conn->type != BT_CONN_TYPE_SCO) {
2190 			bt_conn_unref(conn);
2191 			continue;
2192 		}
2193 
2194 		if (!bt_addr_eq(peer, &conn->sco.acl->br.dst)) {
2195 			bt_conn_unref(conn);
2196 			continue;
2197 		}
2198 
2199 		return conn;
2200 	}
2201 
2202 	return NULL;
2203 }
2204 
bt_conn_lookup_addr_br(const bt_addr_t * peer)2205 struct bt_conn *bt_conn_lookup_addr_br(const bt_addr_t *peer)
2206 {
2207 	int i;
2208 
2209 	for (i = 0; i < ARRAY_SIZE(acl_conns); i++) {
2210 		struct bt_conn *conn = bt_conn_ref(&acl_conns[i]);
2211 
2212 		if (!conn) {
2213 			continue;
2214 		}
2215 
2216 		if (conn->type != BT_CONN_TYPE_BR) {
2217 			bt_conn_unref(conn);
2218 			continue;
2219 		}
2220 
2221 		if (!bt_addr_eq(peer, &conn->br.dst)) {
2222 			bt_conn_unref(conn);
2223 			continue;
2224 		}
2225 
2226 		return conn;
2227 	}
2228 
2229 	return NULL;
2230 }
2231 
bt_conn_add_sco(const bt_addr_t * peer,int link_type)2232 struct bt_conn *bt_conn_add_sco(const bt_addr_t *peer, int link_type)
2233 {
2234 	struct bt_conn *sco_conn = sco_conn_new();
2235 
2236 	if (!sco_conn) {
2237 		return NULL;
2238 	}
2239 
2240 	sco_conn->sco.acl = bt_conn_lookup_addr_br(peer);
2241 	if (!sco_conn->sco.acl) {
2242 		bt_conn_unref(sco_conn);
2243 		return NULL;
2244 	}
2245 
2246 	sco_conn->type = BT_CONN_TYPE_SCO;
2247 
2248 	if (link_type == BT_HCI_SCO) {
2249 		if (BT_FEAT_LMP_ESCO_CAPABLE(bt_dev.features)) {
2250 			sco_conn->sco.pkt_type = (bt_dev.br.esco_pkt_type &
2251 						  ESCO_PKT_MASK);
2252 		} else {
2253 			sco_conn->sco.pkt_type = (bt_dev.br.esco_pkt_type &
2254 						  SCO_PKT_MASK);
2255 		}
2256 	} else if (link_type == BT_HCI_ESCO) {
2257 		sco_conn->sco.pkt_type = (bt_dev.br.esco_pkt_type &
2258 					  ~EDR_ESCO_PKT_MASK);
2259 	}
2260 
2261 	return sco_conn;
2262 }
2263 
bt_conn_add_br(const bt_addr_t * peer)2264 struct bt_conn *bt_conn_add_br(const bt_addr_t *peer)
2265 {
2266 	struct bt_conn *conn = acl_conn_new();
2267 
2268 	if (!conn) {
2269 		return NULL;
2270 	}
2271 
2272 	bt_addr_copy(&conn->br.dst, peer);
2273 	conn->type = BT_CONN_TYPE_BR;
2274 	conn->tx_data_pull = l2cap_br_data_pull;
2275 	conn->get_and_clear_cb = acl_get_and_clear_cb;
2276 	conn->has_data = acl_has_data;
2277 
2278 	return conn;
2279 }
2280 
bt_hci_connect_br_cancel(struct bt_conn * conn)2281 static int bt_hci_connect_br_cancel(struct bt_conn *conn)
2282 {
2283 	struct bt_hci_cp_connect_cancel *cp;
2284 	struct bt_hci_rp_connect_cancel *rp;
2285 	struct net_buf *buf, *rsp;
2286 	int err;
2287 
2288 	buf = bt_hci_cmd_create(BT_HCI_OP_CONNECT_CANCEL, sizeof(*cp));
2289 	if (!buf) {
2290 		return -ENOBUFS;
2291 	}
2292 
2293 	cp = net_buf_add(buf, sizeof(*cp));
2294 	memcpy(&cp->bdaddr, &conn->br.dst, sizeof(cp->bdaddr));
2295 
2296 	err = bt_hci_cmd_send_sync(BT_HCI_OP_CONNECT_CANCEL, buf, &rsp);
2297 	if (err) {
2298 		return err;
2299 	}
2300 
2301 	rp = (void *)rsp->data;
2302 
2303 	err = rp->status ? -EIO : 0;
2304 
2305 	net_buf_unref(rsp);
2306 
2307 	return err;
2308 }
2309 
2310 #endif /* CONFIG_BT_CLASSIC */
2311 
2312 #if defined(CONFIG_BT_SMP)
bt_conn_ltk_present(const struct bt_conn * conn)2313 bool bt_conn_ltk_present(const struct bt_conn *conn)
2314 {
2315 	const struct bt_keys *keys = conn->le.keys;
2316 
2317 	if (!keys) {
2318 		keys = bt_keys_find_addr(conn->id, &conn->le.dst);
2319 	}
2320 
2321 	if (keys) {
2322 		if (conn->role == BT_HCI_ROLE_CENTRAL) {
2323 			return keys->keys & (BT_KEYS_LTK_P256 | BT_KEYS_PERIPH_LTK);
2324 		} else {
2325 			return keys->keys & (BT_KEYS_LTK_P256 | BT_KEYS_LTK);
2326 		}
2327 	}
2328 
2329 	return false;
2330 }
2331 
bt_conn_identity_resolved(struct bt_conn * conn)2332 void bt_conn_identity_resolved(struct bt_conn *conn)
2333 {
2334 	const bt_addr_le_t *rpa;
2335 
2336 	if (conn->role == BT_HCI_ROLE_CENTRAL) {
2337 		rpa = &conn->le.resp_addr;
2338 	} else {
2339 		rpa = &conn->le.init_addr;
2340 	}
2341 
2342 
2343 	struct bt_conn_cb *callback;
2344 
2345 	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
2346 		if (callback->identity_resolved) {
2347 			callback->identity_resolved(conn, rpa, &conn->le.dst);
2348 		}
2349 	}
2350 
2351 	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
2352 		if (cb->identity_resolved) {
2353 			cb->identity_resolved(conn, rpa, &conn->le.dst);
2354 		}
2355 	}
2356 }
2357 
/* Issue HCI LE Start Encryption for this connection with the given
 * rand/ediv/LTK. A key shorter than the command's LTK field is
 * zero-padded. Returns 0 on success or a negative errno.
 */
int bt_conn_le_start_encryption(struct bt_conn *conn, uint8_t rand[8],
				uint8_t ediv[2], const uint8_t *ltk, size_t len)
{
	struct bt_hci_cp_le_start_encryption *cp;
	struct net_buf *cmd;

	/* The LTK must fit the fixed-size HCI command field. */
	if (len > sizeof(cp->ltk)) {
		return -EINVAL;
	}

	cmd = bt_hci_cmd_create(BT_HCI_OP_LE_START_ENCRYPTION, sizeof(*cp));
	if (cmd == NULL) {
		return -ENOBUFS;
	}

	cp = net_buf_add(cmd, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(conn->handle);
	memcpy(&cp->rand, rand, sizeof(cp->rand));
	memcpy(&cp->ediv, ediv, sizeof(cp->ediv));

	/* Zero the whole field first so any tail past @len is padded. */
	(void)memset(cp->ltk, 0, sizeof(cp->ltk));
	memcpy(cp->ltk, ltk, len);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_START_ENCRYPTION, cmd, NULL);
}
2385 #endif /* CONFIG_BT_SMP */
2386 
2387 #if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CLASSIC)
/* Return the encryption key size (in bytes) of an encrypted connection,
 * or 0 if the link is not encrypted or the size cannot be determined.
 */
uint8_t bt_conn_enc_key_size(const struct bt_conn *conn)
{
	if (!conn->encrypt) {
		return 0;
	}

	if (IS_ENABLED(CONFIG_BT_CLASSIC) &&
	    conn->type == BT_CONN_TYPE_BR) {
		/* BR/EDR does not cache the key size locally; ask the
		 * controller with Read Encryption Key Size.
		 */
		struct bt_hci_cp_read_encryption_key_size *cp;
		struct bt_hci_rp_read_encryption_key_size *rp;
		struct net_buf *buf;
		struct net_buf *rsp;
		uint8_t key_size;

		buf = bt_hci_cmd_create(BT_HCI_OP_READ_ENCRYPTION_KEY_SIZE,
					sizeof(*cp));
		if (!buf) {
			return 0; /* treat allocation failure as "unknown" */
		}

		cp = net_buf_add(buf, sizeof(*cp));
		cp->handle = sys_cpu_to_le16(conn->handle);

		if (bt_hci_cmd_send_sync(BT_HCI_OP_READ_ENCRYPTION_KEY_SIZE,
					buf, &rsp)) {
			return 0;
		}

		rp = (void *)rsp->data;

		/* Non-zero HCI status means the controller rejected it. */
		key_size = rp->status ? 0 : rp->key_size;

		net_buf_unref(rsp);

		return key_size;
	}

	if (IS_ENABLED(CONFIG_BT_SMP)) {
		/* For LE, the negotiated size is stored with the keys. */
		return conn->le.keys ? conn->le.keys->enc_size : 0;
	}

	return 0;
}
2431 
/* Clear transient pairing state after a pairing attempt concludes
 * (successfully or not).
 */
static void reset_pairing(struct bt_conn *conn)
{
#if defined(CONFIG_BT_CLASSIC)
	/* Drop any in-progress BR/EDR pairing flags. */
	if (conn->type == BT_CONN_TYPE_BR) {
		atomic_clear_bit(conn->flags, BT_CONN_BR_PAIRING);
		atomic_clear_bit(conn->flags, BT_CONN_BR_PAIRING_INITIATOR);
		atomic_clear_bit(conn->flags, BT_CONN_BR_LEGACY_SECURE);
	}
#endif /* CONFIG_BT_CLASSIC */

	/* Reset required security level to current operational */
	conn->required_sec_level = conn->sec_level;
}
2445 
/* Propagate a security-level change: reset pairing state, inform L2CAP
 * (and ISO when enabled), then invoke all registered security_changed
 * callbacks. @hci_err is the raw HCI status, @err its mapped
 * bt_security_err for the application callbacks.
 */
void bt_conn_security_changed(struct bt_conn *conn, uint8_t hci_err,
			      enum bt_security_err err)
{
	reset_pairing(conn);
	bt_l2cap_security_changed(conn, hci_err);
	if (IS_ENABLED(CONFIG_BT_ISO_CENTRAL)) {
		bt_iso_security_changed(conn, hci_err);
	}

	/* Dynamically registered callbacks first, then static sections. */
	struct bt_conn_cb *callback;

	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
		if (callback->security_changed) {
			callback->security_changed(conn, conn->sec_level, err);
		}
	}

	STRUCT_SECTION_FOREACH(bt_conn_cb, cb) {
		if (cb->security_changed) {
			cb->security_changed(conn, conn->sec_level, err);
		}
	}

#if defined(CONFIG_BT_KEYS_OVERWRITE_OLDEST)
	/* Successful encryption marks the keys as recently used so they
	 * are not the first candidates for eviction.
	 */
	if (!err && conn->sec_level >= BT_SECURITY_L2) {
		if (conn->type == BT_CONN_TYPE_LE) {
			bt_keys_update_usage(conn->id, bt_conn_get_dst(conn));
		}

#if defined(CONFIG_BT_CLASSIC)
		if (conn->type == BT_CONN_TYPE_BR) {
			bt_keys_link_key_update_usage(&conn->br.dst);
		}
#endif /* CONFIG_BT_CLASSIC */

	}
#endif
}
2484 
start_security(struct bt_conn * conn)2485 static int start_security(struct bt_conn *conn)
2486 {
2487 	if (IS_ENABLED(CONFIG_BT_CLASSIC) && conn->type == BT_CONN_TYPE_BR) {
2488 		return bt_ssp_start_security(conn);
2489 	}
2490 
2491 	if (IS_ENABLED(CONFIG_BT_SMP)) {
2492 		return bt_smp_start_security(conn);
2493 	}
2494 
2495 	return -EINVAL;
2496 }
2497 
/* Request that the connection's security be raised to @sec, optionally
 * OR'ed with BT_SECURITY_FORCE_PAIR to force a fresh pairing.
 * Returns 0 if nothing needs to be done or the procedure started,
 * -ENOTCONN when not connected, or the transport error.
 */
int bt_conn_set_security(struct bt_conn *conn, bt_security_t sec)
{
	int err;

	if (conn->state != BT_CONN_CONNECTED) {
		return -ENOTCONN;
	}

	/* Split the force-pair flag off the requested level. */
	bool force_pair = (sec & BT_SECURITY_FORCE_PAIR) != 0;

	sec &= ~BT_SECURITY_FORCE_PAIR;

	/* Kconfig-selected pairing models pin the effective level. */
	if (IS_ENABLED(CONFIG_BT_SMP_SC_ONLY)) {
		sec = BT_SECURITY_L4;
	}

	if (IS_ENABLED(CONFIG_BT_SMP_OOB_LEGACY_PAIR_ONLY)) {
		sec = BT_SECURITY_L3;
	}

	/* Already at (or working towards) the requested level. */
	if (!force_pair && (conn->sec_level >= sec || conn->required_sec_level >= sec)) {
		return 0;
	}

	atomic_set_bit_to(conn->flags, BT_CONN_FORCE_PAIR, force_pair);
	conn->required_sec_level = sec;

	err = start_security(conn);
	if (err) {
		/* Roll back to the current operational level on failure. */
		conn->required_sec_level = conn->sec_level;
	}

	return err;
}
2535 
/* Return the connection's current (operational) security level. */
bt_security_t bt_conn_get_security(const struct bt_conn *conn)
{
	return conn->sec_level;
}
2540 #else
/* Without SMP/SSP support every link is unauthenticated: level 1. */
bt_security_t bt_conn_get_security(const struct bt_conn *conn)
{
	return BT_SECURITY_L1;
}
2545 #endif /* CONFIG_BT_SMP */
2546 
bt_conn_cb_register(struct bt_conn_cb * cb)2547 int bt_conn_cb_register(struct bt_conn_cb *cb)
2548 {
2549 	if (sys_slist_find(&conn_cbs, &cb->_node, NULL)) {
2550 		return -EEXIST;
2551 	}
2552 
2553 	sys_slist_append(&conn_cbs, &cb->_node);
2554 
2555 	return 0;
2556 }
2557 
bt_conn_cb_unregister(struct bt_conn_cb * cb)2558 int bt_conn_cb_unregister(struct bt_conn_cb *cb)
2559 {
2560 	CHECKIF(cb == NULL) {
2561 		return -EINVAL;
2562 	}
2563 
2564 	if (!sys_slist_find_and_remove(&conn_cbs, &cb->_node)) {
2565 		return -ENOENT;
2566 	}
2567 
2568 	return 0;
2569 }
2570 
bt_conn_exists_le(uint8_t id,const bt_addr_le_t * peer)2571 bool bt_conn_exists_le(uint8_t id, const bt_addr_le_t *peer)
2572 {
2573 	struct bt_conn *conn = bt_conn_lookup_addr_le(id, peer);
2574 
2575 	if (conn) {
2576 		/* Connection object already exists.
2577 		 * If the connection state is not "disconnected",then the
2578 		 * connection was created but has not yet been disconnected.
2579 		 * If the connection state is "disconnected" then the connection
2580 		 * still has valid references. The last reference of the stack
2581 		 * is released after the disconnected callback.
2582 		 */
2583 		LOG_WRN("Found valid connection (%p) with address %s in %s state ", conn,
2584 			bt_addr_le_str(peer), state2str(conn->state));
2585 		bt_conn_unref(conn);
2586 		return true;
2587 	}
2588 
2589 	return false;
2590 }
2591 
/* Allocate an ACL connection object and initialize it as an LE link to
 * @peer on identity @id. Returns NULL when the pool is exhausted.
 */
struct bt_conn *bt_conn_add_le(uint8_t id, const bt_addr_le_t *peer)
{
	struct bt_conn *le_conn = acl_conn_new();

	if (le_conn == NULL) {
		return NULL;
	}

	le_conn->id = id;
	bt_addr_le_copy(&le_conn->le.dst, peer);
#if defined(CONFIG_BT_SMP)
	/* Fresh links start unauthenticated. */
	le_conn->sec_level = BT_SECURITY_L1;
	le_conn->required_sec_level = BT_SECURITY_L1;
#endif /* CONFIG_BT_SMP */
	le_conn->type = BT_CONN_TYPE_LE;
	/* Hook up the LE-specific TX data callbacks. */
	le_conn->tx_data_pull = l2cap_data_pull;
	le_conn->get_and_clear_cb = acl_get_and_clear_cb;
	le_conn->has_data = acl_has_data;
	le_conn->le.interval_min = BT_GAP_INIT_CONN_INT_MIN;
	le_conn->le.interval_max = BT_GAP_INIT_CONN_INT_MAX;

	return le_conn;
}
2615 
bt_conn_is_peer_addr_le(const struct bt_conn * conn,uint8_t id,const bt_addr_le_t * peer)2616 bool bt_conn_is_peer_addr_le(const struct bt_conn *conn, uint8_t id,
2617 			     const bt_addr_le_t *peer)
2618 {
2619 	if (id != conn->id) {
2620 		return false;
2621 	}
2622 
2623 	/* Check against conn dst address as it may be the identity address */
2624 	if (bt_addr_le_eq(peer, &conn->le.dst)) {
2625 		return true;
2626 	}
2627 
2628 	/* Check against initial connection address */
2629 	if (conn->role == BT_HCI_ROLE_CENTRAL) {
2630 		return bt_addr_le_eq(peer, &conn->le.resp_addr);
2631 	}
2632 
2633 	return bt_addr_le_eq(peer, &conn->le.init_addr);
2634 }
2635 
/* Find the LE connection matching (@id, @peer).
 * Returns a referenced object (caller must unref) or NULL.
 */
struct bt_conn *bt_conn_lookup_addr_le(uint8_t id, const bt_addr_le_t *peer)
{
	for (size_t idx = 0; idx < ARRAY_SIZE(acl_conns); idx++) {
		struct bt_conn *candidate = bt_conn_ref(&acl_conns[idx]);

		if (candidate == NULL) {
			continue;
		}

		if (candidate->type == BT_CONN_TYPE_LE &&
		    bt_conn_is_peer_addr_le(candidate, id, peer)) {
			/* Caller takes over this reference. */
			return candidate;
		}

		bt_conn_unref(candidate);
	}

	return NULL;
}
2662 
/* Find an LE connection on identity @id in the given @state, optionally
 * restricted to @peer (NULL matches any peer).
 * Returns a referenced object (caller must unref) or NULL.
 */
struct bt_conn *bt_conn_lookup_state_le(uint8_t id, const bt_addr_le_t *peer,
					const bt_conn_state_t state)
{
	for (size_t idx = 0; idx < ARRAY_SIZE(acl_conns); idx++) {
		struct bt_conn *candidate = bt_conn_ref(&acl_conns[idx]);
		bool match;

		if (candidate == NULL) {
			continue;
		}

		match = (candidate->type == BT_CONN_TYPE_LE) &&
			(peer == NULL ||
			 bt_conn_is_peer_addr_le(candidate, id, peer)) &&
			(candidate->state == state && candidate->id == id);

		if (match) {
			return candidate;
		}

		bt_conn_unref(candidate);
	}

	return NULL;
}
2695 
/* Return the peer's LE destination address (identity address once
 * resolved). Valid only for LE-capable connection objects.
 */
const bt_addr_le_t *bt_conn_get_dst(const struct bt_conn *conn)
{
	return &conn->le.dst;
}
2700 
/* Collapse the fine-grained internal connection state machine into the
 * four states exposed through the public API.
 */
static enum bt_conn_state conn_internal_to_public_state(bt_conn_state_t state)
{
	switch (state) {
	case BT_CONN_CONNECTED:
		return BT_CONN_STATE_CONNECTED;
	case BT_CONN_DISCONNECTING:
		return BT_CONN_STATE_DISCONNECTING;
	case BT_CONN_DISCONNECTED:
	case BT_CONN_DISCONNECT_COMPLETE:
		return BT_CONN_STATE_DISCONNECTED;
	/* All pre-connection phases read as "connecting". */
	case BT_CONN_SCAN_BEFORE_INITIATING:
	case BT_CONN_INITIATING_FILTER_LIST:
	case BT_CONN_ADV_CONNECTABLE:
	case BT_CONN_ADV_DIR_CONNECTABLE:
	case BT_CONN_INITIATING:
		return BT_CONN_STATE_CONNECTING;
	default:
		__ASSERT(false, "Invalid conn state %u", state);
		return 0;
	}
}
2722 
/* Fill @info with a snapshot of the connection's public state.
 * Returns 0 on success, -EINVAL for an unknown connection type.
 */
int bt_conn_get_info(const struct bt_conn *conn, struct bt_conn_info *info)
{
	info->type = conn->type;
	info->role = conn->role;
	info->id = conn->id;
	info->state = conn_internal_to_public_state(conn->state);
	info->security.flags = 0;
	info->security.level = bt_conn_get_security(conn);
#if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CLASSIC)
	info->security.enc_key_size = bt_conn_enc_key_size(conn);
#else
	info->security.enc_key_size = 0;
#endif /* CONFIG_BT_SMP || CONFIG_BT_CLASSIC */

	switch (conn->type) {
	case BT_CONN_TYPE_LE:
		info->le.dst = &conn->le.dst;
		info->le.src = &bt_dev.id_addr[conn->id];
		/* local/remote are the on-air addresses: which is which
		 * depends on who initiated the connection.
		 */
		if (conn->role == BT_HCI_ROLE_CENTRAL) {
			info->le.local = &conn->le.init_addr;
			info->le.remote = &conn->le.resp_addr;
		} else {
			info->le.local = &conn->le.resp_addr;
			info->le.remote = &conn->le.init_addr;
		}
		info->le.interval = conn->le.interval;
		info->le.latency = conn->le.latency;
		info->le.timeout = conn->le.timeout;
#if defined(CONFIG_BT_USER_PHY_UPDATE)
		info->le.phy = &conn->le.phy;
#endif
#if defined(CONFIG_BT_USER_DATA_LEN_UPDATE)
		info->le.data_len = &conn->le.data_len;
#endif
		if (conn->le.keys && (conn->le.keys->flags & BT_KEYS_SC)) {
			info->security.flags |= BT_SECURITY_FLAG_SC;
		}
		if (conn->le.keys && (conn->le.keys->flags & BT_KEYS_OOB)) {
			info->security.flags |= BT_SECURITY_FLAG_OOB;
		}
		return 0;
#if defined(CONFIG_BT_CLASSIC)
	case BT_CONN_TYPE_BR:
		info->br.dst = &conn->br.dst;
		return 0;
#endif
#if defined(CONFIG_BT_ISO)
	case BT_CONN_TYPE_ISO:
		/* Unicast ISO channels inherit addresses from their ACL;
		 * broadcast channels have no peer address.
		 */
		if (IS_ENABLED(CONFIG_BT_ISO_UNICAST) &&
		    conn->iso.info.type == BT_ISO_CHAN_TYPE_CONNECTED && conn->iso.acl != NULL) {
			info->le.dst = &conn->iso.acl->le.dst;
			info->le.src = &bt_dev.id_addr[conn->iso.acl->id];
		} else {
			info->le.src = BT_ADDR_LE_NONE;
			info->le.dst = BT_ADDR_LE_NONE;
		}
		return 0;
#endif
	default:
		break;
	}

	return -EINVAL;
}
2787 
/* Fill @remote_info with the remote device's feature/version data.
 * Returns -EBUSY while the automatic feature (and, when enabled,
 * version) exchange has not completed yet.
 */
int bt_conn_get_remote_info(struct bt_conn *conn,
			    struct bt_conn_remote_info *remote_info)
{
	if (!atomic_test_bit(conn->flags, BT_CONN_AUTO_FEATURE_EXCH) ||
	    (IS_ENABLED(CONFIG_BT_REMOTE_VERSION) &&
	     !atomic_test_bit(conn->flags, BT_CONN_AUTO_VERSION_INFO))) {
		return -EBUSY;
	}

	remote_info->type = conn->type;
#if defined(CONFIG_BT_REMOTE_VERSION)
	/* The conn->rv values will be just zeroes if the operation failed */
	remote_info->version = conn->rv.version;
	remote_info->manufacturer = conn->rv.manufacturer;
	remote_info->subversion = conn->rv.subversion;
#else
	remote_info->version = 0;
	remote_info->manufacturer = 0;
	remote_info->subversion = 0;
#endif

	switch (conn->type) {
	case BT_CONN_TYPE_LE:
		remote_info->le.features = conn->le.features;
		return 0;
#if defined(CONFIG_BT_CLASSIC)
	case BT_CONN_TYPE_BR:
		/* TODO: Make sure the HCI commands to read br features and
		*  extended features has finished. */
		return -ENOTSUP;
#endif
	default:
		return -EINVAL;
	}
}
2823 
2824 /* Read Transmit Power Level HCI command */
static int bt_conn_get_tx_power_level(struct bt_conn *conn, uint8_t type,
				      int8_t *tx_power_level)
{
	struct bt_hci_cp_read_tx_power_level *cp;
	struct bt_hci_rp_read_tx_power_level *rp;
	struct net_buf *cmd;
	struct net_buf *rsp;
	int err;

	cmd = bt_hci_cmd_create(BT_HCI_OP_READ_TX_POWER_LEVEL, sizeof(*cp));
	if (cmd == NULL) {
		return -ENOBUFS;
	}

	/* Fill in which level (current/max) to read and for which handle. */
	cp = net_buf_add(cmd, sizeof(*cp));
	cp->type = type;
	cp->handle = sys_cpu_to_le16(conn->handle);

	err = bt_hci_cmd_send_sync(BT_HCI_OP_READ_TX_POWER_LEVEL, cmd, &rsp);
	if (err) {
		return err;
	}

	rp = (void *)rsp->data;
	*tx_power_level = rp->tx_power_level;
	net_buf_unref(rsp);

	return 0;
}
2854 
2855 #if defined(CONFIG_BT_TRANSMIT_POWER_CONTROL)
notify_tx_power_report(struct bt_conn * conn,struct bt_conn_le_tx_power_report report)2856 void notify_tx_power_report(struct bt_conn *conn,
2857 			    struct bt_conn_le_tx_power_report report)
2858 {
2859 	struct bt_conn_cb *callback;
2860 
2861 	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
2862 		if (callback->tx_power_report) {
2863 			callback->tx_power_report(conn, &report);
2864 		}
2865 	}
2866 
2867 	STRUCT_SECTION_FOREACH(bt_conn_cb, cb)
2868 	{
2869 		if (cb->tx_power_report) {
2870 			cb->tx_power_report(conn, &report);
2871 		}
2872 	}
2873 }
2874 
bt_conn_le_enhanced_get_tx_power_level(struct bt_conn * conn,struct bt_conn_le_tx_power * tx_power)2875 int bt_conn_le_enhanced_get_tx_power_level(struct bt_conn *conn,
2876 					   struct bt_conn_le_tx_power *tx_power)
2877 {
2878 	int err;
2879 	struct bt_hci_rp_le_read_tx_power_level *rp;
2880 	struct net_buf *rsp;
2881 	struct bt_hci_cp_le_read_tx_power_level *cp;
2882 	struct net_buf *buf;
2883 
2884 	if (!tx_power->phy) {
2885 		return -EINVAL;
2886 	}
2887 
2888 	buf = bt_hci_cmd_create(BT_HCI_OP_LE_ENH_READ_TX_POWER_LEVEL, sizeof(*cp));
2889 	if (!buf) {
2890 		return -ENOBUFS;
2891 	}
2892 
2893 	cp = net_buf_add(buf, sizeof(*cp));
2894 	cp->handle = sys_cpu_to_le16(conn->handle);
2895 	cp->phy = tx_power->phy;
2896 
2897 	err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_ENH_READ_TX_POWER_LEVEL, buf, &rsp);
2898 	if (err) {
2899 		return err;
2900 	}
2901 
2902 	rp = (void *) rsp->data;
2903 	tx_power->phy = rp->phy;
2904 	tx_power->current_level = rp->current_tx_power_level;
2905 	tx_power->max_level = rp->max_tx_power_level;
2906 	net_buf_unref(rsp);
2907 
2908 	return 0;
2909 }
2910 
bt_conn_le_get_remote_tx_power_level(struct bt_conn * conn,enum bt_conn_le_tx_power_phy phy)2911 int bt_conn_le_get_remote_tx_power_level(struct bt_conn *conn,
2912 					 enum bt_conn_le_tx_power_phy phy)
2913 {
2914 	struct bt_hci_cp_le_read_tx_power_level *cp;
2915 	struct net_buf *buf;
2916 
2917 	if (!phy) {
2918 		return -EINVAL;
2919 	}
2920 
2921 	buf = bt_hci_cmd_create(BT_HCI_OP_LE_READ_REMOTE_TX_POWER_LEVEL, sizeof(*cp));
2922 	if (!buf) {
2923 		return -ENOBUFS;
2924 	}
2925 
2926 	cp = net_buf_add(buf, sizeof(*cp));
2927 	cp->handle = sys_cpu_to_le16(conn->handle);
2928 	cp->phy = phy;
2929 
2930 	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_REMOTE_TX_POWER_LEVEL, buf, NULL);
2931 }
2932 
/* Enable or disable local and remote TX power change reporting. */
int bt_conn_le_set_tx_power_report_enable(struct bt_conn *conn,
					  bool local_enable,
					  bool remote_enable)
{
	struct bt_hci_cp_le_set_tx_power_report_enable *cp;
	struct net_buf *cmd;

	cmd = bt_hci_cmd_create(BT_HCI_OP_LE_SET_TX_POWER_REPORT_ENABLE, sizeof(*cp));
	if (cmd == NULL) {
		return -ENOBUFS;
	}

	cp = net_buf_add(cmd, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(conn->handle);

	/* Translate the boolean flags to the HCI enable/disable codes. */
	if (local_enable) {
		cp->local_enable = BT_HCI_LE_TX_POWER_REPORT_ENABLE;
	} else {
		cp->local_enable = BT_HCI_LE_TX_POWER_REPORT_DISABLE;
	}

	if (remote_enable) {
		cp->remote_enable = BT_HCI_LE_TX_POWER_REPORT_ENABLE;
	} else {
		cp->remote_enable = BT_HCI_LE_TX_POWER_REPORT_DISABLE;
	}

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_TX_POWER_REPORT_ENABLE, cmd, NULL);
}
2954 #endif /* CONFIG_BT_TRANSMIT_POWER_CONTROL */
2955 
bt_conn_le_get_tx_power_level(struct bt_conn * conn,struct bt_conn_le_tx_power * tx_power_level)2956 int bt_conn_le_get_tx_power_level(struct bt_conn *conn,
2957 				  struct bt_conn_le_tx_power *tx_power_level)
2958 {
2959 	int err;
2960 
2961 	if (tx_power_level->phy != 0) {
2962 		if (IS_ENABLED(CONFIG_BT_TRANSMIT_POWER_CONTROL)) {
2963 			return bt_conn_le_enhanced_get_tx_power_level(conn, tx_power_level);
2964 		} else {
2965 			return -ENOTSUP;
2966 		}
2967 	}
2968 
2969 	err = bt_conn_get_tx_power_level(conn, BT_TX_POWER_LEVEL_CURRENT,
2970 					 &tx_power_level->current_level);
2971 	if (err) {
2972 		return err;
2973 	}
2974 
2975 	err = bt_conn_get_tx_power_level(conn, BT_TX_POWER_LEVEL_MAX,
2976 					 &tx_power_level->max_level);
2977 	return err;
2978 }
2979 
2980 #if defined(CONFIG_BT_PATH_LOSS_MONITORING)
notify_path_loss_threshold_report(struct bt_conn * conn,struct bt_conn_le_path_loss_threshold_report report)2981 void notify_path_loss_threshold_report(struct bt_conn *conn,
2982 				       struct bt_conn_le_path_loss_threshold_report report)
2983 {
2984 	struct bt_conn_cb *callback;
2985 
2986 	SYS_SLIST_FOR_EACH_CONTAINER(&callback_list, callback, _node) {
2987 		if (callback->path_loss_threshold_report) {
2988 			callback->path_loss_threshold_report(conn, &report);
2989 		}
2990 	}
2991 
2992 	STRUCT_SECTION_FOREACH(bt_conn_cb, cb)
2993 	{
2994 		if (cb->path_loss_threshold_report) {
2995 			cb->path_loss_threshold_report(conn, &report);
2996 		}
2997 	}
2998 }
2999 
bt_conn_le_set_path_loss_mon_param(struct bt_conn * conn,const struct bt_conn_le_path_loss_reporting_param * params)3000 int bt_conn_le_set_path_loss_mon_param(struct bt_conn *conn,
3001 				       const struct bt_conn_le_path_loss_reporting_param *params)
3002 {
3003 	struct bt_hci_cp_le_set_path_loss_reporting_parameters *cp;
3004 	struct net_buf *buf;
3005 
3006 	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_PATH_LOSS_REPORTING_PARAMETERS, sizeof(*cp));
3007 	if (!buf) {
3008 		return -ENOBUFS;
3009 	}
3010 
3011 	cp = net_buf_add(buf, sizeof(*cp));
3012 	cp->handle = sys_cpu_to_le16(conn->handle);
3013 	cp->high_threshold = params->high_threshold;
3014 	cp->high_hysteresis = params->high_hysteresis;
3015 	cp->low_threshold = params->low_threshold;
3016 	cp->low_hysteresis = params->low_hysteresis;
3017 	cp->min_time_spent = sys_cpu_to_le16(params->min_time_spent);
3018 
3019 	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_PATH_LOSS_REPORTING_PARAMETERS, buf, NULL);
3020 }
3021 
/* Turn path-loss threshold reporting on or off for this connection. */
int bt_conn_le_set_path_loss_mon_enable(struct bt_conn *conn, bool reporting_enable)
{
	struct bt_hci_cp_le_set_path_loss_reporting_enable *cp;
	struct net_buf *cmd;

	cmd = bt_hci_cmd_create(BT_HCI_OP_LE_SET_PATH_LOSS_REPORTING_ENABLE, sizeof(*cp));
	if (cmd == NULL) {
		return -ENOBUFS;
	}

	cp = net_buf_add(cmd, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(conn->handle);
	if (reporting_enable) {
		cp->enable = BT_HCI_LE_PATH_LOSS_REPORTING_ENABLE;
	} else {
		cp->enable = BT_HCI_LE_PATH_LOSS_REPORTING_DISABLE;
	}

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_PATH_LOSS_REPORTING_ENABLE, cmd, NULL);
}
3039 #endif /* CONFIG_BT_PATH_LOSS_MONITORING */
3040 
/* Request an update of the LE connection parameters.
 *
 * As central the request is sent immediately. As peripheral it is sent
 * immediately only once the parameter-update timer has expired;
 * otherwise the parameters are stored for the timer to send later.
 */
int bt_conn_le_param_update(struct bt_conn *conn,
			    const struct bt_le_conn_param *param)
{
	LOG_DBG("conn %p features 0x%02x params (%d-%d %d %d)", conn, conn->le.features[0],
		param->interval_min, param->interval_max, param->latency, param->timeout);

	if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
	    conn->role == BT_CONN_ROLE_CENTRAL) {
		return send_conn_le_param_update(conn, param);
	}

	if (IS_ENABLED(CONFIG_BT_PERIPHERAL)) {
		/* if peripheral conn param update timer expired just send request */
		if (atomic_test_bit(conn->flags, BT_CONN_PERIPHERAL_PARAM_UPDATE)) {
			return send_conn_le_param_update(conn, param);
		}

		/* store new conn params to be used by update timer */
		conn->le.interval_min = param->interval_min;
		conn->le.interval_max = param->interval_max;
		conn->le.pending_latency = param->latency;
		conn->le.pending_timeout = param->timeout;
		atomic_set_bit(conn->flags, BT_CONN_PERIPHERAL_PARAM_SET);
	}

	return 0;
}
3068 
3069 #if defined(CONFIG_BT_USER_DATA_LEN_UPDATE)
bt_conn_le_data_len_update(struct bt_conn * conn,const struct bt_conn_le_data_len_param * param)3070 int bt_conn_le_data_len_update(struct bt_conn *conn,
3071 			       const struct bt_conn_le_data_len_param *param)
3072 {
3073 	if (conn->le.data_len.tx_max_len == param->tx_max_len &&
3074 	    conn->le.data_len.tx_max_time == param->tx_max_time) {
3075 		return -EALREADY;
3076 	}
3077 
3078 	return bt_le_set_data_len(conn, param->tx_max_len, param->tx_max_time);
3079 }
3080 #endif /* CONFIG_BT_USER_DATA_LEN_UPDATE */
3081 
3082 #if defined(CONFIG_BT_USER_PHY_UPDATE)
/* Request an LE PHY update with the given preferences.
 *
 * Note: coded-PHY options map to BT_HCI_LE_PHY_CODED_ANY both when
 * both S2 and S8 are requested and when neither is ("no preference").
 */
int bt_conn_le_phy_update(struct bt_conn *conn,
			  const struct bt_conn_le_phy_param *param)
{
	uint8_t phy_opts, all_phys;

	if ((param->options & BT_CONN_LE_PHY_OPT_CODED_S2) &&
	    (param->options & BT_CONN_LE_PHY_OPT_CODED_S8)) {
		phy_opts = BT_HCI_LE_PHY_CODED_ANY;
	} else if (param->options & BT_CONN_LE_PHY_OPT_CODED_S2) {
		phy_opts = BT_HCI_LE_PHY_CODED_S2;
	} else if (param->options & BT_CONN_LE_PHY_OPT_CODED_S8) {
		phy_opts = BT_HCI_LE_PHY_CODED_S8;
	} else {
		phy_opts = BT_HCI_LE_PHY_CODED_ANY;
	}

	/* all_phys bits tell the controller "no preference" per direction,
	 * making it ignore the corresponding tx/rx PHY field.
	 */
	all_phys = 0U;
	if (param->pref_tx_phy == BT_GAP_LE_PHY_NONE) {
		all_phys |= BT_HCI_LE_PHY_TX_ANY;
	}

	if (param->pref_rx_phy == BT_GAP_LE_PHY_NONE) {
		all_phys |= BT_HCI_LE_PHY_RX_ANY;
	}

	return bt_le_set_phy(conn, all_phys, param->pref_tx_phy,
			     param->pref_rx_phy, phy_opts);
}
3111 #endif
3112 
3113 #if defined(CONFIG_BT_CENTRAL)
/* Cache the requested LE connection parameters on the object for use
 * when the connection is created.
 */
static void bt_conn_set_param_le(struct bt_conn *conn,
				 const struct bt_le_conn_param *param)
{
	conn->le.interval_min = param->interval_min;
	conn->le.interval_max = param->interval_max;
	conn->le.latency = param->latency;
	conn->le.timeout = param->timeout;
}
3122 
create_param_setup(const struct bt_conn_le_create_param * param)3123 static void create_param_setup(const struct bt_conn_le_create_param *param)
3124 {
3125 	bt_dev.create_param = *param;
3126 
3127 	bt_dev.create_param.timeout =
3128 		(bt_dev.create_param.timeout != 0) ?
3129 		bt_dev.create_param.timeout :
3130 		(MSEC_PER_SEC / 10) * CONFIG_BT_CREATE_CONN_TIMEOUT;
3131 
3132 	bt_dev.create_param.interval_coded =
3133 		(bt_dev.create_param.interval_coded != 0) ?
3134 		bt_dev.create_param.interval_coded :
3135 		bt_dev.create_param.interval;
3136 
3137 	bt_dev.create_param.window_coded =
3138 		(bt_dev.create_param.window_coded != 0) ?
3139 		bt_dev.create_param.window_coded :
3140 		bt_dev.create_param.window;
3141 }
3142 
3143 #if defined(CONFIG_BT_FILTER_ACCEPT_LIST)
/* Start auto-connection establishment towards devices on the filter
 * accept list. The stack keeps the (single) connection object internal;
 * the application is notified via the connected callback.
 */
int bt_conn_le_create_auto(const struct bt_conn_le_create_param *create_param,
			   const struct bt_le_conn_param *param)
{
	struct bt_conn *conn;
	int err;

	if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
		return -EAGAIN;
	}

	if (!bt_le_conn_params_valid(param)) {
		return -EINVAL;
	}

	/* Only one filter-list initiation may be in progress at a time. */
	conn = bt_conn_lookup_state_le(BT_ID_DEFAULT, BT_ADDR_LE_NONE,
				       BT_CONN_INITIATING_FILTER_LIST);
	if (conn) {
		bt_conn_unref(conn);
		return -EALREADY;
	}

	/* Scanning either to connect or explicit scan, either case scanner was
	 * started by application and should not be stopped.
	 */
	if (!BT_LE_STATES_SCAN_INIT(bt_dev.le.states) &&
	    atomic_test_bit(bt_dev.flags, BT_DEV_SCANNING)) {
		return -EINVAL;
	}

	if (atomic_test_bit(bt_dev.flags, BT_DEV_INITIATING)) {
		return -EINVAL;
	}

	if (!bt_id_scan_random_addr_check()) {
		return -EINVAL;
	}

	/* Placeholder object with a wildcard peer address: the actual peer
	 * is whichever filter-list device connects first.
	 */
	conn = bt_conn_add_le(BT_ID_DEFAULT, BT_ADDR_LE_NONE);
	if (!conn) {
		return -ENOMEM;
	}

	bt_conn_set_param_le(conn, param);
	create_param_setup(create_param);

	atomic_set_bit(conn->flags, BT_CONN_AUTO_CONNECT);
	bt_conn_set_state(conn, BT_CONN_INITIATING_FILTER_LIST);

	err = bt_le_create_conn(conn);
	if (err) {
		LOG_ERR("Failed to start filtered scan");
		conn->err = 0;
		bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
		bt_conn_unref(conn);
		return err;
	}

	/* Since we don't give the application a reference to manage in
	 * this case, we need to release this reference here.
	 */
	bt_conn_unref(conn);
	return 0;
}
3207 
bt_conn_create_auto_stop(void)3208 int bt_conn_create_auto_stop(void)
3209 {
3210 	struct bt_conn *conn;
3211 	int err;
3212 
3213 	if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
3214 		return -EINVAL;
3215 	}
3216 
3217 	conn = bt_conn_lookup_state_le(BT_ID_DEFAULT, BT_ADDR_LE_NONE,
3218 				       BT_CONN_INITIATING_FILTER_LIST);
3219 	if (!conn) {
3220 		return -EINVAL;
3221 	}
3222 
3223 	if (!atomic_test_bit(bt_dev.flags, BT_DEV_INITIATING)) {
3224 		return -EINVAL;
3225 	}
3226 
3227 	bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
3228 	bt_conn_unref(conn);
3229 
3230 	err = bt_le_create_conn_cancel();
3231 	if (err) {
3232 		LOG_ERR("Failed to stop initiator");
3233 		return err;
3234 	}
3235 
3236 	return 0;
3237 }
3238 #endif /* defined(CONFIG_BT_FILTER_ACCEPT_LIST) */
3239 
conn_le_create_common_checks(const bt_addr_le_t * peer,const struct bt_le_conn_param * conn_param)3240 static int conn_le_create_common_checks(const bt_addr_le_t *peer,
3241 					const struct bt_le_conn_param *conn_param)
3242 {
3243 
3244 	if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
3245 		return -EAGAIN;
3246 	}
3247 
3248 	if (!bt_le_conn_params_valid(conn_param)) {
3249 		return -EINVAL;
3250 	}
3251 
3252 	if (!BT_LE_STATES_SCAN_INIT(bt_dev.le.states) &&
3253 	    atomic_test_bit(bt_dev.flags, BT_DEV_EXPLICIT_SCAN)) {
3254 		return -EAGAIN;
3255 	}
3256 
3257 	if (atomic_test_bit(bt_dev.flags, BT_DEV_INITIATING)) {
3258 		return -EALREADY;
3259 	}
3260 
3261 	if (!bt_id_scan_random_addr_check()) {
3262 		return -EINVAL;
3263 	}
3264 
3265 	if (bt_conn_exists_le(BT_ID_DEFAULT, peer)) {
3266 		return -EINVAL;
3267 	}
3268 
3269 	return 0;
3270 }
3271 
conn_le_create_helper(const bt_addr_le_t * peer,const struct bt_le_conn_param * conn_param)3272 static struct bt_conn *conn_le_create_helper(const bt_addr_le_t *peer,
3273 				     const struct bt_le_conn_param *conn_param)
3274 {
3275 	bt_addr_le_t dst;
3276 	struct bt_conn *conn;
3277 
3278 	if (bt_addr_le_is_resolved(peer)) {
3279 		bt_addr_le_copy_resolved(&dst, peer);
3280 	} else {
3281 		bt_addr_le_copy(&dst, bt_lookup_id_addr(BT_ID_DEFAULT, peer));
3282 	}
3283 
3284 	/* Only default identity supported for now */
3285 	conn = bt_conn_add_le(BT_ID_DEFAULT, &dst);
3286 	if (!conn) {
3287 		return NULL;
3288 	}
3289 
3290 	bt_conn_set_param_le(conn, conn_param);
3291 
3292 	return conn;
3293 }
3294 
/* Initiate a connection to @p peer as central.
 *
 * On success *ret_conn holds a reference owned by the application.
 * Returns a negative errno from the precondition checks, -ENOMEM when no
 * connection object is free, or the error from starting the initiator.
 */
int bt_conn_le_create(const bt_addr_le_t *peer, const struct bt_conn_le_create_param *create_param,
		      const struct bt_le_conn_param *conn_param, struct bt_conn **ret_conn)
{
	struct bt_conn *conn;
	int err;

	/* Stack-ready, parameter, scanner/initiator and duplicate checks. */
	err = conn_le_create_common_checks(peer, conn_param);
	if (err) {
		return err;
	}

	conn = conn_le_create_helper(peer, conn_param);
	if (!conn) {
		return -ENOMEM;
	}

	create_param_setup(create_param);

#if defined(CONFIG_BT_SMP)
	/* More resolving-list entries than the controller can hold: the
	 * controller cannot resolve the peer, so scan first and let the host
	 * resolve before initiating.
	 */
	if (bt_dev.le.rl_entries > bt_dev.le.rl_size) {
		/* Use host-based identity resolving. */
		bt_conn_set_state(conn, BT_CONN_SCAN_BEFORE_INITIATING);

		err = bt_le_scan_update(true);
		if (err) {
			/* Roll back: release the object we just allocated. */
			bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
			bt_conn_unref(conn);

			return err;
		}

		*ret_conn = conn;
		return 0;
	}
#endif

	bt_conn_set_state(conn, BT_CONN_INITIATING);

	err = bt_le_create_conn(conn);
	if (err) {
		conn->err = 0;
		bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
		bt_conn_unref(conn);

		/* Best-effort attempt to resume background scanning. */
		bt_le_scan_update(false);
		return err;
	}

	*ret_conn = conn;
	return 0;
}
3346 
bt_conn_le_create_synced(const struct bt_le_ext_adv * adv,const struct bt_conn_le_create_synced_param * synced_param,const struct bt_le_conn_param * conn_param,struct bt_conn ** ret_conn)3347 int bt_conn_le_create_synced(const struct bt_le_ext_adv *adv,
3348 			     const struct bt_conn_le_create_synced_param *synced_param,
3349 			     const struct bt_le_conn_param *conn_param, struct bt_conn **ret_conn)
3350 {
3351 	struct bt_conn *conn;
3352 	int err;
3353 
3354 	err = conn_le_create_common_checks(synced_param->peer, conn_param);
3355 	if (err) {
3356 		return err;
3357 	}
3358 
3359 	if (!atomic_test_bit(adv->flags, BT_PER_ADV_ENABLED)) {
3360 		return -EINVAL;
3361 	}
3362 
3363 	if (!BT_FEAT_LE_PAWR_ADVERTISER(bt_dev.le.features)) {
3364 		return -ENOTSUP;
3365 	}
3366 
3367 	if (synced_param->subevent >= BT_HCI_PAWR_SUBEVENT_MAX) {
3368 		return -EINVAL;
3369 	}
3370 
3371 	conn = conn_le_create_helper(synced_param->peer, conn_param);
3372 	if (!conn) {
3373 		return -ENOMEM;
3374 	}
3375 
3376 	/* The connection creation timeout is not really useful for PAwR.
3377 	 * The controller will give a result for the connection attempt
3378 	 * within a periodic interval. We do not know the periodic interval
3379 	 * used, so disable the timeout.
3380 	 */
3381 	bt_dev.create_param.timeout = 0;
3382 	bt_conn_set_state(conn, BT_CONN_INITIATING);
3383 
3384 	err = bt_le_create_conn_synced(conn, adv, synced_param->subevent);
3385 	if (err) {
3386 		conn->err = 0;
3387 		bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
3388 		bt_conn_unref(conn);
3389 
3390 		return err;
3391 	}
3392 
3393 	*ret_conn = conn;
3394 	return 0;
3395 }
3396 
3397 #if !defined(CONFIG_BT_FILTER_ACCEPT_LIST)
/* Enable (param != NULL) or disable (param == NULL) automatic reconnection
 * to @p addr on the default identity.
 *
 * While the AUTO_CONNECT flag is set, the connection object holds an extra
 * reference so it survives disconnections; the flag transitions below pair
 * that reference with the flag's lifetime.
 */
int bt_le_set_auto_conn(const bt_addr_le_t *addr,
			const struct bt_le_conn_param *param)
{
	struct bt_conn *conn;

	if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
		return -EAGAIN;
	}

	if (param && !bt_le_conn_params_valid(param)) {
		return -EINVAL;
	}

	if (!bt_id_scan_random_addr_check()) {
		return -EINVAL;
	}

	/* Only default identity is supported */
	conn = bt_conn_lookup_addr_le(BT_ID_DEFAULT, addr);
	if (!conn) {
		/* No existing object for this peer: allocate one. */
		conn = bt_conn_add_le(BT_ID_DEFAULT, addr);
		if (!conn) {
			return -ENOMEM;
		}
	}

	if (param) {
		bt_conn_set_param_le(conn, param);

		/* Take the extra reference only on the 0 -> 1 transition of
		 * the AUTO_CONNECT flag.
		 */
		if (!atomic_test_and_set_bit(conn->flags,
					     BT_CONN_AUTO_CONNECT)) {
			bt_conn_ref(conn);
		}
	} else {
		/* Drop the extra reference only if the flag was set. */
		if (atomic_test_and_clear_bit(conn->flags,
					      BT_CONN_AUTO_CONNECT)) {
			bt_conn_unref(conn);
			if (conn->state == BT_CONN_SCAN_BEFORE_INITIATING) {
				bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
			}
		}
	}

	if (conn->state == BT_CONN_DISCONNECTED &&
	    atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
		if (param) {
			bt_conn_set_state(conn, BT_CONN_SCAN_BEFORE_INITIATING);
		}
		/* Re-evaluate whether background scanning is needed. */
		bt_le_scan_update(false);
	}

	/* Release the lookup/allocation reference taken above. */
	bt_conn_unref(conn);

	return 0;
}
3453 #endif /* !defined(CONFIG_BT_FILTER_ACCEPT_LIST) */
3454 #endif /* CONFIG_BT_CENTRAL */
3455 
bt_conn_le_conn_update(struct bt_conn * conn,const struct bt_le_conn_param * param)3456 int bt_conn_le_conn_update(struct bt_conn *conn,
3457 			   const struct bt_le_conn_param *param)
3458 {
3459 	struct hci_cp_le_conn_update *conn_update;
3460 	struct net_buf *buf;
3461 
3462 	buf = bt_hci_cmd_create(BT_HCI_OP_LE_CONN_UPDATE,
3463 				sizeof(*conn_update));
3464 	if (!buf) {
3465 		return -ENOBUFS;
3466 	}
3467 
3468 	conn_update = net_buf_add(buf, sizeof(*conn_update));
3469 	(void)memset(conn_update, 0, sizeof(*conn_update));
3470 	conn_update->handle = sys_cpu_to_le16(conn->handle);
3471 	conn_update->conn_interval_min = sys_cpu_to_le16(param->interval_min);
3472 	conn_update->conn_interval_max = sys_cpu_to_le16(param->interval_max);
3473 	conn_update->conn_latency = sys_cpu_to_le16(param->latency);
3474 	conn_update->supervision_timeout = sys_cpu_to_le16(param->timeout);
3475 
3476 	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_CONN_UPDATE, buf, NULL);
3477 }
3478 
3479 #if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CLASSIC)
bt_conn_auth_cb_register(const struct bt_conn_auth_cb * cb)3480 int bt_conn_auth_cb_register(const struct bt_conn_auth_cb *cb)
3481 {
3482 	if (!cb) {
3483 		bt_auth = NULL;
3484 		return 0;
3485 	}
3486 
3487 	if (bt_auth) {
3488 		return -EALREADY;
3489 	}
3490 
3491 	/* The cancel callback must always be provided if the app provides
3492 	 * interactive callbacks.
3493 	 */
3494 	if (!cb->cancel &&
3495 	    (cb->passkey_display || cb->passkey_entry || cb->passkey_confirm ||
3496 #if defined(CONFIG_BT_CLASSIC)
3497 	     cb->pincode_entry ||
3498 #endif
3499 	     cb->pairing_confirm)) {
3500 		return -EINVAL;
3501 	}
3502 
3503 	bt_auth = cb;
3504 	return 0;
3505 }
3506 
3507 #if defined(CONFIG_BT_SMP)
bt_conn_auth_cb_overlay(struct bt_conn * conn,const struct bt_conn_auth_cb * cb)3508 int bt_conn_auth_cb_overlay(struct bt_conn *conn, const struct bt_conn_auth_cb *cb)
3509 {
3510 	CHECKIF(conn == NULL) {
3511 		return -EINVAL;
3512 	}
3513 
3514 	/* The cancel callback must always be provided if the app provides
3515 	 * interactive callbacks.
3516 	 */
3517 	if (cb && !cb->cancel &&
3518 	    (cb->passkey_display || cb->passkey_entry || cb->passkey_confirm ||
3519 	     cb->pairing_confirm)) {
3520 		return -EINVAL;
3521 	}
3522 
3523 	if (conn->type == BT_CONN_TYPE_LE) {
3524 		return bt_smp_auth_cb_overlay(conn, cb);
3525 	}
3526 
3527 	return -ENOTSUP;
3528 }
3529 #endif
3530 
/* Register @p cb to receive authentication-info notifications.
 *
 * Returns 0 on success, -EINVAL for a NULL @p cb (when runtime checks are
 * enabled), or -EALREADY if this callback structure is already registered.
 */
int bt_conn_auth_info_cb_register(struct bt_conn_auth_info_cb *cb)
{
	CHECKIF(cb == NULL) {
		return -EINVAL;
	}

	/* Guard against double-registration, which would corrupt the list. */
	if (sys_slist_find(&bt_auth_info_cbs, &cb->node, NULL)) {
		return -EALREADY;
	}

	sys_slist_append(&bt_auth_info_cbs, &cb->node);

	return 0;
}
3545 
/* Remove a previously registered authentication-info callback structure.
 *
 * Returns 0 on success, -EINVAL for a NULL @p cb (when runtime checks are
 * enabled), or -EALREADY if @p cb was not registered.
 */
int bt_conn_auth_info_cb_unregister(struct bt_conn_auth_info_cb *cb)
{
	CHECKIF(cb == NULL) {
		return -EINVAL;
	}

	if (!sys_slist_find_and_remove(&bt_auth_info_cbs, &cb->node)) {
		return -EALREADY;
	}

	return 0;
}
3558 
bt_conn_auth_passkey_entry(struct bt_conn * conn,unsigned int passkey)3559 int bt_conn_auth_passkey_entry(struct bt_conn *conn, unsigned int passkey)
3560 {
3561 	if (IS_ENABLED(CONFIG_BT_SMP) && conn->type == BT_CONN_TYPE_LE) {
3562 		return bt_smp_auth_passkey_entry(conn, passkey);
3563 	}
3564 
3565 	if (IS_ENABLED(CONFIG_BT_CLASSIC) && conn->type == BT_CONN_TYPE_BR) {
3566 		if (!bt_auth) {
3567 			return -EINVAL;
3568 		}
3569 
3570 		return bt_ssp_auth_passkey_entry(conn, passkey);
3571 	}
3572 
3573 	return -EINVAL;
3574 }
3575 
3576 #if defined(CONFIG_BT_PASSKEY_KEYPRESS)
bt_conn_auth_keypress_notify(struct bt_conn * conn,enum bt_conn_auth_keypress type)3577 int bt_conn_auth_keypress_notify(struct bt_conn *conn,
3578 				 enum bt_conn_auth_keypress type)
3579 {
3580 	if (IS_ENABLED(CONFIG_BT_SMP) && conn->type == BT_CONN_TYPE_LE) {
3581 		return bt_smp_auth_keypress_notify(conn, type);
3582 	}
3583 
3584 	LOG_ERR("Not implemented for conn type %d", conn->type);
3585 	return -EINVAL;
3586 }
3587 #endif
3588 
bt_conn_auth_passkey_confirm(struct bt_conn * conn)3589 int bt_conn_auth_passkey_confirm(struct bt_conn *conn)
3590 {
3591 	if (IS_ENABLED(CONFIG_BT_SMP) && conn->type == BT_CONN_TYPE_LE) {
3592 		return bt_smp_auth_passkey_confirm(conn);
3593 	}
3594 
3595 	if (IS_ENABLED(CONFIG_BT_CLASSIC) && conn->type == BT_CONN_TYPE_BR) {
3596 		if (!bt_auth) {
3597 			return -EINVAL;
3598 		}
3599 
3600 		return bt_ssp_auth_passkey_confirm(conn);
3601 	}
3602 
3603 	return -EINVAL;
3604 }
3605 
bt_conn_auth_cancel(struct bt_conn * conn)3606 int bt_conn_auth_cancel(struct bt_conn *conn)
3607 {
3608 	if (IS_ENABLED(CONFIG_BT_SMP) && conn->type == BT_CONN_TYPE_LE) {
3609 		return bt_smp_auth_cancel(conn);
3610 	}
3611 
3612 	if (IS_ENABLED(CONFIG_BT_CLASSIC) && conn->type == BT_CONN_TYPE_BR) {
3613 		if (!bt_auth) {
3614 			return -EINVAL;
3615 		}
3616 
3617 		return bt_ssp_auth_cancel(conn);
3618 	}
3619 
3620 	return -EINVAL;
3621 }
3622 
bt_conn_auth_pairing_confirm(struct bt_conn * conn)3623 int bt_conn_auth_pairing_confirm(struct bt_conn *conn)
3624 {
3625 	if (IS_ENABLED(CONFIG_BT_SMP) && conn->type == BT_CONN_TYPE_LE) {
3626 		return bt_smp_auth_pairing_confirm(conn);
3627 	}
3628 
3629 	if (IS_ENABLED(CONFIG_BT_CLASSIC) && conn->type == BT_CONN_TYPE_BR) {
3630 		if (!bt_auth) {
3631 			return -EINVAL;
3632 		}
3633 
3634 		return bt_ssp_auth_pairing_confirm(conn);
3635 	}
3636 
3637 	return -EINVAL;
3638 }
3639 #endif /* CONFIG_BT_SMP || CONFIG_BT_CLASSIC */
3640 
bt_conn_lookup_index(uint8_t index)3641 struct bt_conn *bt_conn_lookup_index(uint8_t index)
3642 {
3643 	if (index >= ARRAY_SIZE(acl_conns)) {
3644 		return NULL;
3645 	}
3646 
3647 	return bt_conn_ref(&acl_conns[index]);
3648 }
3649 
bt_conn_init(void)3650 int bt_conn_init(void)
3651 {
3652 	int err, i;
3653 
3654 	k_fifo_init(&free_tx);
3655 	for (i = 0; i < ARRAY_SIZE(conn_tx); i++) {
3656 		k_fifo_put(&free_tx, &conn_tx[i]);
3657 	}
3658 
3659 	bt_att_init();
3660 
3661 	err = bt_smp_init();
3662 	if (err) {
3663 		return err;
3664 	}
3665 
3666 	bt_l2cap_init();
3667 
3668 	/* Initialize background scan */
3669 	if (IS_ENABLED(CONFIG_BT_CENTRAL)) {
3670 		for (i = 0; i < ARRAY_SIZE(acl_conns); i++) {
3671 			struct bt_conn *conn = bt_conn_ref(&acl_conns[i]);
3672 
3673 			if (!conn) {
3674 				continue;
3675 			}
3676 
3677 #if !defined(CONFIG_BT_FILTER_ACCEPT_LIST)
3678 			if (atomic_test_bit(conn->flags,
3679 					    BT_CONN_AUTO_CONNECT)) {
3680 				/* Only the default identity is supported */
3681 				conn->id = BT_ID_DEFAULT;
3682 				bt_conn_set_state(conn,
3683 						  BT_CONN_SCAN_BEFORE_INITIATING);
3684 			}
3685 #endif /* !defined(CONFIG_BT_FILTER_ACCEPT_LIST) */
3686 
3687 			bt_conn_unref(conn);
3688 		}
3689 	}
3690 
3691 	return 0;
3692 }
3693 
3694 #if defined(CONFIG_BT_DF_CONNECTION_CTE_RX)
bt_hci_le_df_connection_iq_report_common(uint8_t event,struct net_buf * buf)3695 void bt_hci_le_df_connection_iq_report_common(uint8_t event, struct net_buf *buf)
3696 {
3697 	struct bt_df_conn_iq_samples_report iq_report;
3698 	struct bt_conn *conn;
3699 	int err;
3700 
3701 	if (event == BT_HCI_EVT_LE_CONNECTION_IQ_REPORT) {
3702 		err = hci_df_prepare_connection_iq_report(buf, &iq_report, &conn);
3703 		if (err) {
3704 			LOG_ERR("Prepare CTE conn IQ report failed %d", err);
3705 			return;
3706 		}
3707 	} else if (IS_ENABLED(CONFIG_BT_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES) &&
3708 		   event == BT_HCI_EVT_VS_LE_CONNECTION_IQ_REPORT) {
3709 		err = hci_df_vs_prepare_connection_iq_report(buf, &iq_report, &conn);
3710 		if (err) {
3711 			LOG_ERR("Prepare CTE conn IQ report failed %d", err);
3712 			return;
3713 		}
3714 	} else {
3715 		LOG_ERR("Unhandled VS connection IQ report");
3716 		return;
3717 	}
3718 
3719 	struct bt_conn_cb *callback;
3720 
3721 	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
3722 		if (callback->cte_report_cb) {
3723 			callback->cte_report_cb(conn, &iq_report);
3724 		}
3725 	}
3726 
3727 	STRUCT_SECTION_FOREACH(bt_conn_cb, cb)
3728 	{
3729 		if (cb->cte_report_cb) {
3730 			cb->cte_report_cb(conn, &iq_report);
3731 		}
3732 	}
3733 
3734 	bt_conn_unref(conn);
3735 }
3736 
/* Handler for the standard HCI LE Connection IQ Report event. */
void bt_hci_le_df_connection_iq_report(struct net_buf *buf)
{
	bt_hci_le_df_connection_iq_report_common(BT_HCI_EVT_LE_CONNECTION_IQ_REPORT, buf);
}
3741 
3742 #if defined(CONFIG_BT_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES)
/* Handler for the vendor-specific connection IQ report event (16-bit IQ
 * samples variant).
 */
void bt_hci_le_vs_df_connection_iq_report(struct net_buf *buf)
{
	bt_hci_le_df_connection_iq_report_common(BT_HCI_EVT_VS_LE_CONNECTION_IQ_REPORT, buf);
}
3747 #endif /* CONFIG_BT_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES */
3748 #endif /* CONFIG_BT_DF_CONNECTION_CTE_RX */
3749 
3750 #if defined(CONFIG_BT_DF_CONNECTION_CTE_REQ)
bt_hci_le_df_cte_req_failed(struct net_buf * buf)3751 void bt_hci_le_df_cte_req_failed(struct net_buf *buf)
3752 {
3753 	struct bt_df_conn_iq_samples_report iq_report;
3754 	struct bt_conn *conn;
3755 	int err;
3756 
3757 	err = hci_df_prepare_conn_cte_req_failed(buf, &iq_report, &conn);
3758 	if (err) {
3759 		LOG_ERR("Prepare CTE REQ failed IQ report failed %d", err);
3760 		return;
3761 	}
3762 
3763 	struct bt_conn_cb *callback;
3764 
3765 	SYS_SLIST_FOR_EACH_CONTAINER(&conn_cbs, callback, _node) {
3766 		if (callback->cte_report_cb) {
3767 			callback->cte_report_cb(conn, &iq_report);
3768 		}
3769 	}
3770 
3771 	STRUCT_SECTION_FOREACH(bt_conn_cb, cb)
3772 	{
3773 		if (cb->cte_report_cb) {
3774 			cb->cte_report_cb(conn, &iq_report);
3775 		}
3776 	}
3777 
3778 	bt_conn_unref(conn);
3779 }
3780 #endif /* CONFIG_BT_DF_CONNECTION_CTE_REQ */
3781 
3782 #endif /* CONFIG_BT_CONN */
3783