1 /* hci_core.c - HCI core Bluetooth handling */
2 
3 /*
4  * Copyright (c) 2017-2021 Nordic Semiconductor ASA
5  * Copyright (c) 2015-2016 Intel Corporation
6  *
7  * SPDX-License-Identifier: Apache-2.0
8  */
9 
10 #include <zephyr/kernel.h>
11 #include <string.h>
12 #include <stdio.h>
13 #include <errno.h>
14 #include <zephyr/sys/atomic.h>
15 #include <zephyr/sys/util.h>
16 #include <zephyr/sys/slist.h>
17 #include <zephyr/sys/byteorder.h>
18 #include <zephyr/debug/stack.h>
19 #include <zephyr/sys/__assert.h>
20 #include <soc.h>
21 
22 #include <zephyr/settings/settings.h>
23 
24 #include <zephyr/bluetooth/bluetooth.h>
25 #include <zephyr/bluetooth/conn.h>
26 #include <zephyr/bluetooth/l2cap.h>
27 #include <zephyr/bluetooth/hci.h>
28 #include <zephyr/bluetooth/hci_vs.h>
29 #include <zephyr/drivers/bluetooth/hci_driver.h>
30 
31 #include "common/bt_str.h"
32 #include "common/assert.h"
33 
34 #include "common/rpa.h"
35 #include "keys.h"
36 #include "monitor.h"
37 #include "hci_core.h"
38 #include "hci_ecc.h"
39 #include "ecc.h"
40 #include "id.h"
41 #include "adv.h"
42 #include "scan.h"
43 
44 #include "addr_internal.h"
45 #include "conn_internal.h"
46 #include "iso_internal.h"
47 #include "l2cap_internal.h"
48 #include "gatt_internal.h"
49 #include "smp.h"
50 #include "crypto.h"
51 #include "settings.h"
52 
53 #if defined(CONFIG_BT_BREDR)
54 #include "br.h"
55 #endif
56 
57 #if defined(CONFIG_BT_DF)
58 #include "direction_internal.h"
59 #endif /* CONFIG_BT_DF */
60 
61 #define LOG_LEVEL CONFIG_BT_HCI_CORE_LOG_LEVEL
62 #include <zephyr/logging/log.h>
63 LOG_MODULE_REGISTER(bt_hci_core);
64 
65 #define HCI_CMD_TIMEOUT      K_SECONDS(10)
66 
67 /* Stacks for the threads */
68 #if !defined(CONFIG_BT_RECV_BLOCKING)
69 static void rx_work_handler(struct k_work *work);
70 static K_WORK_DEFINE(rx_work, rx_work_handler);
71 #if defined(CONFIG_BT_RECV_WORKQ_BT)
72 static struct k_work_q bt_workq;
73 static K_KERNEL_STACK_DEFINE(rx_thread_stack, CONFIG_BT_RX_STACK_SIZE);
74 #endif /* CONFIG_BT_RECV_WORKQ_BT */
75 #endif /* !CONFIG_BT_RECV_BLOCKING */
76 static struct k_thread tx_thread_data;
77 static K_KERNEL_STACK_DEFINE(tx_thread_stack, CONFIG_BT_HCI_TX_STACK_SIZE);
78 
79 static void init_work(struct k_work *work);
80 
81 struct bt_dev bt_dev = {
82 	.init          = Z_WORK_INITIALIZER(init_work),
83 #if defined(CONFIG_BT_PRIVACY)
84 	.rpa_timeout   = CONFIG_BT_RPA_TIMEOUT,
85 #endif
86 #if defined(CONFIG_BT_DEVICE_APPEARANCE_DYNAMIC)
87 	.appearance = CONFIG_BT_DEVICE_APPEARANCE,
88 #endif
89 };
90 
91 static bt_ready_cb_t ready_cb;
92 
93 #if defined(CONFIG_BT_HCI_VS_EVT_USER)
94 static bt_hci_vnd_evt_cb_t *hci_vnd_evt_cb;
95 #endif /* CONFIG_BT_HCI_VS_EVT_USER */
96 
97 struct cmd_data {
98 	/** HCI status of the command completion */
99 	uint8_t  status;
100 
101 	/** The command OpCode that the buffer contains */
102 	uint16_t opcode;
103 
104 	/** The state to update when command completes with success. */
105 	struct bt_hci_cmd_state_set *state;
106 
107 	/** Used by bt_hci_cmd_send_sync. */
108 	struct k_sem *sync;
109 };
110 
111 static struct cmd_data cmd_data[CONFIG_BT_BUF_CMD_TX_COUNT];
112 
113 #define cmd(buf) (&cmd_data[net_buf_id(buf)])
114 #define acl(buf) ((struct acl_data *)net_buf_user_data(buf))
115 
/* Attach a deferred flag update to an HCI command buffer: when the
 * command completes successfully, @bit in @target will be set to @val.
 * @state must remain valid until the command completes.
 */
void bt_hci_cmd_state_set_init(struct net_buf *buf,
			       struct bt_hci_cmd_state_set *state,
			       atomic_t *target, int bit, bool val)
{
	state->val = val;
	state->bit = bit;
	state->target = target;

	cmd(buf)->state = state;
}
125 
126 /* HCI command buffers. Derive the needed size from both Command and Event
127  * buffer length since the buffer is also used for the response event i.e
128  * command complete or command status.
129  */
130 #define CMD_BUF_SIZE MAX(BT_BUF_EVT_RX_SIZE, BT_BUF_CMD_TX_SIZE)
131 NET_BUF_POOL_FIXED_DEFINE(hci_cmd_pool, CONFIG_BT_BUF_CMD_TX_COUNT,
132 			  CMD_BUF_SIZE, 8, NULL);
133 
134 struct event_handler {
135 	uint8_t event;
136 	uint8_t min_len;
137 	void (*handler)(struct net_buf *buf);
138 };
139 
140 #define EVENT_HANDLER(_evt, _handler, _min_len) \
141 { \
142 	.event = _evt, \
143 	.handler = _handler, \
144 	.min_len = _min_len, \
145 }
146 
/* Dispatch @event to the matching entry in @handlers.
 *
 * Returns 0 when a handler ran, -EINVAL when the event payload is
 * shorter than the handler's declared minimum, and -EOPNOTSUPP when
 * no handler is registered for this event code.
 */
static int handle_event_common(uint8_t event, struct net_buf *buf,
			       const struct event_handler *handlers, size_t num_handlers)
{
	for (size_t i = 0; i < num_handlers; i++) {
		const struct event_handler *h = &handlers[i];

		if (h->event != event) {
			continue;
		}

		if (buf->len < h->min_len) {
			LOG_ERR("Too small (%u bytes) event 0x%02x", buf->len, event);
			return -EINVAL;
		}

		h->handler(buf);
		return 0;
	}

	return -EOPNOTSUPP;
}
170 
/* Dispatch a standard HCI event, logging a warning when it is unknown.
 * Undersized events are reported inside handle_event_common.
 */
static void handle_event(uint8_t event, struct net_buf *buf, const struct event_handler *handlers,
			 size_t num_handlers)
{
	if (handle_event_common(event, buf, handlers, num_handlers) == -EOPNOTSUPP) {
		LOG_WRN("Unhandled event 0x%02x len %u: %s", event, buf->len,
			bt_hex(buf->data, buf->len));
	}
}
184 
/* Dispatch a vendor-specific HCI event, logging a warning when no
 * handler is registered. Undersized events are reported inside
 * handle_event_common.
 */
static void handle_vs_event(uint8_t event, struct net_buf *buf,
			    const struct event_handler *handlers, size_t num_handlers)
{
	if (handle_event_common(event, buf, handlers, num_handlers) == -EOPNOTSUPP) {
		LOG_WRN("Unhandled vendor-specific event: %s", bt_hex(buf->data, buf->len));
	}
}
197 
198 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
/* Report one consumed ACL RX buffer back to the controller via the
 * Host_Number_Of_Completed_Packets command (controller-to-host flow
 * control). Called when an ACL buffer is being destroyed; @buf's user
 * data identifies the originating connection.
 */
void bt_hci_host_num_completed_packets(struct net_buf *buf)
{

	struct bt_hci_cp_host_num_completed_packets *cp;
	uint16_t handle = acl(buf)->handle;
	struct bt_hci_handle_count *hc;
	struct bt_conn *conn;
	uint8_t index = acl(buf)->index;

	/* Handle and index are copied out above; the buffer itself is
	 * recycled here and must not be touched again below (the local
	 * `buf` is reused for the outgoing command).
	 */
	net_buf_destroy(buf);

	/* Do nothing if controller to host flow control is not supported */
	if (!BT_CMD_TEST(bt_dev.supported_commands, 10, 5)) {
		return;
	}

	conn = bt_conn_lookup_index(index);
	if (!conn) {
		LOG_WRN("Unable to look up conn with index 0x%02x", index);
		return;
	}

	/* Only report for live (or actively disconnecting) connections;
	 * anything else means the handle is no longer meaningful to the
	 * controller.
	 */
	if (conn->state != BT_CONN_CONNECTED &&
	    conn->state != BT_CONN_DISCONNECTING) {
		LOG_WRN("Not reporting packet for non-connected conn");
		bt_conn_unref(conn);
		return;
	}

	bt_conn_unref(conn);

	LOG_DBG("Reporting completed packet for handle %u", handle);

	buf = bt_hci_cmd_create(BT_HCI_OP_HOST_NUM_COMPLETED_PACKETS,
				sizeof(*cp) + sizeof(*hc));
	if (!buf) {
		LOG_ERR("Unable to allocate new HCI command");
		return;
	}

	/* One handle entry, one completed packet. */
	cp = net_buf_add(buf, sizeof(*cp));
	cp->num_handles = sys_cpu_to_le16(1);

	hc = net_buf_add(buf, sizeof(*hc));
	hc->handle = sys_cpu_to_le16(handle);
	hc->count  = sys_cpu_to_le16(1);

	bt_hci_cmd_send(BT_HCI_OP_HOST_NUM_COMPLETED_PACKETS, buf);
}
248 #endif /* defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL) */
249 
/* Allocate and initialize an HCI command buffer for @opcode with room
 * for @param_len parameter bytes. Blocks until a buffer is available
 * (K_FOREVER), so the return value is never NULL in practice.
 */
struct net_buf *bt_hci_cmd_create(uint16_t opcode, uint8_t param_len)
{
	struct net_buf *buf;
	struct bt_hci_cmd_hdr *hdr;

	LOG_DBG("opcode 0x%04x param_len %u", opcode, param_len);

	buf = net_buf_alloc(&hci_cmd_pool, K_FOREVER);
	__ASSERT_NO_MSG(buf);

	LOG_DBG("buf %p", buf);

	net_buf_reserve(buf, BT_BUF_RESERVE);
	bt_buf_set_type(buf, BT_BUF_CMD);

	/* Reset per-buffer command metadata. */
	cmd(buf)->state = NULL;
	cmd(buf)->sync = NULL;
	cmd(buf)->opcode = opcode;

	hdr = net_buf_add(buf, sizeof(*hdr));
	hdr->param_len = param_len;
	hdr->opcode = sys_cpu_to_le16(opcode);

	return buf;
}
276 
/* Queue an HCI command for transmission without waiting for completion.
 * A NULL @buf allocates an empty command for @opcode.
 */
int bt_hci_cmd_send(uint16_t opcode, struct net_buf *buf)
{
	if (buf == NULL) {
		buf = bt_hci_cmd_create(opcode, 0);
		if (buf == NULL) {
			return -ENOBUFS;
		}
	}

	LOG_DBG("opcode 0x%04x len %u", opcode, buf->len);

	/* Host Number of Completed Packets can ignore the ncmd value
	 * and does not generate any cmd complete/status events, so it
	 * bypasses the command queue and goes straight to the driver.
	 */
	if (opcode == BT_HCI_OP_HOST_NUM_COMPLETED_PACKETS) {
		int err = bt_send(buf);

		if (err) {
			LOG_ERR("Unable to send to driver (err %d)", err);
			net_buf_unref(buf);
		}

		return err;
	}

	net_buf_put(&bt_dev.cmd_tx_queue, buf);

	return 0;
}
307 
/* Send an HCI command and block until the matching Command
 * Complete/Status event arrives. A NULL @buf allocates an empty
 * command for @opcode.
 *
 * On success: if @rsp is non-NULL the response buffer is returned
 * through it and the caller must unref it; otherwise it is released
 * here. On HCI error status the buffer is released and a negative
 * errno is returned. A timeout (HCI_CMD_TIMEOUT) is treated as fatal
 * via BT_ASSERT_MSG.
 */
int bt_hci_cmd_send_sync(uint16_t opcode, struct net_buf *buf,
			 struct net_buf **rsp)
{
	struct k_sem sync_sem;
	uint8_t status;
	int err;

	if (!buf) {
		buf = bt_hci_cmd_create(opcode, 0);
		if (!buf) {
			return -ENOBUFS;
		}
	}

	LOG_DBG("buf %p opcode 0x%04x len %u", buf, opcode, buf->len);

	/* The completion path gives this semaphore once the command's
	 * completion event has been processed (see cmd(buf)->sync).
	 */
	k_sem_init(&sync_sem, 0, 1);
	cmd(buf)->sync = &sync_sem;

	/* Extra ref: one reference is handed to the TX queue, ours keeps
	 * the buffer (and its cmd metadata/status) alive for inspection
	 * after completion.
	 */
	net_buf_put(&bt_dev.cmd_tx_queue, net_buf_ref(buf));

	err = k_sem_take(&sync_sem, HCI_CMD_TIMEOUT);
	BT_ASSERT_MSG(err == 0, "command opcode 0x%04x timeout with err %d", opcode, err);

	status = cmd(buf)->status;
	if (status) {
		LOG_WRN("opcode 0x%04x status 0x%02x", opcode, status);
		net_buf_unref(buf);

		/* Map selected HCI status codes to distinct errnos. */
		switch (status) {
		case BT_HCI_ERR_CONN_LIMIT_EXCEEDED:
			return -ECONNREFUSED;
		case BT_HCI_ERR_INSUFFICIENT_RESOURCES:
			return -ENOMEM;
		default:
			return -EIO;
		}
	}

	LOG_DBG("rsp %p opcode 0x%04x len %u", buf, opcode, buf->len);

	if (rsp) {
		*rsp = buf;
	} else {
		net_buf_unref(buf);
	}

	return 0;
}
357 
bt_hci_le_rand(void * buffer,size_t len)358 int bt_hci_le_rand(void *buffer, size_t len)
359 {
360 	struct bt_hci_rp_le_rand *rp;
361 	struct net_buf *rsp;
362 	size_t count;
363 	int err;
364 
365 	/* Check first that HCI_LE_Rand is supported */
366 	if (!BT_CMD_TEST(bt_dev.supported_commands, 27, 7)) {
367 		return -ENOTSUP;
368 	}
369 
370 	while (len > 0) {
371 		/* Number of bytes to fill on this iteration */
372 		count = MIN(len, sizeof(rp->rand));
373 		/* Request the next 8 bytes over HCI */
374 		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_RAND, NULL, &rsp);
375 		if (err) {
376 			return err;
377 		}
378 		/* Copy random data into buffer */
379 		rp = (void *)rsp->data;
380 		memcpy(buffer, rp->rand, count);
381 
382 		net_buf_unref(rsp);
383 		buffer = (uint8_t *)buffer + count;
384 		len -= count;
385 	}
386 
387 	return 0;
388 }
389 
/* Query the controller's maximum supported Data Length Extension TX
 * octets/time and store them in @tx_octets/@tx_time.
 */
static int hci_le_read_max_data_len(uint16_t *tx_octets, uint16_t *tx_time)
{
	struct net_buf *rsp;
	int err;

	err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_MAX_DATA_LEN, NULL, &rsp);
	if (err) {
		LOG_ERR("Failed to read DLE max data len");
		return err;
	}

	const struct bt_hci_rp_le_read_max_data_len *rp = (void *)rsp->data;

	*tx_octets = sys_le16_to_cpu(rp->max_tx_octets);
	*tx_time = sys_le16_to_cpu(rp->max_tx_time);
	net_buf_unref(rsp);

	return 0;
}
409 
/* Translate an HCI PHY code into the corresponding GAP PHY bit,
 * returning 0 for unknown codes.
 */
uint8_t bt_get_phy(uint8_t hci_phy)
{
	if (hci_phy == BT_HCI_LE_PHY_1M) {
		return BT_GAP_LE_PHY_1M;
	}

	if (hci_phy == BT_HCI_LE_PHY_2M) {
		return BT_GAP_LE_PHY_2M;
	}

	if (hci_phy == BT_HCI_LE_PHY_CODED) {
		return BT_GAP_LE_PHY_CODED;
	}

	return 0;
}
423 
424 #if defined(CONFIG_BT_CONN_TX)
/* Handle the Number_Of_Completed_Packets HCI event: for each reported
 * handle, release TX credits and complete pending TX contexts on the
 * corresponding connection.
 */
static void hci_num_completed_packets(struct net_buf *buf)
{
	struct bt_hci_evt_num_completed_packets *evt = (void *)buf->data;
	int i;

	/* Reject events whose handle array would run past the buffer. */
	if (sizeof(*evt) + sizeof(evt->h[0]) * evt->num_handles > buf->len) {
		LOG_ERR("evt num_handles (=%u) too large (%u > %u)",
			evt->num_handles,
			sizeof(*evt) + sizeof(evt->h[0]) * evt->num_handles,
			buf->len);
		return;
	}

	LOG_DBG("num_handles %u", evt->num_handles);

	for (i = 0; i < evt->num_handles; i++) {
		uint16_t handle, count;
		struct bt_conn *conn;

		handle = sys_le16_to_cpu(evt->h[i].handle);
		count = sys_le16_to_cpu(evt->h[i].count);

		LOG_DBG("handle %u count %u", handle, count);

		conn = bt_conn_lookup_handle(handle);
		if (!conn) {
			LOG_ERR("No connection for handle %u", handle);
			continue;
		}

		while (count--) {
			struct bt_conn_tx *tx;
			sys_snode_t *node;
			unsigned int key;

			/* irq_lock protects pending_no_cb and the
			 * tx_pending list against the TX path.
			 */
			key = irq_lock();

			/* Completions without a callback are counted in
			 * pending_no_cb and consume no tx context.
			 */
			if (conn->pending_no_cb) {
				conn->pending_no_cb--;
				irq_unlock(key);
				k_sem_give(bt_conn_get_pkts(conn));
				continue;
			}

			node = sys_slist_get(&conn->tx_pending);
			irq_unlock(key);

			if (!node) {
				/* Controller reported more completions than
				 * we have outstanding packets.
				 */
				LOG_ERR("packets count mismatch");
				break;
			}

			tx = CONTAINER_OF(node, struct bt_conn_tx, node);

			/* Move the context to tx_complete and absorb its
			 * trailing no-callback completions.
			 */
			key = irq_lock();
			conn->pending_no_cb = tx->pending_no_cb;
			tx->pending_no_cb = 0U;
			sys_slist_append(&conn->tx_complete, &tx->node);
			irq_unlock(key);

			/* Callbacks run from the work item, not from here. */
			k_work_submit(&conn->tx_complete_work);
			k_sem_give(bt_conn_get_pkts(conn));
		}

		bt_conn_unref(conn);
	}
}
492 #endif /* CONFIG_BT_CONN_TX */
493 
494 #if defined(CONFIG_BT_CONN)
hci_acl(struct net_buf * buf)495 static void hci_acl(struct net_buf *buf)
496 {
497 	struct bt_hci_acl_hdr *hdr;
498 	uint16_t handle, len;
499 	struct bt_conn *conn;
500 	uint8_t flags;
501 
502 	LOG_DBG("buf %p", buf);
503 
504 	BT_ASSERT(buf->len >= sizeof(*hdr));
505 
506 	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
507 	len = sys_le16_to_cpu(hdr->len);
508 	handle = sys_le16_to_cpu(hdr->handle);
509 	flags = bt_acl_flags(handle);
510 
511 	acl(buf)->handle = bt_acl_handle(handle);
512 	acl(buf)->index = BT_CONN_INDEX_INVALID;
513 
514 	LOG_DBG("handle %u len %u flags %u", acl(buf)->handle, len, flags);
515 
516 	if (buf->len != len) {
517 		LOG_ERR("ACL data length mismatch (%u != %u)", buf->len, len);
518 		net_buf_unref(buf);
519 		return;
520 	}
521 
522 	conn = bt_conn_lookup_handle(acl(buf)->handle);
523 	if (!conn) {
524 		LOG_ERR("Unable to find conn for handle %u", acl(buf)->handle);
525 		net_buf_unref(buf);
526 		return;
527 	}
528 
529 	acl(buf)->index = bt_conn_index(conn);
530 
531 	bt_conn_recv(conn, buf, flags);
532 	bt_conn_unref(conn);
533 }
534 
hci_data_buf_overflow(struct net_buf * buf)535 static void hci_data_buf_overflow(struct net_buf *buf)
536 {
537 	struct bt_hci_evt_data_buf_overflow *evt = (void *)buf->data;
538 
539 	LOG_WRN("Data buffer overflow (link type 0x%02x)", evt->link_type);
540 }
541 
542 #if defined(CONFIG_BT_CENTRAL)
set_phy_conn_param(const struct bt_conn * conn,struct bt_hci_ext_conn_phy * phy)543 static void set_phy_conn_param(const struct bt_conn *conn,
544 			       struct bt_hci_ext_conn_phy *phy)
545 {
546 	phy->conn_interval_min = sys_cpu_to_le16(conn->le.interval_min);
547 	phy->conn_interval_max = sys_cpu_to_le16(conn->le.interval_max);
548 	phy->conn_latency = sys_cpu_to_le16(conn->le.latency);
549 	phy->supervision_timeout = sys_cpu_to_le16(conn->le.timeout);
550 
551 	phy->min_ce_len = 0;
552 	phy->max_ce_len = 0;
553 }
554 
/* Initiate a connection using LE_Extended_Create_Connection, with a
 * per-PHY parameter block for each enabled initiating PHY (1M and/or
 * Coded, per bt_dev.create_param.options). Sets BT_DEV_INITIATING on
 * successful command completion.
 */
int bt_le_create_conn_ext(const struct bt_conn *conn)
{
	struct bt_hci_cp_le_ext_create_conn *cp;
	struct bt_hci_ext_conn_phy *phy;
	struct bt_hci_cmd_state_set state;
	bool use_filter = false;
	struct net_buf *buf;
	uint8_t own_addr_type;
	uint8_t num_phys;
	int err;

	/* Auto-connect uses the controller's filter accept list. */
	if (IS_ENABLED(CONFIG_BT_FILTER_ACCEPT_LIST)) {
		use_filter = atomic_test_bit(conn->flags, BT_CONN_AUTO_CONNECT);
	}

	err = bt_id_set_create_conn_own_addr(use_filter, &own_addr_type);
	if (err) {
		return err;
	}

	/* One parameter block per initiating PHY: 1M unless disabled,
	 * plus Coded when requested.
	 */
	num_phys = (!(bt_dev.create_param.options &
		      BT_CONN_LE_OPT_NO_1M) ? 1 : 0) +
		   ((bt_dev.create_param.options &
		      BT_CONN_LE_OPT_CODED) ? 1 : 0);

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_EXT_CREATE_CONN, sizeof(*cp) +
				num_phys * sizeof(*phy));
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	(void)memset(cp, 0, sizeof(*cp));

	if (use_filter) {
		/* User Initiated procedure use fast scan parameters. */
		bt_addr_le_copy(&cp->peer_addr, BT_ADDR_LE_ANY);
		cp->filter_policy = BT_HCI_LE_CREATE_CONN_FP_FILTER;
	} else {
		const bt_addr_le_t *peer_addr = &conn->le.dst;

#if defined(CONFIG_BT_SMP)
		if (bt_dev.le.rl_entries > bt_dev.le.rl_size) {
			/* Host resolving is used, use the RPA directly. */
			peer_addr = &conn->le.resp_addr;
		}
#endif
		bt_addr_le_copy(&cp->peer_addr, peer_addr);
		cp->filter_policy = BT_HCI_LE_CREATE_CONN_FP_NO_FILTER;
	}

	cp->own_addr_type = own_addr_type;
	cp->phys = 0;

	/* PHY parameter blocks must follow in ascending PHY order:
	 * 1M first, then Coded.
	 */
	if (!(bt_dev.create_param.options & BT_CONN_LE_OPT_NO_1M)) {
		cp->phys |= BT_HCI_LE_EXT_SCAN_PHY_1M;
		phy = net_buf_add(buf, sizeof(*phy));
		phy->scan_interval = sys_cpu_to_le16(
			bt_dev.create_param.interval);
		phy->scan_window = sys_cpu_to_le16(
			bt_dev.create_param.window);
		set_phy_conn_param(conn, phy);
	}

	if (bt_dev.create_param.options & BT_CONN_LE_OPT_CODED) {
		cp->phys |= BT_HCI_LE_EXT_SCAN_PHY_CODED;
		phy = net_buf_add(buf, sizeof(*phy));
		phy->scan_interval = sys_cpu_to_le16(
			bt_dev.create_param.interval_coded);
		phy->scan_window = sys_cpu_to_le16(
			bt_dev.create_param.window_coded);
		set_phy_conn_param(conn, phy);
	}

	bt_hci_cmd_state_set_init(buf, &state, bt_dev.flags,
				  BT_DEV_INITIATING, true);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_EXT_CREATE_CONN, buf, NULL);
}
634 
/* Initiate a connection synced to a PAwR subevent using
 * LE_Extended_Create_Connection_v2, tied to advertising set @adv and
 * @subevent. Exactly one initiating PHY block is used; it mirrors the
 * advertising set's secondary PHY. Sets BT_DEV_INITIATING on success.
 */
int bt_le_create_conn_synced(const struct bt_conn *conn, const struct bt_le_ext_adv *adv,
			     uint8_t subevent)
{
	struct bt_hci_cp_le_ext_create_conn_v2 *cp;
	struct bt_hci_ext_conn_phy *phy;
	struct bt_hci_cmd_state_set state;
	struct net_buf *buf;
	uint8_t own_addr_type;
	int err;

	err = bt_id_set_create_conn_own_addr(false, &own_addr_type);
	if (err) {
		return err;
	}

	/* There shall only be one Initiating_PHYs */
	buf = bt_hci_cmd_create(BT_HCI_OP_LE_EXT_CREATE_CONN_V2, sizeof(*cp) + sizeof(*phy));
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	(void)memset(cp, 0, sizeof(*cp));

	cp->subevent = subevent;
	cp->adv_handle = adv->handle;
	bt_addr_le_copy(&cp->peer_addr, &conn->le.dst);
	cp->filter_policy = BT_HCI_LE_CREATE_CONN_FP_NO_FILTER;
	cp->own_addr_type = own_addr_type;

	/* The Initiating_PHY is the secondary phy of the corresponding ext adv set */
	if (adv->options & BT_LE_ADV_OPT_CODED) {
		cp->phys = BT_HCI_LE_EXT_SCAN_PHY_CODED;
	} else if (adv->options & BT_LE_ADV_OPT_NO_2M) {
		cp->phys = BT_HCI_LE_EXT_SCAN_PHY_1M;
	} else {
		cp->phys = BT_HCI_LE_EXT_SCAN_PHY_2M;
	}

	/* Scan interval/window are left zeroed; only the connection
	 * parameters are filled in.
	 */
	phy = net_buf_add(buf, sizeof(*phy));
	(void)memset(phy, 0, sizeof(*phy));
	set_phy_conn_param(conn, phy);

	bt_hci_cmd_state_set_init(buf, &state, bt_dev.flags, BT_DEV_INITIATING, true);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_EXT_CREATE_CONN_V2, buf, NULL);
}
682 
/* Initiate a connection using the legacy LE_Create_Connection command
 * (controllers without extended advertising support). Sets
 * BT_DEV_INITIATING on successful command completion.
 */
static int bt_le_create_conn_legacy(const struct bt_conn *conn)
{
	struct bt_hci_cp_le_create_conn *cp;
	struct bt_hci_cmd_state_set state;
	bool use_filter = false;
	struct net_buf *buf;
	uint8_t own_addr_type;
	int err;

	/* Auto-connect uses the controller's filter accept list. */
	if (IS_ENABLED(CONFIG_BT_FILTER_ACCEPT_LIST)) {
		use_filter = atomic_test_bit(conn->flags, BT_CONN_AUTO_CONNECT);
	}

	err = bt_id_set_create_conn_own_addr(use_filter, &own_addr_type);
	if (err) {
		return err;
	}

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_CREATE_CONN, sizeof(*cp));
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	memset(cp, 0, sizeof(*cp));
	cp->own_addr_type = own_addr_type;

	if (use_filter) {
		/* User Initiated procedure use fast scan parameters. */
		bt_addr_le_copy(&cp->peer_addr, BT_ADDR_LE_ANY);
		cp->filter_policy = BT_HCI_LE_CREATE_CONN_FP_FILTER;
	} else {
		const bt_addr_le_t *peer_addr = &conn->le.dst;

#if defined(CONFIG_BT_SMP)
		if (bt_dev.le.rl_entries > bt_dev.le.rl_size) {
			/* Host resolving is used, use the RPA directly. */
			peer_addr = &conn->le.resp_addr;
		}
#endif
		bt_addr_le_copy(&cp->peer_addr, peer_addr);
		cp->filter_policy = BT_HCI_LE_CREATE_CONN_FP_NO_FILTER;
	}

	cp->scan_interval = sys_cpu_to_le16(bt_dev.create_param.interval);
	cp->scan_window = sys_cpu_to_le16(bt_dev.create_param.window);

	cp->conn_interval_min = sys_cpu_to_le16(conn->le.interval_min);
	cp->conn_interval_max = sys_cpu_to_le16(conn->le.interval_max);
	cp->conn_latency = sys_cpu_to_le16(conn->le.latency);
	cp->supervision_timeout = sys_cpu_to_le16(conn->le.timeout);

	bt_hci_cmd_state_set_init(buf, &state, bt_dev.flags,
				  BT_DEV_INITIATING, true);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_CREATE_CONN, buf, NULL);
}
740 
bt_le_create_conn(const struct bt_conn * conn)741 int bt_le_create_conn(const struct bt_conn *conn)
742 {
743 	if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
744 	    BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
745 		return bt_le_create_conn_ext(conn);
746 	}
747 
748 	return bt_le_create_conn_legacy(conn);
749 }
750 
bt_le_create_conn_cancel(void)751 int bt_le_create_conn_cancel(void)
752 {
753 	struct net_buf *buf;
754 	struct bt_hci_cmd_state_set state;
755 
756 	buf = bt_hci_cmd_create(BT_HCI_OP_LE_CREATE_CONN_CANCEL, 0);
757 
758 	bt_hci_cmd_state_set_init(buf, &state, bt_dev.flags,
759 				  BT_DEV_INITIATING, false);
760 
761 	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_CREATE_CONN_CANCEL, buf, NULL);
762 }
763 #endif /* CONFIG_BT_CENTRAL */
764 
/* Issue the HCI Disconnect command for @handle with HCI reason code
 * @reason and wait for the command status.
 */
int bt_hci_disconnect(uint16_t handle, uint8_t reason)
{
	struct bt_hci_cp_disconnect *disconn;
	struct net_buf *buf = bt_hci_cmd_create(BT_HCI_OP_DISCONNECT, sizeof(*disconn));

	if (buf == NULL) {
		return -ENOBUFS;
	}

	disconn = net_buf_add(buf, sizeof(*disconn));
	disconn->reason = reason;
	disconn->handle = sys_cpu_to_le16(handle);

	return bt_hci_cmd_send_sync(BT_HCI_OP_DISCONNECT, buf, NULL);
}
781 
782 static uint16_t disconnected_handles[CONFIG_BT_MAX_CONN];
disconnected_handles_reset(void)783 static void disconnected_handles_reset(void)
784 {
785 	(void)memset(disconnected_handles, 0, sizeof(disconnected_handles));
786 }
787 
/* Record @handle as disconnected-before-connected, so the later
 * connection-complete event can detect the race (see
 * conn_handle_is_disconnected()).
 */
static void conn_handle_disconnected(uint16_t handle)
{
	for (int i = 0; i < ARRAY_SIZE(disconnected_handles); i++) {
		if (!disconnected_handles[i]) {
			/* Use invalid connection handle bits so that connection
			 * handle 0 can be used as a valid non-zero handle.
			 */
			disconnected_handles[i] = ~BT_ACL_HANDLE_MASK | handle;

			/* Stop after claiming one free slot. Without this
			 * return, a single event would fill every empty slot
			 * with the same handle, exhausting the table and
			 * leaving stale duplicates behind (the lookup only
			 * clears one entry per match).
			 */
			return;
		}
	}
}
799 
conn_handle_is_disconnected(uint16_t handle)800 static bool conn_handle_is_disconnected(uint16_t handle)
801 {
802 	handle |= ~BT_ACL_HANDLE_MASK;
803 
804 	for (int i = 0; i < ARRAY_SIZE(disconnected_handles); i++) {
805 		if (disconnected_handles[i] == handle) {
806 			disconnected_handles[i] = 0;
807 			return true;
808 		}
809 	}
810 
811 	return false;
812 }
813 
hci_disconn_complete_prio(struct net_buf * buf)814 static void hci_disconn_complete_prio(struct net_buf *buf)
815 {
816 	struct bt_hci_evt_disconn_complete *evt = (void *)buf->data;
817 	uint16_t handle = sys_le16_to_cpu(evt->handle);
818 	struct bt_conn *conn;
819 
820 	LOG_DBG("status 0x%02x handle %u reason 0x%02x", evt->status, handle, evt->reason);
821 
822 	if (evt->status) {
823 		return;
824 	}
825 
826 	conn = bt_conn_lookup_handle(handle);
827 	if (!conn) {
828 		/* Priority disconnect complete event received before normal
829 		 * connection complete event.
830 		 */
831 		conn_handle_disconnected(handle);
832 		return;
833 	}
834 
835 	bt_conn_set_state(conn, BT_CONN_DISCONNECT_COMPLETE);
836 	bt_conn_unref(conn);
837 }
838 
/* Normal-priority handler for the Disconnection_Complete event: record
 * the disconnect reason, transition the connection to DISCONNECTED and
 * perform transport-specific cleanup.
 */
static void hci_disconn_complete(struct net_buf *buf)
{
	struct bt_hci_evt_disconn_complete *evt = (void *)buf->data;
	uint16_t handle = sys_le16_to_cpu(evt->handle);
	struct bt_conn *conn;

	LOG_DBG("status 0x%02x handle %u reason 0x%02x", evt->status, handle, evt->reason);

	if (evt->status) {
		return;
	}

	conn = bt_conn_lookup_handle(handle);
	if (!conn) {
		LOG_ERR("Unable to look up conn with handle %u", handle);
		return;
	}

	conn->err = evt->reason;

	bt_conn_set_state(conn, BT_CONN_DISCONNECTED);

	/* Non-LE transports (BR/EDR, SCO) take their own cleanup path
	 * and never reach the LE auto-connect handling below.
	 */
	if (conn->type != BT_CONN_TYPE_LE) {
#if defined(CONFIG_BT_BREDR)
		if (conn->type == BT_CONN_TYPE_SCO) {
			/* bt_sco_cleanup() releases the reference. */
			bt_sco_cleanup(conn);
			return;
		}
		/*
		 * If only for one connection session bond was set, clear keys
		 * database row for this connection.
		 */
		if (conn->type == BT_CONN_TYPE_BR &&
		    atomic_test_and_clear_bit(conn->flags, BT_CONN_BR_NOBOND)) {
			bt_keys_link_key_clear(conn->br.link_key);
		}
#endif
		bt_conn_unref(conn);
		return;
	}

#if defined(CONFIG_BT_CENTRAL) && !defined(CONFIG_BT_FILTER_ACCEPT_LIST)
	/* Auto-connect without a filter accept list: go back to scanning
	 * for the peer so the link is re-established.
	 */
	if (atomic_test_bit(conn->flags, BT_CONN_AUTO_CONNECT)) {
		bt_conn_set_state(conn, BT_CONN_CONNECTING_SCAN);
		bt_le_scan_update(false);
	}
#endif /* defined(CONFIG_BT_CENTRAL) && !defined(CONFIG_BT_FILTER_ACCEPT_LIST) */

	bt_conn_unref(conn);
}
889 
hci_le_read_remote_features(struct bt_conn * conn)890 static int hci_le_read_remote_features(struct bt_conn *conn)
891 {
892 	struct bt_hci_cp_le_read_remote_features *cp;
893 	struct net_buf *buf;
894 
895 	buf = bt_hci_cmd_create(BT_HCI_OP_LE_READ_REMOTE_FEATURES,
896 				sizeof(*cp));
897 	if (!buf) {
898 		return -ENOBUFS;
899 	}
900 
901 	cp = net_buf_add(buf, sizeof(*cp));
902 	cp->handle = sys_cpu_to_le16(conn->handle);
903 	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_REMOTE_FEATURES, buf, NULL);
904 }
905 
hci_read_remote_version(struct bt_conn * conn)906 static int hci_read_remote_version(struct bt_conn *conn)
907 {
908 	struct bt_hci_cp_read_remote_version_info *cp;
909 	struct net_buf *buf;
910 
911 	if (conn->state != BT_CONN_CONNECTED) {
912 		return -ENOTCONN;
913 	}
914 
915 	/* Remote version cannot change. */
916 	if (atomic_test_bit(conn->flags, BT_CONN_AUTO_VERSION_INFO)) {
917 		return 0;
918 	}
919 
920 	buf = bt_hci_cmd_create(BT_HCI_OP_READ_REMOTE_VERSION_INFO,
921 				sizeof(*cp));
922 	if (!buf) {
923 		return -ENOBUFS;
924 	}
925 
926 	cp = net_buf_add(buf, sizeof(*cp));
927 	cp->handle = sys_cpu_to_le16(conn->handle);
928 
929 	return bt_hci_cmd_send_sync(BT_HCI_OP_READ_REMOTE_VERSION_INFO, buf,
930 				    NULL);
931 }
932 
933 /* LE Data Length Change Event is optional so this function just ignore
934  * error and stack will continue to use default values.
935  */
/* Suggest data-length parameters for @conn via LE_Set_Data_Length.
 * The LE Data Length Change event is optional, so callers may ignore
 * failures and keep using the defaults.
 */
int bt_le_set_data_len(struct bt_conn *conn, uint16_t tx_octets, uint16_t tx_time)
{
	struct bt_hci_cp_le_set_data_len *cp;
	struct net_buf *buf;

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_DATA_LEN, sizeof(*cp));
	if (buf == NULL) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->tx_time = sys_cpu_to_le16(tx_time);
	cp->tx_octets = sys_cpu_to_le16(tx_octets);
	cp->handle = sys_cpu_to_le16(conn->handle);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_DATA_LEN, buf, NULL);
}
953 
954 #if defined(CONFIG_BT_USER_PHY_UPDATE)
hci_le_read_phy(struct bt_conn * conn)955 static int hci_le_read_phy(struct bt_conn *conn)
956 {
957 	struct bt_hci_cp_le_read_phy *cp;
958 	struct bt_hci_rp_le_read_phy *rp;
959 	struct net_buf *buf, *rsp;
960 	int err;
961 
962 	buf = bt_hci_cmd_create(BT_HCI_OP_LE_READ_PHY, sizeof(*cp));
963 	if (!buf) {
964 		return -ENOBUFS;
965 	}
966 
967 	cp = net_buf_add(buf, sizeof(*cp));
968 	cp->handle = sys_cpu_to_le16(conn->handle);
969 
970 	err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_PHY, buf, &rsp);
971 	if (err) {
972 		return err;
973 	}
974 
975 	rp = (void *)rsp->data;
976 	conn->le.phy.tx_phy = bt_get_phy(rp->tx_phy);
977 	conn->le.phy.rx_phy = bt_get_phy(rp->rx_phy);
978 	net_buf_unref(rsp);
979 
980 	return 0;
981 }
982 #endif /* defined(CONFIG_BT_USER_PHY_UPDATE) */
983 
/* Request a PHY change on @conn via LE_Set_PHY with the given
 * all_phys flags, preferred TX/RX PHY masks and coded PHY options.
 */
int bt_le_set_phy(struct bt_conn *conn, uint8_t all_phys,
		  uint8_t pref_tx_phy, uint8_t pref_rx_phy, uint8_t phy_opts)
{
	struct bt_hci_cp_le_set_phy *cp;
	struct net_buf *buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_PHY, sizeof(*cp));

	if (buf == NULL) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->phy_opts = phy_opts;
	cp->rx_phys = pref_rx_phy;
	cp->tx_phys = pref_tx_phy;
	cp->all_phys = all_phys;
	cp->handle = sys_cpu_to_le16(conn->handle);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_PHY, buf, NULL);
}
1004 
/* Look up the connection object currently awaiting establishment for
 * the given @role/@peer_addr pair: CONNECTING (or CONNECTING_AUTO when
 * the filter accept list is used) as central, CONNECTING_DIR_ADV or
 * CONNECTING_ADV as peripheral. Returns NULL when none matches.
 */
static struct bt_conn *find_pending_connect(uint8_t role, bt_addr_le_t *peer_addr)
{
	struct bt_conn *conn = NULL;

	if (IS_ENABLED(CONFIG_BT_CENTRAL) && role == BT_HCI_ROLE_CENTRAL) {
		conn = bt_conn_lookup_state_le(BT_ID_DEFAULT, peer_addr,
					       BT_CONN_CONNECTING);
		if (!conn && IS_ENABLED(CONFIG_BT_FILTER_ACCEPT_LIST)) {
			conn = bt_conn_lookup_state_le(BT_ID_DEFAULT,
						       BT_ADDR_LE_NONE,
						       BT_CONN_CONNECTING_AUTO);
		}
	} else if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && role == BT_HCI_ROLE_PERIPHERAL) {
		conn = bt_conn_lookup_state_le(bt_dev.adv_conn_id, peer_addr,
					       BT_CONN_CONNECTING_DIR_ADV);
		if (!conn) {
			conn = bt_conn_lookup_state_le(bt_dev.adv_conn_id,
						       BT_ADDR_LE_NONE,
						       BT_CONN_CONNECTING_ADV);
		}
	}

	return conn;
}
1039 
1040 /* We don't want the application to get a PHY update callback upon connection
1041  * establishment on 2M PHY. Therefore we must prevent issuing LE Set PHY
1042  * in this scenario.
1043  */
skip_auto_phy_update_on_conn_establishment(struct bt_conn * conn)1044 static bool skip_auto_phy_update_on_conn_establishment(struct bt_conn *conn)
1045 {
1046 #if defined(CONFIG_BT_USER_PHY_UPDATE)
1047 	if (IS_ENABLED(CONFIG_BT_AUTO_PHY_UPDATE) &&
1048 	    IS_ENABLED(CONFIG_BT_EXT_ADV) &&
1049 	    BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
1050 		if (conn->le.phy.tx_phy == BT_HCI_LE_PHY_2M &&
1051 		    conn->le.phy.rx_phy == BT_HCI_LE_PHY_2M) {
1052 			return true;
1053 		}
1054 	}
1055 #else
1056 	ARG_UNUSED(conn);
1057 #endif /* defined(CONFIG_BT_USER_PHY_UPDATE) */
1058 
1059 	return false;
1060 }
1061 
/* Start the procedures the host auto-initiates right after a connection is
 * established: remote feature exchange, remote version exchange, PHY update
 * to 2M and data length update, each gated by Kconfig and controller
 * feature support. Errors are logged but not fatal.
 */
static void conn_auto_initiate(struct bt_conn *conn)
{
	int err;

	if (conn->state != BT_CONN_CONNECTED) {
		/* It is possible that connection was disconnected directly from
		 * connected callback so we must check state before doing
		 * connection parameters update.
		 */
		return;
	}

	/* Feature exchange: skip if already done; a peripheral may only
	 * initiate it when the controller supports the peripheral-initiated
	 * feature exchange.
	 */
	if (!atomic_test_bit(conn->flags, BT_CONN_AUTO_FEATURE_EXCH) &&
	    ((conn->role == BT_HCI_ROLE_CENTRAL) ||
	     BT_FEAT_LE_PER_INIT_FEAT_XCHG(bt_dev.le.features))) {
		err = hci_le_read_remote_features(conn);
		if (err) {
			LOG_ERR("Failed read remote features (%d)", err);
		}
	}

	/* Version exchange: only once per connection. */
	if (IS_ENABLED(CONFIG_BT_REMOTE_VERSION) &&
	    !atomic_test_bit(conn->flags, BT_CONN_AUTO_VERSION_INFO)) {
		err = hci_read_remote_version(conn);
		if (err) {
			LOG_ERR("Failed read remote version (%d)", err);
		}
	}

	/* Prefer 2M PHY when supported, unless the link already came up on
	 * 2M (see skip_auto_phy_update_on_conn_establishment()).
	 */
	if (IS_ENABLED(CONFIG_BT_AUTO_PHY_UPDATE) &&
	    BT_FEAT_LE_PHY_2M(bt_dev.le.features) &&
	    !skip_auto_phy_update_on_conn_establishment(conn)) {
		err = bt_le_set_phy(conn, 0U, BT_HCI_LE_PHY_PREFER_2M,
				    BT_HCI_LE_PHY_PREFER_2M,
				    BT_HCI_LE_PHY_CODED_ANY);
		if (err) {
			LOG_ERR("Failed LE Set PHY (%d)", err);
		}
	}

	/* Data length update: only needed from the host on controllers with
	 * the no-auto-DLE quirk; others initiate it themselves.
	 */
	if (IS_ENABLED(CONFIG_BT_AUTO_DATA_LEN_UPDATE) &&
	    BT_FEAT_LE_DLE(bt_dev.le.features)) {
		if (IS_BT_QUIRK_NO_AUTO_DLE(&bt_dev)) {
			uint16_t tx_octets, tx_time;

			err = hci_le_read_max_data_len(&tx_octets, &tx_time);
			if (!err) {
				err = bt_le_set_data_len(conn,
						tx_octets, tx_time);
				if (err) {
					LOG_ERR("Failed to set data len (%d)", err);
				}
			}
		} else {
			/* No need to auto-initiate DLE procedure.
			 * It is done by the controller.
			 */
		}
	}
}
1122 
/* Handle a connection complete event that reports a cancelled outgoing
 * (central role) connection attempt, e.g. create connection timeout or RPA
 * expiry.
 *
 * @param err HCI error code to record on the connection object.
 */
static void le_conn_complete_cancel(uint8_t err)
{
	struct bt_conn *conn;

	/* Handle create connection cancel.
	 *
	 * There is no need to check ID address as only one
	 * connection in central role can be in pending state.
	 */
	conn = find_pending_connect(BT_HCI_ROLE_CENTRAL, NULL);
	if (!conn) {
		LOG_ERR("No pending central connection");
		return;
	}

	conn->err = err;

	/* Handle cancellation of outgoing connection attempt. */
	if (!IS_ENABLED(CONFIG_BT_FILTER_ACCEPT_LIST)) {
		/* We notify before checking autoconnect flag
		 * as application may choose to change it from
		 * callback.
		 */
		bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
		/* Check if device is marked for autoconnect. */
		if (atomic_test_bit(conn->flags, BT_CONN_AUTO_CONNECT)) {
			/* Restart passive scanner for device */
			bt_conn_set_state(conn, BT_CONN_CONNECTING_SCAN);
		}
	} else {
		if (atomic_test_bit(conn->flags, BT_CONN_AUTO_CONNECT)) {
			/* Restart FAL initiator after RPA timeout. */
			bt_le_create_conn(conn);
		} else {
			/* Create connection canceled by timeout */
			bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
		}
	}

	/* Drop the reference taken by find_pending_connect(). */
	bt_conn_unref(conn);
}
1164 
/* Handle a connection complete event with status BT_HCI_ERR_ADV_TIMEOUT:
 * high duty cycle directed advertising expired without a connection.
 * Only relevant for legacy advertising; with extended advertising the
 * controller reports this via the advertising set terminated event.
 */
static void le_conn_complete_adv_timeout(void)
{
	if (!(IS_ENABLED(CONFIG_BT_EXT_ADV) &&
	      BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features))) {
		struct bt_le_ext_adv *adv = bt_le_adv_lookup_legacy();
		struct bt_conn *conn;

		/* Handle advertising timeout after high duty cycle directed
		 * advertising.
		 */

		atomic_clear_bit(adv->flags, BT_ADV_ENABLED);

		if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
		    !BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
			/* No advertising set terminated event, must be a
			 * legacy advertiser set.
			 */
			bt_le_adv_delete_legacy();
		}

		/* There is no need to check ID address as only one
		 * connection in peripheral role can be in pending state.
		 */
		conn = find_pending_connect(BT_HCI_ROLE_PERIPHERAL, NULL);
		if (!conn) {
			LOG_ERR("No pending peripheral connection");
			return;
		}

		conn->err = BT_HCI_ERR_ADV_TIMEOUT;
		bt_conn_set_state(conn, BT_CONN_DISCONNECTED);

		/* Drop the reference taken by find_pending_connect(). */
		bt_conn_unref(conn);
	}
}
1201 
/* Dispatch an LE Enhanced Connection Complete (v1-shaped) event.
 *
 * With multiple extended advertising sets, a successful peripheral
 * connection cannot be processed until the matching advertising set
 * terminated event identifies which set connected; in that case the event
 * is cached in bt_dev.cached_conn_complete and handled later. All other
 * events are processed immediately.
 */
static void enh_conn_complete(struct bt_hci_evt_le_enh_conn_complete *evt)
{
#if defined(CONFIG_BT_CONN) && (CONFIG_BT_EXT_ADV_MAX_ADV_SET > 1)
	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
		evt->role == BT_HCI_ROLE_PERIPHERAL &&
		evt->status == BT_HCI_ERR_SUCCESS &&
		(IS_ENABLED(CONFIG_BT_EXT_ADV) &&
				BT_FEAT_LE_EXT_ADV(bt_dev.le.features))) {

		/* Cache the connection complete event. Process it later.
		 * See bt_dev.cached_conn_complete.
		 */
		for (int i = 0; i < ARRAY_SIZE(bt_dev.cached_conn_complete); i++) {
			if (!bt_dev.cached_conn_complete[i].valid) {
				(void)memcpy(&bt_dev.cached_conn_complete[i].evt,
					evt,
					sizeof(struct bt_hci_evt_le_enh_conn_complete));
				bt_dev.cached_conn_complete[i].valid = true;
				return;
			}
		}

		/* The cache is sized for one entry per advertising set, so
		 * running out indicates an internal inconsistency.
		 */
		__ASSERT(false, "No more cache entries available."
				"This should not happen by design");

		return;
	}
#endif
	bt_hci_le_enh_conn_complete(evt);
}
1232 
/* Derive the identity address and the on-air (connection) address of the
 * peer from an enhanced connection complete event.
 */
static void translate_addrs(bt_addr_le_t *peer_addr, bt_addr_le_t *id_addr,
			    const struct bt_hci_evt_le_enh_conn_complete *evt, uint8_t id)
{
	if (!bt_addr_le_is_resolved(&evt->peer_addr)) {
		/* Address not resolved by the controller: try to map it to a
		 * known identity, and use it as-is on air.
		 */
		bt_addr_le_copy(id_addr, bt_lookup_id_addr(id, &evt->peer_addr));
		bt_addr_le_copy(peer_addr, &evt->peer_addr);
		return;
	}

	/* Controller resolved an RPA: the event address is the identity
	 * address and the peer RPA is what was seen on air.
	 */
	bt_addr_le_copy_resolved(id_addr, &evt->peer_addr);
	bt_addr_copy(&peer_addr->a, &evt->peer_rpa);
	peer_addr->type = BT_ADDR_LE_RANDOM;
}
1246 
update_conn(struct bt_conn * conn,const bt_addr_le_t * id_addr,const struct bt_hci_evt_le_enh_conn_complete * evt)1247 static void update_conn(struct bt_conn *conn, const bt_addr_le_t *id_addr,
1248 			const struct bt_hci_evt_le_enh_conn_complete *evt)
1249 {
1250 	conn->handle = sys_le16_to_cpu(evt->handle);
1251 	bt_addr_le_copy(&conn->le.dst, id_addr);
1252 	conn->le.interval = sys_le16_to_cpu(evt->interval);
1253 	conn->le.latency = sys_le16_to_cpu(evt->latency);
1254 	conn->le.timeout = sys_le16_to_cpu(evt->supv_timeout);
1255 	conn->role = evt->role;
1256 	conn->err = 0U;
1257 
1258 #if defined(CONFIG_BT_USER_DATA_LEN_UPDATE)
1259 	conn->le.data_len.tx_max_len = BT_GAP_DATA_LEN_DEFAULT;
1260 	conn->le.data_len.tx_max_time = BT_GAP_DATA_TIME_DEFAULT;
1261 	conn->le.data_len.rx_max_len = BT_GAP_DATA_LEN_DEFAULT;
1262 	conn->le.data_len.rx_max_time = BT_GAP_DATA_TIME_DEFAULT;
1263 #endif
1264 }
1265 
bt_hci_le_enh_conn_complete(struct bt_hci_evt_le_enh_conn_complete * evt)1266 void bt_hci_le_enh_conn_complete(struct bt_hci_evt_le_enh_conn_complete *evt)
1267 {
1268 	uint16_t handle = sys_le16_to_cpu(evt->handle);
1269 	bool is_disconnected = conn_handle_is_disconnected(handle);
1270 	bt_addr_le_t peer_addr, id_addr;
1271 	struct bt_conn *conn;
1272 	uint8_t id;
1273 
1274 	LOG_DBG("status 0x%02x handle %u role %u peer %s peer RPA %s", evt->status, handle,
1275 		evt->role, bt_addr_le_str(&evt->peer_addr), bt_addr_str(&evt->peer_rpa));
1276 	LOG_DBG("local RPA %s", bt_addr_str(&evt->local_rpa));
1277 
1278 #if defined(CONFIG_BT_SMP)
1279 	bt_id_pending_keys_update();
1280 #endif
1281 
1282 	if (evt->status) {
1283 		if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
1284 		    evt->status == BT_HCI_ERR_ADV_TIMEOUT) {
1285 			le_conn_complete_adv_timeout();
1286 			return;
1287 		}
1288 
1289 		if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
1290 		    evt->status == BT_HCI_ERR_UNKNOWN_CONN_ID) {
1291 			le_conn_complete_cancel(evt->status);
1292 			bt_le_scan_update(false);
1293 			return;
1294 		}
1295 
1296 		if (IS_ENABLED(CONFIG_BT_CENTRAL) && IS_ENABLED(CONFIG_BT_PER_ADV_RSP) &&
1297 		    evt->status == BT_HCI_ERR_CONN_FAIL_TO_ESTAB) {
1298 			le_conn_complete_cancel(evt->status);
1299 
1300 			atomic_clear_bit(bt_dev.flags, BT_DEV_INITIATING);
1301 
1302 			return;
1303 		}
1304 
1305 		LOG_WRN("Unexpected status 0x%02x", evt->status);
1306 
1307 		return;
1308 	}
1309 
1310 	id = evt->role == BT_HCI_ROLE_PERIPHERAL ? bt_dev.adv_conn_id : BT_ID_DEFAULT;
1311 	translate_addrs(&peer_addr, &id_addr, evt, id);
1312 
1313 	conn = find_pending_connect(evt->role, &id_addr);
1314 
1315 	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
1316 	    evt->role == BT_HCI_ROLE_PERIPHERAL &&
1317 	    !(IS_ENABLED(CONFIG_BT_EXT_ADV) &&
1318 	      BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features))) {
1319 		struct bt_le_ext_adv *adv = bt_le_adv_lookup_legacy();
1320 		/* Clear advertising even if we are not able to add connection
1321 		 * object to keep host in sync with controller state.
1322 		 */
1323 		atomic_clear_bit(adv->flags, BT_ADV_ENABLED);
1324 		(void)bt_le_lim_adv_cancel_timeout(adv);
1325 	}
1326 
1327 	if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
1328 	    evt->role == BT_HCI_ROLE_CENTRAL) {
1329 		/* Clear initiating even if we are not able to add connection
1330 		 * object to keep the host in sync with controller state.
1331 		 */
1332 		atomic_clear_bit(bt_dev.flags, BT_DEV_INITIATING);
1333 	}
1334 
1335 	if (!conn) {
1336 		LOG_ERR("No pending conn for peer %s", bt_addr_le_str(&evt->peer_addr));
1337 		bt_hci_disconnect(handle, BT_HCI_ERR_REMOTE_USER_TERM_CONN);
1338 		return;
1339 	}
1340 
1341 	update_conn(conn, &id_addr, evt);
1342 
1343 #if defined(CONFIG_BT_USER_PHY_UPDATE)
1344 	conn->le.phy.tx_phy = BT_GAP_LE_PHY_1M;
1345 	conn->le.phy.rx_phy = BT_GAP_LE_PHY_1M;
1346 #endif
1347 	/*
1348 	 * Use connection address (instead of identity address) as initiator
1349 	 * or responder address. Only peripheral needs to be updated. For central all
1350 	 * was set during outgoing connection creation.
1351 	 */
1352 	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
1353 	    conn->role == BT_HCI_ROLE_PERIPHERAL) {
1354 		bt_addr_le_copy(&conn->le.init_addr, &peer_addr);
1355 
1356 		if (!(IS_ENABLED(CONFIG_BT_EXT_ADV) &&
1357 		      BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features))) {
1358 			struct bt_le_ext_adv *adv = bt_le_adv_lookup_legacy();
1359 
1360 			if (IS_ENABLED(CONFIG_BT_PRIVACY) &&
1361 			    !atomic_test_bit(adv->flags, BT_ADV_USE_IDENTITY)) {
1362 				conn->le.resp_addr.type = BT_ADDR_LE_RANDOM;
1363 				if (!bt_addr_eq(&evt->local_rpa, BT_ADDR_ANY)) {
1364 					bt_addr_copy(&conn->le.resp_addr.a,
1365 						     &evt->local_rpa);
1366 				} else {
1367 					bt_addr_copy(&conn->le.resp_addr.a,
1368 						     &bt_dev.random_addr.a);
1369 				}
1370 			} else {
1371 				bt_addr_le_copy(&conn->le.resp_addr,
1372 						&bt_dev.id_addr[conn->id]);
1373 			}
1374 		} else {
1375 			/* Copy the local RPA and handle this in advertising set
1376 			 * terminated event.
1377 			 */
1378 			bt_addr_copy(&conn->le.resp_addr.a, &evt->local_rpa);
1379 		}
1380 
1381 		/* if the controller supports, lets advertise for another
1382 		 * peripheral connection.
1383 		 * check for connectable advertising state is sufficient as
1384 		 * this is how this le connection complete for peripheral occurred.
1385 		 */
1386 		if (BT_LE_STATES_PER_CONN_ADV(bt_dev.le.states)) {
1387 			bt_le_adv_resume();
1388 		}
1389 
1390 		if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
1391 		    !BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
1392 			struct bt_le_ext_adv *adv = bt_le_adv_lookup_legacy();
1393 			/* No advertising set terminated event, must be a
1394 			 * legacy advertiser set.
1395 			 */
1396 			if (!atomic_test_bit(adv->flags, BT_ADV_PERSIST)) {
1397 				bt_le_adv_delete_legacy();
1398 			}
1399 		}
1400 	}
1401 
1402 	if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
1403 	    conn->role == BT_HCI_ROLE_CENTRAL) {
1404 		bt_addr_le_copy(&conn->le.resp_addr, &peer_addr);
1405 
1406 		if (IS_ENABLED(CONFIG_BT_PRIVACY)) {
1407 			conn->le.init_addr.type = BT_ADDR_LE_RANDOM;
1408 			if (!bt_addr_eq(&evt->local_rpa, BT_ADDR_ANY)) {
1409 				bt_addr_copy(&conn->le.init_addr.a,
1410 					     &evt->local_rpa);
1411 			} else {
1412 				bt_addr_copy(&conn->le.init_addr.a,
1413 					     &bt_dev.random_addr.a);
1414 			}
1415 		} else {
1416 			bt_addr_le_copy(&conn->le.init_addr,
1417 					&bt_dev.id_addr[conn->id]);
1418 		}
1419 	}
1420 
1421 #if defined(CONFIG_BT_USER_PHY_UPDATE)
1422 	if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
1423 	    BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
1424 		int err;
1425 
1426 		err = hci_le_read_phy(conn);
1427 		if (err) {
1428 			LOG_WRN("Failed to read PHY (%d)", err);
1429 		}
1430 	}
1431 #endif /* defined(CONFIG_BT_USER_PHY_UPDATE) */
1432 
1433 	bt_conn_set_state(conn, BT_CONN_CONNECTED);
1434 
1435 	if (is_disconnected) {
1436 		/* Mark the connection as already disconnected before calling
1437 		 * the connected callback, so that the application cannot
1438 		 * start sending packets
1439 		 */
1440 		bt_conn_set_state(conn, BT_CONN_DISCONNECT_COMPLETE);
1441 	}
1442 
1443 	bt_conn_connected(conn);
1444 
1445 	/* Start auto-initiated procedures */
1446 	conn_auto_initiate(conn);
1447 
1448 	bt_conn_unref(conn);
1449 
1450 	if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
1451 	    conn->role == BT_HCI_ROLE_CENTRAL) {
1452 		bt_le_scan_update(false);
1453 	}
1454 }
1455 
1456 #if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
bt_hci_le_enh_conn_complete_sync(struct bt_hci_evt_le_enh_conn_complete_v2 * evt,struct bt_le_per_adv_sync * sync)1457 void bt_hci_le_enh_conn_complete_sync(struct bt_hci_evt_le_enh_conn_complete_v2 *evt,
1458 				      struct bt_le_per_adv_sync *sync)
1459 {
1460 	uint16_t handle = sys_le16_to_cpu(evt->handle);
1461 	bool is_disconnected = conn_handle_is_disconnected(handle);
1462 	bt_addr_le_t peer_addr, id_addr;
1463 	struct bt_conn *conn;
1464 
1465 	if (!sync->num_subevents) {
1466 		LOG_ERR("Unexpected connection complete event");
1467 
1468 		return;
1469 	}
1470 
1471 	conn = bt_conn_add_le(BT_ID_DEFAULT, BT_ADDR_LE_ANY);
1472 	if (!conn) {
1473 		LOG_ERR("Unable to allocate connection");
1474 		/* Tell the controller to disconnect to keep it in sync with
1475 		 * the host state and avoid a "rogue" connection.
1476 		 */
1477 		bt_hci_disconnect(handle, BT_HCI_ERR_REMOTE_USER_TERM_CONN);
1478 
1479 		return;
1480 	}
1481 
1482 	LOG_DBG("status 0x%02x handle %u role %u peer %s peer RPA %s", evt->status, handle,
1483 		evt->role, bt_addr_le_str(&evt->peer_addr), bt_addr_str(&evt->peer_rpa));
1484 	LOG_DBG("local RPA %s", bt_addr_str(&evt->local_rpa));
1485 
1486 	if (evt->role != BT_HCI_ROLE_PERIPHERAL) {
1487 		LOG_ERR("PAwR sync always becomes peripheral");
1488 
1489 		return;
1490 	}
1491 
1492 #if defined(CONFIG_BT_SMP)
1493 	bt_id_pending_keys_update();
1494 #endif
1495 
1496 	if (evt->status) {
1497 		LOG_ERR("Unexpected status 0x%02x", evt->status);
1498 
1499 		return;
1500 	}
1501 
1502 	translate_addrs(&peer_addr, &id_addr, (const struct bt_hci_evt_le_enh_conn_complete *)evt,
1503 			BT_ID_DEFAULT);
1504 	update_conn(conn, &id_addr, (const struct bt_hci_evt_le_enh_conn_complete *)evt);
1505 
1506 #if defined(CONFIG_BT_USER_PHY_UPDATE)
1507 	/* The connection is always initated on the same phy as the PAwR advertiser */
1508 	conn->le.phy.tx_phy = sync->phy;
1509 	conn->le.phy.rx_phy = sync->phy;
1510 #endif
1511 
1512 	bt_addr_le_copy(&conn->le.init_addr, &peer_addr);
1513 
1514 	/* There is no random addr to get, set responder addr to local identity addr. */
1515 	bt_addr_le_copy(&conn->le.resp_addr, &bt_dev.id_addr[conn->id]);
1516 
1517 	bt_conn_set_state(conn, BT_CONN_CONNECTED);
1518 
1519 	if (is_disconnected) {
1520 		/* Mark the connection as already disconnected before calling
1521 		 * the connected callback, so that the application cannot
1522 		 * start sending packets
1523 		 */
1524 		bt_conn_set_state(conn, BT_CONN_DISCONNECT_COMPLETE);
1525 	}
1526 
1527 	bt_conn_connected(conn);
1528 
1529 	/* Since we don't give the application a reference to manage
1530 	 * for peripheral connections, we need to release this reference here.
1531 	 */
1532 	bt_conn_unref(conn);
1533 
1534 	/* Start auto-initiated procedures */
1535 	conn_auto_initiate(conn);
1536 }
1537 #endif /* CONFIG_BT_PER_ADV_SYNC_RSP */
1538 
le_enh_conn_complete(struct net_buf * buf)1539 static void le_enh_conn_complete(struct net_buf *buf)
1540 {
1541 	enh_conn_complete((void *)buf->data);
1542 }
1543 
1544 #if defined(CONFIG_BT_PER_ADV_RSP) || defined(CONFIG_BT_PER_ADV_SYNC_RSP)
/* HCI event handler for the v2 enhanced connection complete event.
 *
 * The adv_handle/sync_handle pair identifies how the connection came about:
 * both invalid -> regular connection (treat as v1); valid adv_handle ->
 * created via a PAwR advertiser (also v1-compatible); valid sync_handle ->
 * created via PAwR sync, which needs dedicated handling since no
 * advertising set terminated event will follow.
 */
static void le_enh_conn_complete_v2(struct net_buf *buf)
{
	struct bt_hci_evt_le_enh_conn_complete_v2 *evt =
		(struct bt_hci_evt_le_enh_conn_complete_v2 *)buf->data;

	if (evt->adv_handle == BT_HCI_ADV_HANDLE_INVALID &&
	    evt->sync_handle == BT_HCI_SYNC_HANDLE_INVALID) {
		/* The connection was not created via PAwR, handle the event like v1 */
		enh_conn_complete((struct bt_hci_evt_le_enh_conn_complete *)evt);
	}
#if defined(CONFIG_BT_PER_ADV_RSP)
	else if (evt->adv_handle != BT_HCI_ADV_HANDLE_INVALID &&
		 evt->sync_handle == BT_HCI_SYNC_HANDLE_INVALID) {
		/* The connection was created via PAwR advertiser, it can be handled like v1 */
		enh_conn_complete((struct bt_hci_evt_le_enh_conn_complete *)evt);
	}
#endif /* CONFIG_BT_PER_ADV_RSP */
#if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
	else if (evt->adv_handle == BT_HCI_ADV_HANDLE_INVALID &&
		 evt->sync_handle != BT_HCI_SYNC_HANDLE_INVALID) {
		/* Created via PAwR sync, no adv set terminated event, needs separate handling */
		struct bt_le_per_adv_sync *sync;

		sync = bt_hci_get_per_adv_sync(evt->sync_handle);
		if (!sync) {
			LOG_ERR("Unknown sync handle %d", evt->sync_handle);

			return;
		}

		bt_hci_le_enh_conn_complete_sync(evt, sync);
	}
#endif /* CONFIG_BT_PER_ADV_SYNC_RSP */
	else {
		/* Both handles valid, or a combination disabled by Kconfig. */
		LOG_ERR("Invalid connection complete event");
	}
}
1582 #endif /* CONFIG_BT_PER_ADV_RSP || CONFIG_BT_PER_ADV_SYNC_RSP */
1583 
le_legacy_conn_complete(struct net_buf * buf)1584 static void le_legacy_conn_complete(struct net_buf *buf)
1585 {
1586 	struct bt_hci_evt_le_conn_complete *evt = (void *)buf->data;
1587 	struct bt_hci_evt_le_enh_conn_complete enh;
1588 
1589 	LOG_DBG("status 0x%02x role %u %s", evt->status, evt->role,
1590 		bt_addr_le_str(&evt->peer_addr));
1591 
1592 	enh.status         = evt->status;
1593 	enh.handle         = evt->handle;
1594 	enh.role           = evt->role;
1595 	enh.interval       = evt->interval;
1596 	enh.latency        = evt->latency;
1597 	enh.supv_timeout   = evt->supv_timeout;
1598 	enh.clock_accuracy = evt->clock_accuracy;
1599 
1600 	bt_addr_le_copy(&enh.peer_addr, &evt->peer_addr);
1601 
1602 	if (IS_ENABLED(CONFIG_BT_PRIVACY)) {
1603 		bt_addr_copy(&enh.local_rpa, &bt_dev.random_addr.a);
1604 	} else {
1605 		bt_addr_copy(&enh.local_rpa, BT_ADDR_ANY);
1606 	}
1607 
1608 	bt_addr_copy(&enh.peer_rpa, BT_ADDR_ANY);
1609 
1610 	enh_conn_complete(&enh);
1611 }
1612 
le_remote_feat_complete(struct net_buf * buf)1613 static void le_remote_feat_complete(struct net_buf *buf)
1614 {
1615 	struct bt_hci_evt_le_remote_feat_complete *evt = (void *)buf->data;
1616 	uint16_t handle = sys_le16_to_cpu(evt->handle);
1617 	struct bt_conn *conn;
1618 
1619 	conn = bt_conn_lookup_handle(handle);
1620 	if (!conn) {
1621 		LOG_ERR("Unable to lookup conn for handle %u", handle);
1622 		return;
1623 	}
1624 
1625 	if (!evt->status) {
1626 		memcpy(conn->le.features, evt->features,
1627 		       sizeof(conn->le.features));
1628 	}
1629 
1630 	atomic_set_bit(conn->flags, BT_CONN_AUTO_FEATURE_EXCH);
1631 
1632 	if (IS_ENABLED(CONFIG_BT_REMOTE_INFO) &&
1633 	    !IS_ENABLED(CONFIG_BT_REMOTE_VERSION)) {
1634 		notify_remote_info(conn);
1635 	}
1636 
1637 	bt_conn_unref(conn);
1638 }
1639 
1640 #if defined(CONFIG_BT_DATA_LEN_UPDATE)
/* HCI event handler for LE Data Length Change: record the negotiated
 * TX/RX payload sizes and times on the connection and notify the
 * application (when user data length updates are enabled).
 */
static void le_data_len_change(struct net_buf *buf)
{
	struct bt_hci_evt_le_data_len_change *evt = (void *)buf->data;
	uint16_t handle = sys_le16_to_cpu(evt->handle);
	struct bt_conn *conn;

	conn = bt_conn_lookup_handle(handle);
	if (!conn) {
		LOG_ERR("Unable to lookup conn for handle %u", handle);
		return;
	}

#if defined(CONFIG_BT_USER_DATA_LEN_UPDATE)
	/* Octet counts and times (microseconds) are little-endian on the wire. */
	uint16_t max_tx_octets = sys_le16_to_cpu(evt->max_tx_octets);
	uint16_t max_rx_octets = sys_le16_to_cpu(evt->max_rx_octets);
	uint16_t max_tx_time = sys_le16_to_cpu(evt->max_tx_time);
	uint16_t max_rx_time = sys_le16_to_cpu(evt->max_rx_time);

	LOG_DBG("max. tx: %u (%uus), max. rx: %u (%uus)", max_tx_octets, max_tx_time, max_rx_octets,
		max_rx_time);

	conn->le.data_len.tx_max_len = max_tx_octets;
	conn->le.data_len.tx_max_time = max_tx_time;
	conn->le.data_len.rx_max_len = max_rx_octets;
	conn->le.data_len.rx_max_time = max_rx_time;
	notify_le_data_len_updated(conn);
#endif

	bt_conn_unref(conn);
}
1671 #endif /* CONFIG_BT_DATA_LEN_UPDATE */
1672 
1673 #if defined(CONFIG_BT_PHY_UPDATE)
/* HCI event handler for LE PHY Update Complete: record the new TX/RX PHYs
 * on the connection and notify the application (when user PHY updates are
 * enabled).
 */
static void le_phy_update_complete(struct net_buf *buf)
{
	struct bt_hci_evt_le_phy_update_complete *evt = (void *)buf->data;
	uint16_t handle = sys_le16_to_cpu(evt->handle);
	struct bt_conn *conn;

	conn = bt_conn_lookup_handle(handle);
	if (!conn) {
		LOG_ERR("Unable to lookup conn for handle %u", handle);
		return;
	}

	LOG_DBG("PHY updated: status: 0x%02x, tx: %u, rx: %u", evt->status, evt->tx_phy,
		evt->rx_phy);

#if defined(CONFIG_BT_USER_PHY_UPDATE)
	/* Translate the HCI PHY encoding into the GAP bitfield form. */
	conn->le.phy.tx_phy = bt_get_phy(evt->tx_phy);
	conn->le.phy.rx_phy = bt_get_phy(evt->rx_phy);
	notify_le_phy_updated(conn);
#endif

	bt_conn_unref(conn);
}
1697 #endif /* CONFIG_BT_PHY_UPDATE */
1698 
bt_le_conn_params_valid(const struct bt_le_conn_param * param)1699 bool bt_le_conn_params_valid(const struct bt_le_conn_param *param)
1700 {
1701 	/* All limits according to BT Core spec 5.0 [Vol 2, Part E, 7.8.12] */
1702 
1703 	if (param->interval_min > param->interval_max ||
1704 	    param->interval_min < 6 || param->interval_max > 3200) {
1705 		return false;
1706 	}
1707 
1708 	if (param->latency > 499) {
1709 		return false;
1710 	}
1711 
1712 	if (param->timeout < 10 || param->timeout > 3200 ||
1713 	    ((param->timeout * 4U) <=
1714 	     ((1U + param->latency) * param->interval_max))) {
1715 		return false;
1716 	}
1717 
1718 	return true;
1719 }
1720 
/* Send an LE Remote Connection Parameter Request Negative Reply.
 *
 * @param handle Connection handle the request arrived on.
 * @param reason HCI error code explaining the rejection.
 */
static void le_conn_param_neg_reply(uint16_t handle, uint8_t reason)
{
	struct bt_hci_cp_le_conn_param_req_neg_reply *cp;
	struct net_buf *buf;

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY,
				sizeof(*cp));
	if (!buf) {
		LOG_ERR("Unable to allocate buffer");
		return;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(handle);
	/* Reason is a single octet in the command; the previous
	 * sys_cpu_to_le16() wrapper truncated the byteswapped value to 0 on
	 * big-endian hosts.
	 */
	cp->reason = reason;

	bt_hci_cmd_send(BT_HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, buf);
}
1739 
/* Send an LE Remote Connection Parameter Request Reply accepting the given
 * parameters. Returns 0 on success or a negative errno.
 */
static int le_conn_param_req_reply(uint16_t handle,
				   const struct bt_le_conn_param *param)
{
	struct net_buf *cmd;
	struct bt_hci_cp_le_conn_param_req_reply *reply;

	cmd = bt_hci_cmd_create(BT_HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(*reply));
	if (cmd == NULL) {
		return -ENOBUFS;
	}

	/* Zero the whole command first; the min/max CE length fields are
	 * deliberately left at 0.
	 */
	reply = net_buf_add(cmd, sizeof(*reply));
	(void)memset(reply, 0, sizeof(*reply));

	reply->handle = sys_cpu_to_le16(handle);
	reply->interval_min = sys_cpu_to_le16(param->interval_min);
	reply->interval_max = sys_cpu_to_le16(param->interval_max);
	reply->latency = sys_cpu_to_le16(param->latency);
	reply->timeout = sys_cpu_to_le16(param->timeout);

	return bt_hci_cmd_send(BT_HCI_OP_LE_CONN_PARAM_REQ_REPLY, cmd);
}
1762 
le_conn_param_req(struct net_buf * buf)1763 static void le_conn_param_req(struct net_buf *buf)
1764 {
1765 	struct bt_hci_evt_le_conn_param_req *evt = (void *)buf->data;
1766 	struct bt_le_conn_param param;
1767 	struct bt_conn *conn;
1768 	uint16_t handle;
1769 
1770 	handle = sys_le16_to_cpu(evt->handle);
1771 	param.interval_min = sys_le16_to_cpu(evt->interval_min);
1772 	param.interval_max = sys_le16_to_cpu(evt->interval_max);
1773 	param.latency = sys_le16_to_cpu(evt->latency);
1774 	param.timeout = sys_le16_to_cpu(evt->timeout);
1775 
1776 	conn = bt_conn_lookup_handle(handle);
1777 	if (!conn) {
1778 		LOG_ERR("Unable to lookup conn for handle %u", handle);
1779 		le_conn_param_neg_reply(handle, BT_HCI_ERR_UNKNOWN_CONN_ID);
1780 		return;
1781 	}
1782 
1783 	if (!le_param_req(conn, &param)) {
1784 		le_conn_param_neg_reply(handle, BT_HCI_ERR_INVALID_LL_PARAM);
1785 	} else {
1786 		le_conn_param_req_reply(handle, &param);
1787 	}
1788 
1789 	bt_conn_unref(conn);
1790 }
1791 
/* HCI event handler for LE Connection Update Complete.
 *
 * On success the new parameters are stored; if the remote rejected the
 * HCI-level procedure a peripheral falls back to the L2CAP connection
 * parameter update procedure; with auto-update enabled, an unsupported
 * parameter value triggers a bounded number of retries.
 */
static void le_conn_update_complete(struct net_buf *buf)
{
	struct bt_hci_evt_le_conn_update_complete *evt = (void *)buf->data;
	struct bt_conn *conn;
	uint16_t handle;

	handle = sys_le16_to_cpu(evt->handle);

	LOG_DBG("status 0x%02x, handle %u", evt->status, handle);

	conn = bt_conn_lookup_handle(handle);
	if (!conn) {
		LOG_ERR("Unable to lookup conn for handle %u", handle);
		return;
	}

	/* Fall back to L2CAP CPUP exactly once per connection (the
	 * test-and-set flag guards against repeating it).
	 */
	if (evt->status == BT_HCI_ERR_UNSUPP_REMOTE_FEATURE &&
	    conn->role == BT_HCI_ROLE_PERIPHERAL &&
	    !atomic_test_and_set_bit(conn->flags,
				     BT_CONN_PERIPHERAL_PARAM_L2CAP)) {
		/* CPR not supported, let's try L2CAP CPUP instead */
		struct bt_le_conn_param param;

		param.interval_min = conn->le.interval_min;
		param.interval_max = conn->le.interval_max;
		param.latency = conn->le.pending_latency;
		param.timeout = conn->le.pending_timeout;

		bt_l2cap_update_conn_param(conn, &param);
	} else {
		if (!evt->status) {
			conn->le.interval = sys_le16_to_cpu(evt->interval);
			conn->le.latency = sys_le16_to_cpu(evt->latency);
			conn->le.timeout = sys_le16_to_cpu(evt->supv_timeout);

			/* NOTE: the #if below splits this if/else ladder; when
			 * auto-update is disabled only the success branch and
			 * the final notify remain.
			 */
#if defined(CONFIG_BT_GAP_AUTO_UPDATE_CONN_PARAMS)
			atomic_clear_bit(conn->flags,
					 BT_CONN_PERIPHERAL_PARAM_AUTO_UPDATE);
		} else if (atomic_test_bit(conn->flags,
					   BT_CONN_PERIPHERAL_PARAM_AUTO_UPDATE) &&
			   evt->status == BT_HCI_ERR_UNSUPP_LL_PARAM_VAL &&
			   conn->le.conn_param_retry_countdown) {
			/* Retry the auto-update later with a work item. */
			conn->le.conn_param_retry_countdown--;
			k_work_schedule(&conn->deferred_work,
					K_MSEC(CONFIG_BT_CONN_PARAM_RETRY_TIMEOUT));
		} else {
			atomic_clear_bit(conn->flags,
					 BT_CONN_PERIPHERAL_PARAM_AUTO_UPDATE);
#endif /* CONFIG_BT_GAP_AUTO_UPDATE_CONN_PARAMS */

		}

		notify_le_param_updated(conn);
	}

	bt_conn_unref(conn);
}
1849 
1850 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
set_flow_control(void)1851 static int set_flow_control(void)
1852 {
1853 	struct bt_hci_cp_host_buffer_size *hbs;
1854 	struct net_buf *buf;
1855 	int err;
1856 
1857 	/* Check if host flow control is actually supported */
1858 	if (!BT_CMD_TEST(bt_dev.supported_commands, 10, 5)) {
1859 		LOG_WRN("Controller to host flow control not supported");
1860 		return 0;
1861 	}
1862 
1863 	buf = bt_hci_cmd_create(BT_HCI_OP_HOST_BUFFER_SIZE,
1864 				sizeof(*hbs));
1865 	if (!buf) {
1866 		return -ENOBUFS;
1867 	}
1868 
1869 	hbs = net_buf_add(buf, sizeof(*hbs));
1870 	(void)memset(hbs, 0, sizeof(*hbs));
1871 	hbs->acl_mtu = sys_cpu_to_le16(CONFIG_BT_BUF_ACL_RX_SIZE);
1872 	hbs->acl_pkts = sys_cpu_to_le16(CONFIG_BT_BUF_ACL_RX_COUNT);
1873 
1874 	err = bt_hci_cmd_send_sync(BT_HCI_OP_HOST_BUFFER_SIZE, buf, NULL);
1875 	if (err) {
1876 		return err;
1877 	}
1878 
1879 	buf = bt_hci_cmd_create(BT_HCI_OP_SET_CTL_TO_HOST_FLOW, 1);
1880 	if (!buf) {
1881 		return -ENOBUFS;
1882 	}
1883 
1884 	net_buf_add_u8(buf, BT_HCI_CTL_TO_HOST_FLOW_ENABLE);
1885 	return bt_hci_cmd_send_sync(BT_HCI_OP_SET_CTL_TO_HOST_FLOW, buf, NULL);
1886 }
1887 #endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */
1888 
/* Remove all pairing/bonding data for a peer on the given identity:
 * disconnect any live connection, clear LE (and, for public addresses,
 * BR/EDR) keys, wipe stored GATT data, and notify bond-deleted listeners.
 */
static void unpair(uint8_t id, const bt_addr_le_t *addr)
{
	struct bt_keys *keys = NULL;
	struct bt_conn *conn = bt_conn_lookup_addr_le(id, addr);

	if (conn) {
		/* Clear the conn->le.keys pointer since we'll invalidate it,
		 * and don't want any subsequent code (like disconnected
		 * callbacks) accessing it.
		 */
		if (conn->type == BT_CONN_TYPE_LE) {
			keys = conn->le.keys;
			conn->le.keys = NULL;
		}

		bt_conn_disconnect(conn, BT_HCI_ERR_REMOTE_USER_TERM_CONN);
		bt_conn_unref(conn);
	}

	if (IS_ENABLED(CONFIG_BT_BREDR)) {
		/* LE Public may indicate BR/EDR as well */
		if (addr->type == BT_ADDR_LE_PUBLIC) {
			bt_keys_link_key_clear_addr(&addr->a);
		}
	}

	if (IS_ENABLED(CONFIG_BT_SMP)) {
		/* If no live connection supplied the keys, look them up. */
		if (!keys) {
			keys = bt_keys_find_addr(id, addr);
		}

		if (keys) {
			bt_keys_clear(keys);
		}
	}

	/* Remove persisted GATT data (CCCs, etc.) for this bond. */
	bt_gatt_clear(id, addr);

#if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_BREDR)
	struct bt_conn_auth_info_cb *listener, *next;

	/* Notify all registered bond-deleted callbacks. */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&bt_auth_info_cbs, listener,
					  next, node) {
		if (listener->bond_deleted) {
			listener->bond_deleted(id, addr);
		}
	}
#endif /* defined(CONFIG_BT_SMP) || defined(CONFIG_BT_BREDR) */
}
1938 
unpair_remote(const struct bt_bond_info * info,void * data)1939 static void unpair_remote(const struct bt_bond_info *info, void *data)
1940 {
1941 	uint8_t *id = (uint8_t *) data;
1942 
1943 	unpair(*id, &info->addr);
1944 }
1945 
int bt_unpair(uint8_t id, const bt_addr_le_t *addr)
{
	/* Reject identity indices outside the configured range. */
	if (id >= CONFIG_BT_ID_MAX) {
		return -EINVAL;
	}

	/* A NULL or "any" address means: remove every bond on this identity. */
	if (IS_ENABLED(CONFIG_BT_SMP) &&
	    (addr == NULL || bt_addr_le_eq(addr, BT_ADDR_LE_ANY))) {
		bt_foreach_bond(id, unpair_remote, &id);
	} else {
		unpair(id, addr);
	}

	return 0;
}
1961 
1962 #endif /* CONFIG_BT_CONN */
1963 
1964 #if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_BREDR)
/* Translate an HCI error code into the host's bt_security_err value.
 * Unknown codes map to BT_SECURITY_ERR_UNSPECIFIED.
 */
enum bt_security_err bt_security_err_get(uint8_t hci_err)
{
	static const struct {
		uint8_t hci;
		enum bt_security_err sec;
	} err_map[] = {
		{ BT_HCI_ERR_SUCCESS, BT_SECURITY_ERR_SUCCESS },
		{ BT_HCI_ERR_AUTH_FAIL, BT_SECURITY_ERR_AUTH_FAIL },
		{ BT_HCI_ERR_PIN_OR_KEY_MISSING,
		  BT_SECURITY_ERR_PIN_OR_KEY_MISSING },
		{ BT_HCI_ERR_PAIRING_NOT_SUPPORTED,
		  BT_SECURITY_ERR_PAIR_NOT_SUPPORTED },
		{ BT_HCI_ERR_PAIRING_NOT_ALLOWED,
		  BT_SECURITY_ERR_PAIR_NOT_ALLOWED },
		{ BT_HCI_ERR_INVALID_PARAM, BT_SECURITY_ERR_INVALID_PARAM },
	};

	for (size_t i = 0; i < ARRAY_SIZE(err_map); i++) {
		if (err_map[i].hci == hci_err) {
			return err_map[i].sec;
		}
	}

	return BT_SECURITY_ERR_UNSPECIFIED;
}
1984 #endif /* defined(CONFIG_BT_SMP) || defined(CONFIG_BT_BREDR) */
1985 
1986 #if defined(CONFIG_BT_SMP)
update_sec_level(struct bt_conn * conn)1987 static bool update_sec_level(struct bt_conn *conn)
1988 {
1989 	if (conn->le.keys && (conn->le.keys->flags & BT_KEYS_AUTHENTICATED)) {
1990 		if (conn->le.keys->flags & BT_KEYS_SC &&
1991 		    conn->le.keys->enc_size == BT_SMP_MAX_ENC_KEY_SIZE) {
1992 			conn->sec_level = BT_SECURITY_L4;
1993 		} else {
1994 			conn->sec_level = BT_SECURITY_L3;
1995 		}
1996 	} else {
1997 		conn->sec_level = BT_SECURITY_L2;
1998 	}
1999 
2000 	return !(conn->required_sec_level > conn->sec_level);
2001 }
2002 #endif /* CONFIG_BT_SMP */
2003 
2004 #if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_BREDR)
/* Handle the HCI Encryption Change event: record the new encryption
 * state, refresh the security level from the stored keys, notify
 * security-changed listeners and disconnect if the required security
 * level could not be reached.
 */
static void hci_encrypt_change(struct net_buf *buf)
{
	struct bt_hci_evt_encrypt_change *evt = (void *)buf->data;
	uint16_t handle = sys_le16_to_cpu(evt->handle);
	uint8_t status = evt->status;
	struct bt_conn *conn;

	LOG_DBG("status 0x%02x handle %u encrypt 0x%02x", evt->status, handle, evt->encrypt);

	conn = bt_conn_lookup_handle(handle);
	if (!conn) {
		LOG_ERR("Unable to look up conn with handle %u", handle);
		return;
	}

	/* Controller reported failure: just notify and keep existing keys. */
	if (status) {
		bt_conn_security_changed(conn, status,
					 bt_security_err_get(status));
		bt_conn_unref(conn);
		return;
	}

	conn->encrypt = evt->encrypt;

#if defined(CONFIG_BT_SMP)
	if (conn->type == BT_CONN_TYPE_LE) {
		/*
		 * we update keys properties only on successful encryption to
		 * avoid losing valid keys if encryption was not successful.
		 *
		 * Update keys with last pairing info for proper sec level
		 * update. This is done only for LE transport, for BR/EDR keys
		 * are updated on HCI 'Link Key Notification Event'
		 */
		if (conn->encrypt) {
			bt_smp_update_keys(conn);
		}

		/* The achieved level is below what was required: treat as
		 * an authentication failure below.
		 */
		if (!update_sec_level(conn)) {
			status = BT_HCI_ERR_AUTH_FAIL;
		}
	}
#endif /* CONFIG_BT_SMP */
#if defined(CONFIG_BT_BREDR)
	if (conn->type == BT_CONN_TYPE_BR) {
		if (!bt_br_update_sec_level(conn)) {
			bt_conn_unref(conn);
			return;
		}

		if (IS_ENABLED(CONFIG_BT_SMP)) {
			/*
			 * Start SMP over BR/EDR if we are pairing and are
			 * central on the link
			 */
			if (atomic_test_bit(conn->flags, BT_CONN_BR_PAIRING) &&
			    conn->role == BT_CONN_ROLE_CENTRAL) {
				bt_smp_br_send_pairing_req(conn);
			}
		}
	}
#endif /* CONFIG_BT_BREDR */

	bt_conn_security_changed(conn, status, bt_security_err_get(status));

	/* Disconnect when the required security level was not reached. */
	if (status) {
		LOG_ERR("Failed to set required security level");
		bt_conn_disconnect(conn, status);
	}

	bt_conn_unref(conn);
}
2077 
/* Handle the HCI Encryption Key Refresh Complete event: re-derive the
 * security level from the (possibly updated) keys and notify
 * security-changed listeners; disconnect on failure to reach the
 * required level.
 */
static void hci_encrypt_key_refresh_complete(struct net_buf *buf)
{
	struct bt_hci_evt_encrypt_key_refresh_complete *evt = (void *)buf->data;
	uint8_t status = evt->status;
	struct bt_conn *conn;
	uint16_t handle;

	handle = sys_le16_to_cpu(evt->handle);

	LOG_DBG("status 0x%02x handle %u", evt->status, handle);

	conn = bt_conn_lookup_handle(handle);
	if (!conn) {
		LOG_ERR("Unable to look up conn with handle %u", handle);
		return;
	}

	/* Controller reported failure: just notify and bail out. */
	if (status) {
		bt_conn_security_changed(conn, status,
					 bt_security_err_get(status));
		bt_conn_unref(conn);
		return;
	}

	/*
	 * Update keys with last pairing info for proper sec level update.
	 * This is done only for LE transport. For BR/EDR transport keys are
	 * updated on HCI 'Link Key Notification Event', therefore update here
	 * only security level based on available keys and encryption state.
	 */
#if defined(CONFIG_BT_SMP)
	if (conn->type == BT_CONN_TYPE_LE) {
		bt_smp_update_keys(conn);

		/* Required level not reached: report as auth failure. */
		if (!update_sec_level(conn)) {
			status = BT_HCI_ERR_AUTH_FAIL;
		}
	}
#endif /* CONFIG_BT_SMP */
#if defined(CONFIG_BT_BREDR)
	if (conn->type == BT_CONN_TYPE_BR) {
		if (!bt_br_update_sec_level(conn)) {
			bt_conn_unref(conn);
			return;
		}
	}
#endif /* CONFIG_BT_BREDR */

	bt_conn_security_changed(conn, status, bt_security_err_get(status));
	if (status) {
		LOG_ERR("Failed to set required security level");
		bt_conn_disconnect(conn, status);
	}

	bt_conn_unref(conn);
}
2134 #endif /* CONFIG_BT_SMP || CONFIG_BT_BREDR */
2135 
2136 #if defined(CONFIG_BT_REMOTE_VERSION)
/* Handle the Read Remote Version Information Complete event: cache the
 * peer's version/manufacturer/subversion on the connection and notify
 * remote-info listeners when enabled.
 */
static void bt_hci_evt_read_remote_version_complete(struct net_buf *buf)
{
	struct bt_hci_evt_remote_version_info *evt;
	struct bt_conn *conn;
	uint16_t handle;

	evt = net_buf_pull_mem(buf, sizeof(*evt));
	handle = sys_le16_to_cpu(evt->handle);
	conn = bt_conn_lookup_handle(handle);
	if (!conn) {
		LOG_ERR("No connection for handle %u", handle);
		return;
	}

	/* Only cache the values on success; the completion flag below is
	 * set regardless of status.
	 */
	if (!evt->status) {
		conn->rv.version = evt->version;
		conn->rv.manufacturer = sys_le16_to_cpu(evt->manufacturer);
		conn->rv.subversion = sys_le16_to_cpu(evt->subversion);
	}

	atomic_set_bit(conn->flags, BT_CONN_AUTO_VERSION_INFO);

	if (IS_ENABLED(CONFIG_BT_REMOTE_INFO)) {
		/* Remote features is already present */
		notify_remote_info(conn);
	}

	bt_conn_unref(conn);
}
2166 #endif /* CONFIG_BT_REMOTE_VERSION */
2167 
hci_hardware_error(struct net_buf * buf)2168 static void hci_hardware_error(struct net_buf *buf)
2169 {
2170 	struct bt_hci_evt_hardware_error *evt;
2171 
2172 	evt = net_buf_pull_mem(buf, sizeof(*evt));
2173 
2174 	LOG_ERR("Hardware error, hardware code: %d", evt->hardware_code);
2175 }
2176 
2177 #if defined(CONFIG_BT_SMP)
/* Send LE LTK Request Negative Reply for the given connection handle
 * (no matching Long Term Key was found).
 */
static void le_ltk_neg_reply(uint16_t handle)
{
	struct net_buf *buf;
	struct bt_hci_cp_le_ltk_req_neg_reply *cp;

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_LTK_REQ_NEG_REPLY, sizeof(*cp));
	if (buf == NULL) {
		LOG_ERR("Out of command buffers");

		return;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(handle);

	bt_hci_cmd_send(BT_HCI_OP_LE_LTK_REQ_NEG_REPLY, buf);
}
2195 
/* Send LE LTK Request Reply carrying the Long Term Key for the given
 * connection handle.
 */
static void le_ltk_reply(uint16_t handle, uint8_t *ltk)
{
	struct net_buf *buf;
	struct bt_hci_cp_le_ltk_req_reply *cp;

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_LTK_REQ_REPLY, sizeof(*cp));
	if (buf == NULL) {
		LOG_ERR("Out of command buffers");
		return;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(handle);
	memcpy(cp->ltk, ltk, sizeof(cp->ltk));

	bt_hci_cmd_send(BT_HCI_OP_LE_LTK_REQ_REPLY, buf);
}
2214 
/* Handle the LE LTK Request event: ask SMP for the Long Term Key
 * matching the controller-supplied rand/ediv and reply with either the
 * key or a negative reply.
 */
static void le_ltk_request(struct net_buf *buf)
{
	struct bt_hci_evt_le_ltk_request *evt = (void *)buf->data;
	struct bt_conn *conn;
	uint16_t handle;
	uint8_t ltk[16];

	handle = sys_le16_to_cpu(evt->handle);

	LOG_DBG("handle %u", handle);

	conn = bt_conn_lookup_handle(handle);
	if (!conn) {
		LOG_ERR("Unable to lookup conn for handle %u", handle);
		return;
	}

	/* bt_smp_request_ltk() fills 'ltk' and returns true when a key is
	 * available for this request.
	 */
	if (bt_smp_request_ltk(conn, evt->rand, evt->ediv, ltk)) {
		le_ltk_reply(handle, ltk);
	} else {
		le_ltk_neg_reply(handle);
	}

	bt_conn_unref(conn);
}
2240 #endif /* CONFIG_BT_SMP */
2241 
/* Handle completion of the HCI_Reset command: restore host-side state
 * that the controller reset invalidated.
 */
static void hci_reset_complete(struct net_buf *buf)
{
	/* Command Complete parameters start with a 1-byte status. */
	uint8_t status = buf->data[0];
	atomic_t flags;

	LOG_DBG("status 0x%02x", status);

	if (status) {
		return;
	}

	if (IS_ENABLED(CONFIG_BT_OBSERVER)) {
		bt_scan_reset();
	}

#if defined(CONFIG_BT_BREDR)
	bt_br_discovery_reset();
#endif /* CONFIG_BT_BREDR */

	/* Keep only the flags that are meant to survive an HCI reset. */
	flags = (atomic_get(bt_dev.flags) & BT_DEV_PERSISTENT_FLAGS);
	atomic_set(bt_dev.flags, flags);
}
2264 
/* Finalize a sent HCI command once its Command Complete/Status arrived:
 * release bt_dev.sent_cmd, apply any deferred state-flag update and wake
 * a synchronous bt_hci_cmd_send_sync() caller.
 */
static void hci_cmd_done(uint16_t opcode, uint8_t status, struct net_buf *buf)
{
	LOG_DBG("opcode 0x%04x status 0x%02x buf %p", opcode, status, buf);

	/* Only buffers from hci_cmd_pool carry the cmd() metadata used
	 * below; warn and ignore anything else.
	 */
	if (net_buf_pool_get(buf->pool_id) != &hci_cmd_pool) {
		LOG_WRN("opcode 0x%04x pool id %u pool %p != &hci_cmd_pool %p", opcode,
			buf->pool_id, net_buf_pool_get(buf->pool_id), &hci_cmd_pool);
		return;
	}

	/* The opcode in the event does not match the command stored in
	 * this buffer.
	 */
	if (cmd(buf)->opcode != opcode) {
		LOG_WRN("OpCode 0x%04x completed instead of expected 0x%04x", opcode,
			cmd(buf)->opcode);
		return;
	}

	/* Drop the reference taken by send_cmd(). */
	if (bt_dev.sent_cmd) {
		net_buf_unref(bt_dev.sent_cmd);
		bt_dev.sent_cmd = NULL;
	}

	/* On success, apply the deferred atomic-flag update attached to
	 * the command (see struct bt_hci_cmd_state_set).
	 */
	if (cmd(buf)->state && !status) {
		struct bt_hci_cmd_state_set *update = cmd(buf)->state;

		atomic_set_bit_to(update->target, update->bit, update->val);
	}

	/* If the command was synchronous wake up bt_hci_cmd_send_sync() */
	if (cmd(buf)->sync) {
		cmd(buf)->status = status;
		k_sem_give(cmd(buf)->sync);
	}
}
2298 
/* Handle the HCI Command Complete event: finalize the matching command
 * and release flow-control credits for sending the next one.
 */
static void hci_cmd_complete(struct net_buf *buf)
{
	struct bt_hci_evt_cmd_complete *evt;
	uint8_t status, ncmd;
	uint16_t opcode;

	evt = net_buf_pull_mem(buf, sizeof(*evt));
	ncmd = evt->ncmd;
	opcode = sys_le16_to_cpu(evt->opcode);

	LOG_DBG("opcode 0x%04x", opcode);

	/* All command return parameters have a 1-byte status in the
	 * beginning, so we can safely make this generalization.
	 */
	status = buf->data[0];

	hci_cmd_done(opcode, status, buf);

	/* Allow next command to be sent */
	if (ncmd) {
		k_sem_give(&bt_dev.ncmd_sem);
	}
}
2323 
hci_cmd_status(struct net_buf * buf)2324 static void hci_cmd_status(struct net_buf *buf)
2325 {
2326 	struct bt_hci_evt_cmd_status *evt;
2327 	uint16_t opcode;
2328 	uint8_t ncmd;
2329 
2330 	evt = net_buf_pull_mem(buf, sizeof(*evt));
2331 	opcode = sys_le16_to_cpu(evt->opcode);
2332 	ncmd = evt->ncmd;
2333 
2334 	LOG_DBG("opcode 0x%04x", opcode);
2335 
2336 	hci_cmd_done(opcode, evt->status, buf);
2337 
2338 	/* Allow next command to be sent */
2339 	if (ncmd) {
2340 		k_sem_give(&bt_dev.ncmd_sem);
2341 	}
2342 }
2343 
/* Fetch the HCI connection handle for an established connection.
 * Returns 0 on success, -ENOTCONN when the connection is not in the
 * connected state.
 */
int bt_hci_get_conn_handle(const struct bt_conn *conn, uint16_t *conn_handle)
{
	if (conn->state == BT_CONN_CONNECTED) {
		*conn_handle = conn->handle;
		return 0;
	}

	return -ENOTCONN;
}
2353 
2354 #if defined(CONFIG_BT_EXT_ADV)
/* Fetch the HCI advertising handle for a created extended advertising
 * set. Returns 0 on success, -EINVAL when the set was never created.
 */
int bt_hci_get_adv_handle(const struct bt_le_ext_adv *adv, uint8_t *adv_handle)
{
	if (atomic_test_bit(adv->flags, BT_ADV_CREATED)) {
		*adv_handle = adv->handle;
		return 0;
	}

	return -EINVAL;
}
2364 #endif /* CONFIG_BT_EXT_ADV */
2365 
2366 #if defined(CONFIG_BT_HCI_VS_EVT_USER)
/* Register (or replace) the user callback that gets first chance at
 * vendor-specific HCI events; see hci_vendor_event(). Always returns 0.
 */
int bt_hci_register_vnd_evt_cb(bt_hci_vnd_evt_cb_t cb)
{
	hci_vnd_evt_cb = cb;
	return 0;
}
2372 #endif /* CONFIG_BT_HCI_VS_EVT_USER */
2373 
/* Vendor-specific HCI subevent handlers, dispatched by
 * hci_vendor_event().
 */
static const struct event_handler vs_events[] = {
#if defined(CONFIG_BT_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES)
	EVENT_HANDLER(BT_HCI_EVT_VS_LE_CONNECTIONLESS_IQ_REPORT,
		      bt_hci_le_vs_df_connectionless_iq_report,
		      sizeof(struct bt_hci_evt_vs_le_connectionless_iq_report)),
#endif /* CONFIG_BT_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES */
#if defined(CONFIG_BT_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES)
	EVENT_HANDLER(BT_HCI_EVT_VS_LE_CONNECTION_IQ_REPORT, bt_hci_le_vs_df_connection_iq_report,
		      sizeof(struct bt_hci_evt_vs_le_connection_iq_report)),
#endif /* CONFIG_BT_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES */
};
2385 
/* Dispatch a vendor-specific HCI event: offer it to the user callback
 * first (CONFIG_BT_HCI_VS_EVT_USER) and, when not handled there, to the
 * host's own vendor subevent handlers.
 */
static void hci_vendor_event(struct net_buf *buf)
{
	bool handled = false;

#if defined(CONFIG_BT_HCI_VS_EVT_USER)
	if (hci_vnd_evt_cb) {
		struct net_buf_simple_state state;

		/* Save/restore so the callback cannot disturb the buffer
		 * for the handlers below.
		 */
		net_buf_simple_save(&buf->b, &state);

		handled = hci_vnd_evt_cb(&buf->b);

		net_buf_simple_restore(&buf->b, &state);
	}
#endif /* CONFIG_BT_HCI_VS_EVT_USER */

	if (IS_ENABLED(CONFIG_BT_HCI_VS_EVT) && !handled) {
		struct bt_hci_evt_vs *evt;

		evt = net_buf_pull_mem(buf, sizeof(*evt));

		LOG_DBG("subevent 0x%02x", evt->subevent);

		handle_vs_event(evt->subevent, buf, vs_events, ARRAY_SIZE(vs_events));
	}
}
2412 
/* LE meta event subevent handlers, dispatched by hci_le_meta_event(). */
static const struct event_handler meta_events[] = {
#if defined(CONFIG_BT_OBSERVER)
	EVENT_HANDLER(BT_HCI_EVT_LE_ADVERTISING_REPORT, bt_hci_le_adv_report,
		      sizeof(struct bt_hci_evt_le_advertising_report)),
#endif /* CONFIG_BT_OBSERVER */
#if defined(CONFIG_BT_CONN)
	EVENT_HANDLER(BT_HCI_EVT_LE_CONN_COMPLETE, le_legacy_conn_complete,
		      sizeof(struct bt_hci_evt_le_conn_complete)),
	EVENT_HANDLER(BT_HCI_EVT_LE_ENH_CONN_COMPLETE, le_enh_conn_complete,
		      sizeof(struct bt_hci_evt_le_enh_conn_complete)),
	EVENT_HANDLER(BT_HCI_EVT_LE_CONN_UPDATE_COMPLETE,
		      le_conn_update_complete,
		      sizeof(struct bt_hci_evt_le_conn_update_complete)),
	EVENT_HANDLER(BT_HCI_EVT_LE_REMOTE_FEAT_COMPLETE,
		      le_remote_feat_complete,
		      sizeof(struct bt_hci_evt_le_remote_feat_complete)),
	EVENT_HANDLER(BT_HCI_EVT_LE_CONN_PARAM_REQ, le_conn_param_req,
		      sizeof(struct bt_hci_evt_le_conn_param_req)),
#if defined(CONFIG_BT_DATA_LEN_UPDATE)
	EVENT_HANDLER(BT_HCI_EVT_LE_DATA_LEN_CHANGE, le_data_len_change,
		      sizeof(struct bt_hci_evt_le_data_len_change)),
#endif /* CONFIG_BT_DATA_LEN_UPDATE */
#if defined(CONFIG_BT_PHY_UPDATE)
	EVENT_HANDLER(BT_HCI_EVT_LE_PHY_UPDATE_COMPLETE,
		      le_phy_update_complete,
		      sizeof(struct bt_hci_evt_le_phy_update_complete)),
#endif /* CONFIG_BT_PHY_UPDATE */
#endif /* CONFIG_BT_CONN */
#if defined(CONFIG_BT_SMP)
	EVENT_HANDLER(BT_HCI_EVT_LE_LTK_REQUEST, le_ltk_request,
		      sizeof(struct bt_hci_evt_le_ltk_request)),
#endif /* CONFIG_BT_SMP */
#if defined(CONFIG_BT_ECC)
	EVENT_HANDLER(BT_HCI_EVT_LE_P256_PUBLIC_KEY_COMPLETE,
		      bt_hci_evt_le_pkey_complete,
		      sizeof(struct bt_hci_evt_le_p256_public_key_complete)),
	EVENT_HANDLER(BT_HCI_EVT_LE_GENERATE_DHKEY_COMPLETE,
		      bt_hci_evt_le_dhkey_complete,
		      sizeof(struct bt_hci_evt_le_generate_dhkey_complete)),
#endif /* CONFIG_BT_ECC */
#if defined(CONFIG_BT_EXT_ADV)
#if defined(CONFIG_BT_BROADCASTER)
	EVENT_HANDLER(BT_HCI_EVT_LE_ADV_SET_TERMINATED, bt_hci_le_adv_set_terminated,
		      sizeof(struct bt_hci_evt_le_adv_set_terminated)),
	EVENT_HANDLER(BT_HCI_EVT_LE_SCAN_REQ_RECEIVED, bt_hci_le_scan_req_received,
		      sizeof(struct bt_hci_evt_le_scan_req_received)),
#endif /* CONFIG_BT_BROADCASTER */
#if defined(CONFIG_BT_OBSERVER)
	EVENT_HANDLER(BT_HCI_EVT_LE_SCAN_TIMEOUT, bt_hci_le_scan_timeout,
		      0),
	EVENT_HANDLER(BT_HCI_EVT_LE_EXT_ADVERTISING_REPORT, bt_hci_le_adv_ext_report,
		      sizeof(struct bt_hci_evt_le_ext_advertising_report)),
#endif /* defined(CONFIG_BT_OBSERVER) */
#if defined(CONFIG_BT_PER_ADV_SYNC)
	EVENT_HANDLER(BT_HCI_EVT_LE_PER_ADV_SYNC_ESTABLISHED,
		      bt_hci_le_per_adv_sync_established,
		      sizeof(struct bt_hci_evt_le_per_adv_sync_established)),
	EVENT_HANDLER(BT_HCI_EVT_LE_PER_ADVERTISING_REPORT, bt_hci_le_per_adv_report,
		      sizeof(struct bt_hci_evt_le_per_advertising_report)),
	EVENT_HANDLER(BT_HCI_EVT_LE_PER_ADV_SYNC_LOST, bt_hci_le_per_adv_sync_lost,
		      sizeof(struct bt_hci_evt_le_per_adv_sync_lost)),
#if defined(CONFIG_BT_CONN)
	EVENT_HANDLER(BT_HCI_EVT_LE_PAST_RECEIVED, bt_hci_le_past_received,
		      sizeof(struct bt_hci_evt_le_past_received)),
#endif /* CONFIG_BT_CONN */
#endif /* defined(CONFIG_BT_PER_ADV_SYNC) */
#endif /* defined(CONFIG_BT_EXT_ADV) */
#if defined(CONFIG_BT_ISO_UNICAST)
	EVENT_HANDLER(BT_HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_established,
		      sizeof(struct bt_hci_evt_le_cis_established)),
#if defined(CONFIG_BT_ISO_PERIPHERAL)
	EVENT_HANDLER(BT_HCI_EVT_LE_CIS_REQ, hci_le_cis_req,
		      sizeof(struct bt_hci_evt_le_cis_req)),
#endif /* (CONFIG_BT_ISO_PERIPHERAL) */
#endif /* (CONFIG_BT_ISO_UNICAST) */
#if defined(CONFIG_BT_ISO_BROADCASTER)
	EVENT_HANDLER(BT_HCI_EVT_LE_BIG_COMPLETE,
		      hci_le_big_complete,
		      sizeof(struct bt_hci_evt_le_big_complete)),
	EVENT_HANDLER(BT_HCI_EVT_LE_BIG_TERMINATE,
		      hci_le_big_terminate,
		      sizeof(struct bt_hci_evt_le_big_terminate)),
#endif /* CONFIG_BT_ISO_BROADCASTER */
#if defined(CONFIG_BT_ISO_SYNC_RECEIVER)
	EVENT_HANDLER(BT_HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
		      hci_le_big_sync_established,
		      sizeof(struct bt_hci_evt_le_big_sync_established)),
	EVENT_HANDLER(BT_HCI_EVT_LE_BIG_SYNC_LOST,
		      hci_le_big_sync_lost,
		      sizeof(struct bt_hci_evt_le_big_sync_lost)),
	EVENT_HANDLER(BT_HCI_EVT_LE_BIGINFO_ADV_REPORT,
		      bt_hci_le_biginfo_adv_report,
		      sizeof(struct bt_hci_evt_le_biginfo_adv_report)),
#endif /* CONFIG_BT_ISO_SYNC_RECEIVER */
#if defined(CONFIG_BT_DF_CONNECTIONLESS_CTE_RX)
	EVENT_HANDLER(BT_HCI_EVT_LE_CONNECTIONLESS_IQ_REPORT, bt_hci_le_df_connectionless_iq_report,
		      sizeof(struct bt_hci_evt_le_connectionless_iq_report)),
#endif /* CONFIG_BT_DF_CONNECTIONLESS_CTE_RX */
#if defined(CONFIG_BT_DF_CONNECTION_CTE_RX)
	EVENT_HANDLER(BT_HCI_EVT_LE_CONNECTION_IQ_REPORT, bt_hci_le_df_connection_iq_report,
		      sizeof(struct bt_hci_evt_le_connection_iq_report)),
#endif /* CONFIG_BT_DF_CONNECTION_CTE_RX */
#if defined(CONFIG_BT_DF_CONNECTION_CTE_REQ)
	EVENT_HANDLER(BT_HCI_EVT_LE_CTE_REQUEST_FAILED, bt_hci_le_df_cte_req_failed,
		      sizeof(struct bt_hci_evt_le_cte_req_failed)),
#endif /* CONFIG_BT_DF_CONNECTION_CTE_REQ */
#if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
	EVENT_HANDLER(BT_HCI_EVT_LE_PER_ADVERTISING_REPORT_V2, bt_hci_le_per_adv_report_v2,
		      sizeof(struct bt_hci_evt_le_per_advertising_report_v2)),
	EVENT_HANDLER(BT_HCI_EVT_LE_PAST_RECEIVED_V2, bt_hci_le_past_received_v2,
		      sizeof(struct bt_hci_evt_le_past_received_v2)),
	EVENT_HANDLER(BT_HCI_EVT_LE_PER_ADV_SYNC_ESTABLISHED_V2,
		      bt_hci_le_per_adv_sync_established_v2,
		      sizeof(struct bt_hci_evt_le_per_adv_sync_established_v2)),
#endif /* CONFIG_BT_PER_ADV_SYNC_RSP */
#if defined(CONFIG_BT_PER_ADV_RSP)
	EVENT_HANDLER(BT_HCI_EVT_LE_PER_ADV_SUBEVENT_DATA_REQUEST,
		      bt_hci_le_per_adv_subevent_data_request,
		      sizeof(struct bt_hci_evt_le_per_adv_subevent_data_request)),
	EVENT_HANDLER(BT_HCI_EVT_LE_PER_ADV_RESPONSE_REPORT, bt_hci_le_per_adv_response_report,
		      sizeof(struct bt_hci_evt_le_per_adv_response_report)),
#endif /* CONFIG_BT_PER_ADV_RSP */
#if defined(CONFIG_BT_CONN)
#if defined(CONFIG_BT_PER_ADV_RSP) || defined(CONFIG_BT_PER_ADV_SYNC_RSP)
	EVENT_HANDLER(BT_HCI_EVT_LE_ENH_CONN_COMPLETE_V2, le_enh_conn_complete_v2,
		      sizeof(struct bt_hci_evt_le_enh_conn_complete_v2)),
#endif /* CONFIG_BT_PER_ADV_RSP || CONFIG_BT_PER_ADV_SYNC_RSP */
#endif /* CONFIG_BT_CONN */

};
2543 
hci_le_meta_event(struct net_buf * buf)2544 static void hci_le_meta_event(struct net_buf *buf)
2545 {
2546 	struct bt_hci_evt_le_meta_event *evt;
2547 
2548 	evt = net_buf_pull_mem(buf, sizeof(*evt));
2549 
2550 	LOG_DBG("subevent 0x%02x", evt->subevent);
2551 
2552 	handle_event(evt->subevent, buf, meta_events, ARRAY_SIZE(meta_events));
2553 }
2554 
/* Handlers for regular (non-meta) HCI events, dispatched by hci_event().
 * Vendor and LE Meta events fan out further via their own tables.
 */
static const struct event_handler normal_events[] = {
	EVENT_HANDLER(BT_HCI_EVT_VENDOR, hci_vendor_event,
		      sizeof(struct bt_hci_evt_vs)),
	EVENT_HANDLER(BT_HCI_EVT_LE_META_EVENT, hci_le_meta_event,
		      sizeof(struct bt_hci_evt_le_meta_event)),
#if defined(CONFIG_BT_BREDR)
	EVENT_HANDLER(BT_HCI_EVT_CONN_REQUEST, bt_hci_conn_req,
		      sizeof(struct bt_hci_evt_conn_request)),
	EVENT_HANDLER(BT_HCI_EVT_CONN_COMPLETE, bt_hci_conn_complete,
		      sizeof(struct bt_hci_evt_conn_complete)),
	EVENT_HANDLER(BT_HCI_EVT_PIN_CODE_REQ, bt_hci_pin_code_req,
		      sizeof(struct bt_hci_evt_pin_code_req)),
	EVENT_HANDLER(BT_HCI_EVT_LINK_KEY_NOTIFY, bt_hci_link_key_notify,
		      sizeof(struct bt_hci_evt_link_key_notify)),
	EVENT_HANDLER(BT_HCI_EVT_LINK_KEY_REQ, bt_hci_link_key_req,
		      sizeof(struct bt_hci_evt_link_key_req)),
	EVENT_HANDLER(BT_HCI_EVT_IO_CAPA_RESP, bt_hci_io_capa_resp,
		      sizeof(struct bt_hci_evt_io_capa_resp)),
	EVENT_HANDLER(BT_HCI_EVT_IO_CAPA_REQ, bt_hci_io_capa_req,
		      sizeof(struct bt_hci_evt_io_capa_req)),
	EVENT_HANDLER(BT_HCI_EVT_SSP_COMPLETE, bt_hci_ssp_complete,
		      sizeof(struct bt_hci_evt_ssp_complete)),
	EVENT_HANDLER(BT_HCI_EVT_USER_CONFIRM_REQ, bt_hci_user_confirm_req,
		      sizeof(struct bt_hci_evt_user_confirm_req)),
	EVENT_HANDLER(BT_HCI_EVT_USER_PASSKEY_NOTIFY,
		      bt_hci_user_passkey_notify,
		      sizeof(struct bt_hci_evt_user_passkey_notify)),
	EVENT_HANDLER(BT_HCI_EVT_USER_PASSKEY_REQ, bt_hci_user_passkey_req,
		      sizeof(struct bt_hci_evt_user_passkey_req)),
	EVENT_HANDLER(BT_HCI_EVT_INQUIRY_COMPLETE, bt_hci_inquiry_complete,
		      sizeof(struct bt_hci_evt_inquiry_complete)),
	EVENT_HANDLER(BT_HCI_EVT_INQUIRY_RESULT_WITH_RSSI,
		      bt_hci_inquiry_result_with_rssi,
		      sizeof(struct bt_hci_evt_inquiry_result_with_rssi)),
	EVENT_HANDLER(BT_HCI_EVT_EXTENDED_INQUIRY_RESULT,
		      bt_hci_extended_inquiry_result,
		      sizeof(struct bt_hci_evt_extended_inquiry_result)),
	EVENT_HANDLER(BT_HCI_EVT_REMOTE_NAME_REQ_COMPLETE,
		      bt_hci_remote_name_request_complete,
		      sizeof(struct bt_hci_evt_remote_name_req_complete)),
	EVENT_HANDLER(BT_HCI_EVT_AUTH_COMPLETE, bt_hci_auth_complete,
		      sizeof(struct bt_hci_evt_auth_complete)),
	EVENT_HANDLER(BT_HCI_EVT_REMOTE_FEATURES,
		      bt_hci_read_remote_features_complete,
		      sizeof(struct bt_hci_evt_remote_features)),
	EVENT_HANDLER(BT_HCI_EVT_REMOTE_EXT_FEATURES,
		      bt_hci_read_remote_ext_features_complete,
		      sizeof(struct bt_hci_evt_remote_ext_features)),
	EVENT_HANDLER(BT_HCI_EVT_ROLE_CHANGE, bt_hci_role_change,
		      sizeof(struct bt_hci_evt_role_change)),
	EVENT_HANDLER(BT_HCI_EVT_SYNC_CONN_COMPLETE, bt_hci_synchronous_conn_complete,
		      sizeof(struct bt_hci_evt_sync_conn_complete)),
#endif /* CONFIG_BT_BREDR */
#if defined(CONFIG_BT_CONN)
	EVENT_HANDLER(BT_HCI_EVT_DISCONN_COMPLETE, hci_disconn_complete,
		      sizeof(struct bt_hci_evt_disconn_complete)),
#endif /* CONFIG_BT_CONN */
#if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_BREDR)
	EVENT_HANDLER(BT_HCI_EVT_ENCRYPT_CHANGE, hci_encrypt_change,
		      sizeof(struct bt_hci_evt_encrypt_change)),
	EVENT_HANDLER(BT_HCI_EVT_ENCRYPT_KEY_REFRESH_COMPLETE,
		      hci_encrypt_key_refresh_complete,
		      sizeof(struct bt_hci_evt_encrypt_key_refresh_complete)),
#endif /* CONFIG_BT_SMP || CONFIG_BT_BREDR */
#if defined(CONFIG_BT_REMOTE_VERSION)
	EVENT_HANDLER(BT_HCI_EVT_REMOTE_VERSION_INFO,
		      bt_hci_evt_read_remote_version_complete,
		      sizeof(struct bt_hci_evt_remote_version_info)),
#endif /* CONFIG_BT_REMOTE_VERSION */
	EVENT_HANDLER(BT_HCI_EVT_HARDWARE_ERROR, hci_hardware_error,
		      sizeof(struct bt_hci_evt_hardware_error)),
};
2627 
/* Entry point for a received HCI event packet: strip the event header,
 * dispatch via normal_events[] and release the buffer.
 */
static void hci_event(struct net_buf *buf)
{
	struct bt_hci_evt_hdr *hdr;

	BT_ASSERT(buf->len >= sizeof(*hdr));

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
	LOG_DBG("event 0x%02x", hdr->evt);
	/* Only events flagged for normal RX processing are expected here. */
	BT_ASSERT(bt_hci_evt_get_flags(hdr->evt) & BT_HCI_EVT_FLAG_RECV);

	handle_event(hdr->evt, buf, normal_events, ARRAY_SIZE(normal_events));

	net_buf_unref(buf);
}
2642 
/* Take the next queued HCI command and hand it to the driver.
 *
 * Called from the TX thread after the command FIFO was signalled ready,
 * hence K_NO_WAIT. bt_dev.ncmd_sem implements the controller's
 * outstanding-command flow control.
 */
static void send_cmd(void)
{
	struct net_buf *buf;
	int err;

	/* Get next command */
	LOG_DBG("calling net_buf_get");
	buf = net_buf_get(&bt_dev.cmd_tx_queue, K_NO_WAIT);
	BT_ASSERT(buf);

	/* Wait until ncmd > 0 */
	LOG_DBG("calling sem_take_wait");
	k_sem_take(&bt_dev.ncmd_sem, K_FOREVER);

	/* Clear out any existing sent command */
	if (bt_dev.sent_cmd) {
		LOG_ERR("Uncleared pending sent_cmd");
		net_buf_unref(bt_dev.sent_cmd);
		bt_dev.sent_cmd = NULL;
	}

	/* Keep an extra reference so the completion event can be matched
	 * against the sent command (see hci_cmd_done()).
	 */
	bt_dev.sent_cmd = net_buf_ref(buf);

	LOG_DBG("Sending command 0x%04x (buf %p) to driver", cmd(buf)->opcode, buf);

	err = bt_send(buf);
	if (err) {
		LOG_ERR("Unable to send to driver (err %d)", err);
		/* Undo the flow-control take and fail the command locally. */
		k_sem_give(&bt_dev.ncmd_sem);
		hci_cmd_done(cmd(buf)->opcode, BT_HCI_ERR_UNSPECIFIED, buf);
		net_buf_unref(buf);
	}
}
2676 
/* Dispatch the k_poll events the TX thread woke up on: send a queued HCI
 * command and/or process per-connection TX queues.
 */
static void process_events(struct k_poll_event *ev, int count)
{
	LOG_DBG("count %d", count);

	for (; count; ev++, count--) {
		LOG_DBG("ev->state %u", ev->state);

		switch (ev->state) {
		case K_POLL_STATE_SIGNALED:
			break;
		case K_POLL_STATE_SEM_AVAILABLE:
			/* After this fn is exec'd, `bt_conn_prepare_events()`
			 * will be called once again, and this time buffers will
			 * be available, so the FIFO will be added to the poll
			 * list instead of the ctlr buffers semaphore.
			 */
			break;
		case K_POLL_STATE_FIFO_DATA_AVAILABLE:
			if (ev->tag == BT_EVENT_CMD_TX) {
				send_cmd();
			} else if (IS_ENABLED(CONFIG_BT_CONN) ||
				   IS_ENABLED(CONFIG_BT_ISO)) {
				struct bt_conn *conn;

				if (ev->tag == BT_EVENT_CONN_TX_QUEUE) {
					/* Map the ready FIFO back to its
					 * owning connection.
					 */
					conn = CONTAINER_OF(ev->fifo,
							    struct bt_conn,
							    tx_queue);
					bt_conn_process_tx(conn);
				}
			}
			break;
		case K_POLL_STATE_NOT_READY:
			break;
		default:
			LOG_WRN("Unexpected k_poll event state %u", ev->state);
			break;
		}
	}
}
2717 
/* Number of k_poll_event slots the TX thread needs, depending on which
 * features contribute pollable objects.
 */
#if defined(CONFIG_BT_CONN)
#if defined(CONFIG_BT_ISO)
/* command FIFO + conn_change signal + MAX_CONN + ISO_MAX_CHAN */
#define EV_COUNT (2 + CONFIG_BT_MAX_CONN + CONFIG_BT_ISO_MAX_CHAN)
#else
/* command FIFO + conn_change signal + MAX_CONN */
#define EV_COUNT (2 + CONFIG_BT_MAX_CONN)
#endif /* CONFIG_BT_ISO */
#else
#if defined(CONFIG_BT_ISO)
/* command FIFO + conn_change signal + ISO_MAX_CHAN */
#define EV_COUNT (2 + CONFIG_BT_ISO_MAX_CHAN)
#else
/* command FIFO */
#define EV_COUNT 1
#endif /* CONFIG_BT_ISO */
#endif /* CONFIG_BT_CONN */
2735 
/* TX thread entry point: poll the command FIFO (slot 0) plus whatever
 * connection/ISO TX queues bt_conn_prepare_events() adds, then dispatch
 * the ready ones via process_events().
 */
static void hci_tx_thread(void *p1, void *p2, void *p3)
{
	static struct k_poll_event events[EV_COUNT] = {
		K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE,
						K_POLL_MODE_NOTIFY_ONLY,
						&bt_dev.cmd_tx_queue,
						BT_EVENT_CMD_TX),
	};

	LOG_DBG("Started");

	while (1) {
		int ev_count, err;

		/* Re-arm the always-present command FIFO event. */
		events[0].state = K_POLL_STATE_NOT_READY;
		ev_count = 1;

		/* This adds the FIFO per-connection */
		if (IS_ENABLED(CONFIG_BT_CONN) || IS_ENABLED(CONFIG_BT_ISO)) {
			ev_count += bt_conn_prepare_events(&events[1]);
		}

		LOG_DBG("Calling k_poll with %d events", ev_count);

		err = k_poll(events, ev_count, K_FOREVER);
		BT_ASSERT(err == 0);

		process_events(events, ev_count);

		/* Make sure we don't hog the CPU if there's all the time
		 * some ready events.
		 */
		k_yield();
	}
}
2771 
2772 
read_local_ver_complete(struct net_buf * buf)2773 static void read_local_ver_complete(struct net_buf *buf)
2774 {
2775 	struct bt_hci_rp_read_local_version_info *rp = (void *)buf->data;
2776 
2777 	LOG_DBG("status 0x%02x", rp->status);
2778 
2779 	bt_dev.hci_version = rp->hci_version;
2780 	bt_dev.hci_revision = sys_le16_to_cpu(rp->hci_revision);
2781 	bt_dev.lmp_version = rp->lmp_version;
2782 	bt_dev.lmp_subversion = sys_le16_to_cpu(rp->lmp_subversion);
2783 	bt_dev.manufacturer = sys_le16_to_cpu(rp->manufacturer);
2784 }
2785 
/* Handle LE Read Local Supported Features complete: cache the feature
 * bitmask in bt_dev.le.features.
 */
static void read_le_features_complete(struct net_buf *buf)
{
	struct bt_hci_rp_le_read_local_features *rp = (void *)buf->data;

	LOG_DBG("status 0x%02x", rp->status);

	memcpy(bt_dev.le.features, rp->features, sizeof(bt_dev.le.features));
}
2794 
2795 #if defined(CONFIG_BT_CONN)
2796 #if !defined(CONFIG_BT_BREDR)
/* Handle (BR/EDR) Read Buffer Size complete: use the BR/EDR ACL buffer
 * parameters as a fallback when the controller reported no LE buffers.
 */
static void read_buffer_size_complete(struct net_buf *buf)
{
	struct bt_hci_rp_read_buffer_size *rp = (void *)buf->data;
	uint16_t pkts;

	LOG_DBG("status 0x%02x", rp->status);

	/* If LE-side has buffers we can ignore the BR/EDR values */
	if (bt_dev.le.acl_mtu) {
		return;
	}

	bt_dev.le.acl_mtu = sys_le16_to_cpu(rp->acl_max_len);
	pkts = sys_le16_to_cpu(rp->acl_max_num);

	LOG_DBG("ACL BR/EDR buffers: pkts %u mtu %u", pkts, bt_dev.le.acl_mtu);

	/* Semaphore tracks the controller's available ACL buffers. */
	k_sem_init(&bt_dev.le.acl_pkts, pkts, pkts);
}
2816 #endif /* !defined(CONFIG_BT_BREDR) */
2817 #endif /* CONFIG_BT_CONN */
2818 
/* Handle LE Read Buffer Size complete: record the LE ACL MTU and seed
 * the ACL flow-control semaphore with the controller's packet count.
 */
static void le_read_buffer_size_complete(struct net_buf *buf)
{
	struct bt_hci_rp_le_read_buffer_size *rp = (void *)buf->data;

	LOG_DBG("status 0x%02x", rp->status);

#if defined(CONFIG_BT_CONN)
	uint16_t acl_mtu = sys_le16_to_cpu(rp->le_max_len);

	/* Controller reported no dedicated LE buffers; leave values unset
	 * (the BR/EDR fallback in read_buffer_size_complete() may apply).
	 */
	if (!acl_mtu || !rp->le_max_num) {
		return;
	}

	bt_dev.le.acl_mtu = acl_mtu;

	LOG_DBG("ACL LE buffers: pkts %u mtu %u", rp->le_max_num, bt_dev.le.acl_mtu);

	k_sem_init(&bt_dev.le.acl_pkts, rp->le_max_num, rp->le_max_num);
#endif /* CONFIG_BT_CONN */
}
2839 
/* Handle the LE_Read_Buffer_Size [v2] response: record ACL and ISO
 * payload lengths and initialize the corresponding packet semaphores.
 */
static void read_buffer_size_v2_complete(struct net_buf *buf)
{
#if defined(CONFIG_BT_ISO)
	struct bt_hci_rp_le_read_buffer_size_v2 *evt = (void *)buf->data;

	LOG_DBG("status %u", evt->status);

#if defined(CONFIG_BT_CONN)
	uint16_t mtu = sys_le16_to_cpu(evt->acl_max_len);

	if (mtu != 0U && evt->acl_max_num != 0U) {
		bt_dev.le.acl_mtu = mtu;
		LOG_DBG("ACL LE buffers: pkts %u mtu %u", evt->acl_max_num, bt_dev.le.acl_mtu);

		k_sem_init(&bt_dev.le.acl_pkts, evt->acl_max_num, evt->acl_max_num);
	}
#endif /* CONFIG_BT_CONN */

	uint16_t iso_len = sys_le16_to_cpu(evt->iso_max_len);

	if (iso_len == 0U || evt->iso_max_num == 0U) {
		LOG_ERR("ISO buffer size not set");
		return;
	}

	bt_dev.le.iso_mtu = iso_len;

	LOG_DBG("ISO buffers: pkts %u mtu %u", evt->iso_max_num, bt_dev.le.iso_mtu);

	k_sem_init(&bt_dev.le.iso_pkts, evt->iso_max_num, evt->iso_max_num);
	bt_dev.le.iso_limit = evt->iso_max_num;
#endif /* CONFIG_BT_ISO */
}
2873 
le_set_host_feature(uint8_t bit_number,uint8_t bit_value)2874 static int le_set_host_feature(uint8_t bit_number, uint8_t bit_value)
2875 {
2876 	struct bt_hci_cp_le_set_host_feature *cp;
2877 	struct net_buf *buf;
2878 
2879 	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_HOST_FEATURE, sizeof(*cp));
2880 	if (!buf) {
2881 		return -ENOBUFS;
2882 	}
2883 
2884 	cp = net_buf_add(buf, sizeof(*cp));
2885 	cp->bit_number = bit_number;
2886 	cp->bit_value = bit_value;
2887 
2888 	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_HOST_FEATURE, buf, NULL);
2889 }
2890 
read_supported_commands_complete(struct net_buf * buf)2891 static void read_supported_commands_complete(struct net_buf *buf)
2892 {
2893 	struct bt_hci_rp_read_supported_commands *rp = (void *)buf->data;
2894 
2895 	LOG_DBG("status 0x%02x", rp->status);
2896 
2897 	memcpy(bt_dev.supported_commands, rp->commands,
2898 	       sizeof(bt_dev.supported_commands));
2899 
2900 	/* Report additional HCI commands used for ECDH as
2901 	 * supported if TinyCrypt ECC is used for emulation.
2902 	 */
2903 	if (IS_ENABLED(CONFIG_BT_TINYCRYPT_ECC)) {
2904 		bt_hci_ecc_supported_commands(bt_dev.supported_commands);
2905 	}
2906 }
2907 
read_local_features_complete(struct net_buf * buf)2908 static void read_local_features_complete(struct net_buf *buf)
2909 {
2910 	struct bt_hci_rp_read_local_features *rp = (void *)buf->data;
2911 
2912 	LOG_DBG("status 0x%02x", rp->status);
2913 
2914 	memcpy(bt_dev.features[0], rp->features, sizeof(bt_dev.features[0]));
2915 }
2916 
le_read_supp_states_complete(struct net_buf * buf)2917 static void le_read_supp_states_complete(struct net_buf *buf)
2918 {
2919 	struct bt_hci_rp_le_read_supp_states *rp = (void *)buf->data;
2920 
2921 	LOG_DBG("status 0x%02x", rp->status);
2922 
2923 	bt_dev.le.states = sys_get_le64(rp->le_states);
2924 }
2925 
2926 #if defined(CONFIG_BT_SMP)
le_read_resolving_list_size_complete(struct net_buf * buf)2927 static void le_read_resolving_list_size_complete(struct net_buf *buf)
2928 {
2929 	struct bt_hci_rp_le_read_rl_size *rp = (void *)buf->data;
2930 
2931 	LOG_DBG("Resolving List size %u", rp->rl_size);
2932 
2933 	bt_dev.le.rl_size = rp->rl_size;
2934 }
2935 #endif /* defined(CONFIG_BT_SMP) */
2936 
/* Controller-agnostic HCI bring-up: optional HCI_Reset, then discovery
 * of the controller's local features, version and supported commands,
 * PRNG seeding and (when configured) host-side ACL flow control.
 *
 * Returns 0 on success or the negative error of the failing step.
 */
static int common_init(void)
{
	struct net_buf *rsp;
	int err;

	/* Some controllers must not be reset by the host (quirk flag). */
	if (!(bt_dev.drv->quirks & BT_QUIRK_NO_RESET)) {
		/* Send HCI_RESET */
		err = bt_hci_cmd_send_sync(BT_HCI_OP_RESET, NULL, &rsp);
		if (err) {
			return err;
		}
		hci_reset_complete(rsp);
		net_buf_unref(rsp);
	}

	/* Read Local Supported Features */
	err = bt_hci_cmd_send_sync(BT_HCI_OP_READ_LOCAL_FEATURES, NULL, &rsp);
	if (err) {
		return err;
	}
	read_local_features_complete(rsp);
	net_buf_unref(rsp);

	/* Read Local Version Information */
	err = bt_hci_cmd_send_sync(BT_HCI_OP_READ_LOCAL_VERSION_INFO, NULL,
				   &rsp);
	if (err) {
		return err;
	}
	read_local_ver_complete(rsp);
	net_buf_unref(rsp);

	/* Read Local Supported Commands */
	err = bt_hci_cmd_send_sync(BT_HCI_OP_READ_SUPPORTED_COMMANDS, NULL,
				   &rsp);
	if (err) {
		return err;
	}
	read_supported_commands_complete(rsp);
	net_buf_unref(rsp);

	if (IS_ENABLED(CONFIG_BT_HOST_CRYPTO_PRNG)) {
		/* Initialize the PRNG so that it is safe to use it later
		 * on in the initialization process.
		 */
		err = prng_init();
		if (err) {
			return err;
		}
	}

#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
	/* Enable host-to-controller ACL flow control. */
	err = set_flow_control();
	if (err) {
		return err;
	}
#endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */

	return 0;
}
2997 
/* Build and send the LE event mask. Each event is enabled only when both
 * the host configuration (Kconfig) and the controller's reported LE
 * features/commands indicate the event can actually occur.
 */
static int le_set_event_mask(void)
{
	struct bt_hci_cp_le_set_event_mask *cp_mask;
	struct net_buf *buf;
	uint64_t mask = 0U;

	/* Set LE event mask */
	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_EVENT_MASK, sizeof(*cp_mask));
	if (!buf) {
		return -ENOBUFS;
	}

	cp_mask = net_buf_add(buf, sizeof(*cp_mask));

	mask |= BT_EVT_MASK_LE_ADVERTISING_REPORT;

	/* Extended advertising events, plus periodic sync events on top. */
	if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
	    BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
		mask |= BT_EVT_MASK_LE_ADV_SET_TERMINATED;
		mask |= BT_EVT_MASK_LE_SCAN_REQ_RECEIVED;
		mask |= BT_EVT_MASK_LE_EXT_ADVERTISING_REPORT;
		mask |= BT_EVT_MASK_LE_SCAN_TIMEOUT;
		if (IS_ENABLED(CONFIG_BT_PER_ADV_SYNC)) {
			mask |= BT_EVT_MASK_LE_PER_ADV_SYNC_ESTABLISHED;
			mask |= BT_EVT_MASK_LE_PER_ADVERTISING_REPORT;
			mask |= BT_EVT_MASK_LE_PER_ADV_SYNC_LOST;
			mask |= BT_EVT_MASK_LE_PAST_RECEIVED;
		}
	}

	if (IS_ENABLED(CONFIG_BT_CONN)) {
		if ((IS_ENABLED(CONFIG_BT_SMP) &&
		     BT_FEAT_LE_PRIVACY(bt_dev.le.features)) ||
		    (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
		     BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features))) {
			/* C24:
			 * Mandatory if the LE Controller supports Connection
			 * State and either LE Feature (LL Privacy) or
			 * LE Feature (Extended Advertising) is supported, ...
			 */
			mask |= BT_EVT_MASK_LE_ENH_CONN_COMPLETE;
		} else {
			mask |= BT_EVT_MASK_LE_CONN_COMPLETE;
		}

		mask |= BT_EVT_MASK_LE_CONN_UPDATE_COMPLETE;
		mask |= BT_EVT_MASK_LE_REMOTE_FEAT_COMPLETE;

		if (BT_FEAT_LE_CONN_PARAM_REQ_PROC(bt_dev.le.features)) {
			mask |= BT_EVT_MASK_LE_CONN_PARAM_REQ;
		}

		if (IS_ENABLED(CONFIG_BT_DATA_LEN_UPDATE) &&
		    BT_FEAT_LE_DLE(bt_dev.le.features)) {
			mask |= BT_EVT_MASK_LE_DATA_LEN_CHANGE;
		}

		if (IS_ENABLED(CONFIG_BT_PHY_UPDATE) &&
		    (BT_FEAT_LE_PHY_2M(bt_dev.le.features) ||
		     BT_FEAT_LE_PHY_CODED(bt_dev.le.features))) {
			mask |= BT_EVT_MASK_LE_PHY_UPDATE_COMPLETE;
		}
	}

	if (IS_ENABLED(CONFIG_BT_SMP) &&
	    BT_FEAT_LE_ENCR(bt_dev.le.features)) {
		mask |= BT_EVT_MASK_LE_LTK_REQUEST;
	}

	/*
	 * If "LE Read Local P-256 Public Key" and "LE Generate DH Key" are
	 * supported we need to enable events generated by those commands.
	 */
	if (IS_ENABLED(CONFIG_BT_ECC) &&
	    (BT_CMD_TEST(bt_dev.supported_commands, 34, 1)) &&
	    (BT_CMD_TEST(bt_dev.supported_commands, 34, 2))) {
		mask |= BT_EVT_MASK_LE_P256_PUBLIC_KEY_COMPLETE;
		mask |= BT_EVT_MASK_LE_GENERATE_DHKEY_COMPLETE;
	}

	/*
	 * Enable CIS events only if ISO connections are enabled and controller
	 * support them.
	 */
	if (IS_ENABLED(CONFIG_BT_ISO) &&
	    BT_FEAT_LE_CIS(bt_dev.le.features)) {
		mask |= BT_EVT_MASK_LE_CIS_ESTABLISHED;
		if (BT_FEAT_LE_CIS_PERIPHERAL(bt_dev.le.features)) {
			mask |= BT_EVT_MASK_LE_CIS_REQ;
		}
	}

	/* Enable BIS events for broadcaster and/or receiver */
	if (IS_ENABLED(CONFIG_BT_ISO) && BT_FEAT_LE_BIS(bt_dev.le.features)) {
		if (IS_ENABLED(CONFIG_BT_ISO_BROADCASTER) &&
		    BT_FEAT_LE_ISO_BROADCASTER(bt_dev.le.features)) {
			mask |= BT_EVT_MASK_LE_BIG_COMPLETE;
			mask |= BT_EVT_MASK_LE_BIG_TERMINATED;
		}
		if (IS_ENABLED(CONFIG_BT_ISO_SYNC_RECEIVER) &&
		    BT_FEAT_LE_SYNC_RECEIVER(bt_dev.le.features)) {
			mask |= BT_EVT_MASK_LE_BIG_SYNC_ESTABLISHED;
			mask |= BT_EVT_MASK_LE_BIG_SYNC_LOST;
			mask |= BT_EVT_MASK_LE_BIGINFO_ADV_REPORT;
		}
	}

	/* Enable IQ samples report events receiver */
	if (IS_ENABLED(CONFIG_BT_DF_CONNECTIONLESS_CTE_RX)) {
		mask |= BT_EVT_MASK_LE_CONNECTIONLESS_IQ_REPORT;
	}

	if (IS_ENABLED(CONFIG_BT_DF_CONNECTION_CTE_RX)) {
		mask |= BT_EVT_MASK_LE_CONNECTION_IQ_REPORT;
		mask |= BT_EVT_MASK_LE_CTE_REQUEST_FAILED;
	}

	/* Periodic advertising with responses (PAwR) events. */
	if (IS_ENABLED(CONFIG_BT_PER_ADV_RSP)) {
		mask |= BT_EVT_MASK_LE_PER_ADV_SUBEVENT_DATA_REQ;
		mask |= BT_EVT_MASK_LE_PER_ADV_RESPONSE_REPORT;
	}

	if (IS_ENABLED(CONFIG_BT_PER_ADV_SYNC_RSP)) {
		mask |= BT_EVT_MASK_LE_PER_ADVERTISING_REPORT_V2;
		mask |= BT_EVT_MASK_LE_PER_ADV_SYNC_ESTABLISHED_V2;
		mask |= BT_EVT_MASK_LE_PAST_RECEIVED_V2;
	}

	if (IS_ENABLED(CONFIG_BT_CONN) &&
	    (IS_ENABLED(CONFIG_BT_PER_ADV_RSP) || IS_ENABLED(CONFIG_BT_PER_ADV_SYNC_RSP))) {
		mask |= BT_EVT_MASK_LE_ENH_CONN_COMPLETE_V2;
	}

	sys_put_le64(mask, cp_mask->events);
	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_EVENT_MASK, buf, NULL);
}
3134 
le_init_iso(void)3135 static int le_init_iso(void)
3136 {
3137 	int err;
3138 	struct net_buf *rsp;
3139 
3140 	if (IS_ENABLED(CONFIG_BT_ISO_UNICAST)) {
3141 		/* Set Connected Isochronous Streams - Host support */
3142 		err = le_set_host_feature(BT_LE_FEAT_BIT_ISO_CHANNELS, 1);
3143 		if (err) {
3144 			return err;
3145 		}
3146 	}
3147 
3148 	/* Octet 41, bit 5 is read buffer size V2 */
3149 	if (BT_CMD_TEST(bt_dev.supported_commands, 41, 5)) {
3150 		/* Read ISO Buffer Size V2 */
3151 		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_BUFFER_SIZE_V2,
3152 					   NULL, &rsp);
3153 		if (err) {
3154 			return err;
3155 		}
3156 
3157 		read_buffer_size_v2_complete(rsp);
3158 
3159 		net_buf_unref(rsp);
3160 	} else if (IS_ENABLED(CONFIG_BT_CONN)) {
3161 		LOG_WRN("Read Buffer Size V2 command is not supported."
3162 			"No ISO buffers will be available");
3163 
3164 		/* Read LE Buffer Size */
3165 		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_BUFFER_SIZE,
3166 					   NULL, &rsp);
3167 		if (err) {
3168 			return err;
3169 		}
3170 
3171 		le_read_buffer_size_complete(rsp);
3172 
3173 		net_buf_unref(rsp);
3174 	}
3175 
3176 	return 0;
3177 }
3178 
/* LE-specific HCI bring-up: read LE features and buffer sizes, enable LE
 * on dual-mode controllers, read supported LE states, set the default
 * data length, configure privacy (RPA timeout, resolving-list size) and
 * direction finding, and finally program the LE event mask.
 *
 * Returns 0 on success or a negative error from the failing step.
 */
static int le_init(void)
{
	struct bt_hci_cp_write_le_host_supp *cp_le;
	struct net_buf *buf, *rsp;
	int err;

	/* For now we only support LE capable controllers */
	if (!BT_FEAT_LE(bt_dev.features)) {
		LOG_ERR("Non-LE capable controller detected!");
		return -ENODEV;
	}

	/* Read Low Energy Supported Features */
	err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_LOCAL_FEATURES, NULL,
				   &rsp);
	if (err) {
		return err;
	}

	read_le_features_complete(rsp);
	net_buf_unref(rsp);

	/* Buffer discovery: ISO-aware path when the controller supports ISO,
	 * otherwise the plain LE buffer size read.
	 */
	if (IS_ENABLED(CONFIG_BT_ISO) &&
	    BT_FEAT_LE_ISO(bt_dev.le.features)) {
		err = le_init_iso();
		if (err) {
			return err;
		}
	} else if (IS_ENABLED(CONFIG_BT_CONN)) {
		/* Read LE Buffer Size */
		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_BUFFER_SIZE,
					   NULL, &rsp);
		if (err) {
			return err;
		}

		le_read_buffer_size_complete(rsp);

		net_buf_unref(rsp);
	}

	if (BT_FEAT_BREDR(bt_dev.features)) {
		buf = bt_hci_cmd_create(BT_HCI_OP_LE_WRITE_LE_HOST_SUPP,
					sizeof(*cp_le));
		if (!buf) {
			return -ENOBUFS;
		}

		cp_le = net_buf_add(buf, sizeof(*cp_le));

		/* Explicitly enable LE for dual-mode controllers */
		cp_le->le = 0x01;
		cp_le->simul = 0x00;
		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_WRITE_LE_HOST_SUPP, buf,
					   NULL);
		if (err) {
			return err;
		}
	}

	/* Read LE Supported States */
	if (BT_CMD_LE_STATES(bt_dev.supported_commands)) {
		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_SUPP_STATES, NULL,
					   &rsp);
		if (err) {
			return err;
		}

		le_read_supp_states_complete(rsp);
		net_buf_unref(rsp);
	}

	/* Program the maximum supported TX octets/time as the default data
	 * length when automatic data length updates are enabled.
	 */
	if (IS_ENABLED(CONFIG_BT_CONN) &&
	    IS_ENABLED(CONFIG_BT_DATA_LEN_UPDATE) &&
	    IS_ENABLED(CONFIG_BT_AUTO_DATA_LEN_UPDATE) &&
	    BT_FEAT_LE_DLE(bt_dev.le.features)) {
		struct bt_hci_cp_le_write_default_data_len *cp;
		uint16_t tx_octets, tx_time;

		err = hci_le_read_max_data_len(&tx_octets, &tx_time);
		if (err) {
			return err;
		}

		buf = bt_hci_cmd_create(BT_HCI_OP_LE_WRITE_DEFAULT_DATA_LEN,
					sizeof(*cp));
		if (!buf) {
			return -ENOBUFS;
		}

		cp = net_buf_add(buf, sizeof(*cp));
		cp->max_tx_octets = sys_cpu_to_le16(tx_octets);
		cp->max_tx_time = sys_cpu_to_le16(tx_time);

		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_WRITE_DEFAULT_DATA_LEN,
					   buf, NULL);
		if (err) {
			return err;
		}
	}

#if defined(CONFIG_BT_SMP)
	if (BT_FEAT_LE_PRIVACY(bt_dev.le.features)) {
#if defined(CONFIG_BT_PRIVACY)
		struct bt_hci_cp_le_set_rpa_timeout *cp;

		buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_RPA_TIMEOUT,
					sizeof(*cp));
		if (!buf) {
			return -ENOBUFS;
		}

		cp = net_buf_add(buf, sizeof(*cp));
		cp->rpa_timeout = sys_cpu_to_le16(bt_dev.rpa_timeout);
		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_RPA_TIMEOUT, buf,
					   NULL);
		if (err) {
			return err;
		}
#endif /* defined(CONFIG_BT_PRIVACY) */

		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_RL_SIZE, NULL,
					   &rsp);
		if (err) {
			return err;
		}
		le_read_resolving_list_size_complete(rsp);
		net_buf_unref(rsp);
	}
#endif

#if defined(CONFIG_BT_DF)
	/* Initialize direction finding when any CTE feature is available. */
	if (BT_FEAT_LE_CONNECTIONLESS_CTE_TX(bt_dev.le.features) ||
	    BT_FEAT_LE_CONNECTIONLESS_CTE_RX(bt_dev.le.features) ||
	    BT_FEAT_LE_RX_CTE(bt_dev.le.features)) {
		err = le_df_init();
		if (err) {
			return err;
		}
	}
#endif /* CONFIG_BT_DF */

	return  le_set_event_mask();
}
3323 
3324 #if !defined(CONFIG_BT_BREDR)
/* LE-only build variant of bt_br_init(): no BR/EDR stack to initialize,
 * but on dual-mode controllers the BR/EDR ACL buffers are adopted when
 * the LE read reported none.
 */
static int bt_br_init(void)
{
#if defined(CONFIG_BT_CONN)
	struct net_buf *rsp;
	int err;

	/* LE buffers already known; no BR/EDR fallback required. */
	if (bt_dev.le.acl_mtu != 0) {
		return 0;
	}

	/* Use BR/EDR buffer size if LE reports zero buffers */
	err = bt_hci_cmd_send_sync(BT_HCI_OP_READ_BUFFER_SIZE, NULL, &rsp);
	if (err != 0) {
		return err;
	}

	read_buffer_size_complete(rsp);
	net_buf_unref(rsp);
#endif /* CONFIG_BT_CONN */

	return 0;
}
3347 #endif /* !defined(CONFIG_BT_BREDR) */
3348 
/* Build and send the general (non-LE) HCI event mask based on the
 * enabled host features and the controller's LE encryption support.
 */
static int set_event_mask(void)
{
	struct bt_hci_cp_set_event_mask *ev;
	struct net_buf *buf;
	uint64_t mask = 0U;

	buf = bt_hci_cmd_create(BT_HCI_OP_SET_EVENT_MASK, sizeof(*ev));
	if (!buf) {
		return -ENOBUFS;
	}

	ev = net_buf_add(buf, sizeof(*ev));

	if (IS_ENABLED(CONFIG_BT_BREDR)) {
		/* Since we require LE support, we can count on a
		 * Bluetooth 4.0 feature set
		 */
		mask |= BT_EVT_MASK_INQUIRY_COMPLETE;
		mask |= BT_EVT_MASK_CONN_COMPLETE;
		mask |= BT_EVT_MASK_CONN_REQUEST;
		mask |= BT_EVT_MASK_AUTH_COMPLETE;
		mask |= BT_EVT_MASK_REMOTE_NAME_REQ_COMPLETE;
		mask |= BT_EVT_MASK_REMOTE_FEATURES;
		mask |= BT_EVT_MASK_ROLE_CHANGE;
		mask |= BT_EVT_MASK_PIN_CODE_REQ;
		mask |= BT_EVT_MASK_LINK_KEY_REQ;
		mask |= BT_EVT_MASK_LINK_KEY_NOTIFY;
		mask |= BT_EVT_MASK_INQUIRY_RESULT_WITH_RSSI;
		mask |= BT_EVT_MASK_REMOTE_EXT_FEATURES;
		mask |= BT_EVT_MASK_SYNC_CONN_COMPLETE;
		mask |= BT_EVT_MASK_EXTENDED_INQUIRY_RESULT;
		mask |= BT_EVT_MASK_IO_CAPA_REQ;
		mask |= BT_EVT_MASK_IO_CAPA_RESP;
		mask |= BT_EVT_MASK_USER_CONFIRM_REQ;
		mask |= BT_EVT_MASK_USER_PASSKEY_REQ;
		mask |= BT_EVT_MASK_SSP_COMPLETE;
		mask |= BT_EVT_MASK_USER_PASSKEY_NOTIFY;
	}

	/* Always-on events, including the meta event carrying LE events. */
	mask |= BT_EVT_MASK_HARDWARE_ERROR;
	mask |= BT_EVT_MASK_DATA_BUFFER_OVERFLOW;
	mask |= BT_EVT_MASK_LE_META_EVENT;

	if (IS_ENABLED(CONFIG_BT_CONN)) {
		mask |= BT_EVT_MASK_DISCONN_COMPLETE;
		mask |= BT_EVT_MASK_REMOTE_VERSION_INFO;
	}

	if (IS_ENABLED(CONFIG_BT_SMP) &&
	    BT_FEAT_LE_ENCR(bt_dev.le.features)) {
		mask |= BT_EVT_MASK_ENCRYPT_CHANGE;
		mask |= BT_EVT_MASK_ENCRYPT_KEY_REFRESH_COMPLETE;
	}

	sys_put_le64(mask, ev->events);
	return bt_hci_cmd_send_sync(BT_HCI_OP_SET_EVENT_MASK, buf, NULL);
}
3406 
/* Map a Bluetooth Core Specification version number to its human
 * readable name, or "unknown" for values beyond the known table.
 */
static const char *ver_str(uint8_t ver)
{
	static const char * const known[] = {
		"1.0b", "1.1", "1.2", "2.0", "2.1", "3.0", "4.0", "4.1", "4.2",
		"5.0", "5.1", "5.2", "5.3", "5.4"
	};

	return (ver < (sizeof(known) / sizeof(known[0]))) ? known[ver]
							  : "unknown";
}
3420 
/* Log the local identities (and, with sniffer logging, their IRKs) plus
 * the controller's HCI and LMP version information.
 */
static void bt_dev_show_info(void)
{
	int i;

	/* Identity 0 is always present; index suffix only with multiple IDs. */
	LOG_INF("Identity%s: %s", bt_dev.id_count > 1 ? "[0]" : "",
		bt_addr_le_str(&bt_dev.id_addr[0]));

	if (IS_ENABLED(CONFIG_BT_LOG_SNIFFER_INFO)) {
#if defined(CONFIG_BT_PRIVACY)
		uint8_t irk[16];

		/* IRK is stored little-endian; print byte-swapped. */
		sys_memcpy_swap(irk, bt_dev.irk[0], 16);
		LOG_INF("IRK%s: 0x%s", bt_dev.id_count > 1 ? "[0]" : "", bt_hex(irk, 16));
#endif
	}

	for (i = 1; i < bt_dev.id_count; i++) {
		LOG_INF("Identity[%d]: %s", i, bt_addr_le_str(&bt_dev.id_addr[i]));

		if (IS_ENABLED(CONFIG_BT_LOG_SNIFFER_INFO)) {
#if defined(CONFIG_BT_PRIVACY)
			uint8_t irk[16];

			sys_memcpy_swap(irk, bt_dev.irk[i], 16);
			LOG_INF("IRK[%d]: 0x%s", i, bt_hex(irk, 16));
#endif
		}
	}

	if (IS_ENABLED(CONFIG_BT_SMP) &&
	    IS_ENABLED(CONFIG_BT_LOG_SNIFFER_INFO)) {
		bt_keys_foreach_type(BT_KEYS_ALL, bt_keys_show_sniffer_info, NULL);
	}

	LOG_INF("HCI: version %s (0x%02x) revision 0x%04x, manufacturer 0x%04x",
		ver_str(bt_dev.hci_version), bt_dev.hci_version, bt_dev.hci_revision,
		bt_dev.manufacturer);
	LOG_INF("LMP: version %s (0x%02x) subver 0x%04x", ver_str(bt_dev.lmp_version),
		bt_dev.lmp_version, bt_dev.lmp_subversion);
}
3461 
3462 #if defined(CONFIG_BT_HCI_VS_EXT)
/* Map a vendor-specific HW platform code to a vendor name, or "unknown"
 * for codes beyond the known table.
 */
static const char *vs_hw_platform(uint16_t platform)
{
	static const char * const plat_str[] = {
		"reserved", "Intel Corporation", "Nordic Semiconductor",
		"NXP Semiconductors" };

	return (platform < (sizeof(plat_str) / sizeof(plat_str[0]))) ?
	       plat_str[platform] : "unknown";
}
3475 
vs_hw_variant(uint16_t platform,uint16_t variant)3476 static const char *vs_hw_variant(uint16_t platform, uint16_t variant)
3477 {
3478 	static const char * const nordic_str[] = {
3479 		"reserved", "nRF51x", "nRF52x", "nRF53x"
3480 	};
3481 
3482 	if (platform != BT_HCI_VS_HW_PLAT_NORDIC) {
3483 		return "unknown";
3484 	}
3485 
3486 	if (variant < ARRAY_SIZE(nordic_str)) {
3487 		return nordic_str[variant];
3488 	}
3489 
3490 	return "unknown";
3491 }
3492 
/* Map a vendor-specific firmware variant code to a description, or
 * "unknown" for codes beyond the known table.
 */
static const char *vs_fw_variant(uint8_t variant)
{
	static const char * const var_str[] = {
		"Standard Bluetooth controller",
		"Vendor specific controller",
		"Firmware loader",
		"Rescue image",
	};

	return (variant < (sizeof(var_str) / sizeof(var_str[0]))) ?
	       var_str[variant] : "unknown";
}
3508 
/* Probe and record the controller's Zephyr vendor-specific HCI
 * extensions: version info, supported VS commands and (when advertised)
 * supported VS features. Failures are logged and treated as "no VS
 * support" rather than as fatal errors.
 */
static void hci_vs_init(void)
{
	union {
		struct bt_hci_rp_vs_read_version_info *info;
		struct bt_hci_rp_vs_read_supported_commands *cmds;
		struct bt_hci_rp_vs_read_supported_features *feat;
	} rp;
	struct net_buf *rsp;
	int err;

	/* If heuristics is enabled, try to guess HCI VS support by looking
	 * at the HCI version and identity address. We haven't set any addresses
	 * at this point. So we need to read the public address.
	 */
	if (IS_ENABLED(CONFIG_BT_HCI_VS_EXT_DETECT)) {
		bt_addr_le_t addr;

		if ((bt_dev.hci_version < BT_HCI_VERSION_5_0) ||
		    bt_id_read_public_addr(&addr)) {
			LOG_WRN("Controller doesn't seem to support "
				"Zephyr vendor HCI");
			return;
		}
	}

	err = bt_hci_cmd_send_sync(BT_HCI_OP_VS_READ_VERSION_INFO, NULL, &rsp);
	if (err) {
		LOG_WRN("Vendor HCI extensions not available");
		return;
	}

	/* With detection heuristics, a response of unexpected size means the
	 * controller answered something that isn't the Zephyr VS extension.
	 */
	if (IS_ENABLED(CONFIG_BT_HCI_VS_EXT_DETECT) &&
	    rsp->len != sizeof(struct bt_hci_rp_vs_read_version_info)) {
		LOG_WRN("Invalid Vendor HCI extensions");
		net_buf_unref(rsp);
		return;
	}

	rp.info = (void *)rsp->data;
	LOG_INF("HW Platform: %s (0x%04x)", vs_hw_platform(sys_le16_to_cpu(rp.info->hw_platform)),
		sys_le16_to_cpu(rp.info->hw_platform));
	LOG_INF("HW Variant: %s (0x%04x)",
		vs_hw_variant(sys_le16_to_cpu(rp.info->hw_platform),
			      sys_le16_to_cpu(rp.info->hw_variant)),
		sys_le16_to_cpu(rp.info->hw_variant));
	LOG_INF("Firmware: %s (0x%02x) Version %u.%u Build %u", vs_fw_variant(rp.info->fw_variant),
		rp.info->fw_variant, rp.info->fw_version, sys_le16_to_cpu(rp.info->fw_revision),
		sys_le32_to_cpu(rp.info->fw_build));

	net_buf_unref(rsp);

	err = bt_hci_cmd_send_sync(BT_HCI_OP_VS_READ_SUPPORTED_COMMANDS,
				   NULL, &rsp);
	if (err) {
		LOG_WRN("Failed to read supported vendor commands");
		return;
	}

	if (IS_ENABLED(CONFIG_BT_HCI_VS_EXT_DETECT) &&
	    rsp->len != sizeof(struct bt_hci_rp_vs_read_supported_commands)) {
		LOG_WRN("Invalid Vendor HCI extensions");
		net_buf_unref(rsp);
		return;
	}

	rp.cmds = (void *)rsp->data;
	memcpy(bt_dev.vs_commands, rp.cmds->commands, BT_DEV_VS_CMDS_MAX);
	net_buf_unref(rsp);

	/* Only query VS features if the VS command bitmap says the command
	 * is supported.
	 */
	if (BT_VS_CMD_SUP_FEAT(bt_dev.vs_commands)) {
		err = bt_hci_cmd_send_sync(BT_HCI_OP_VS_READ_SUPPORTED_FEATURES,
					   NULL, &rsp);
		if (err) {
			LOG_WRN("Failed to read supported vendor features");
			return;
		}

		if (IS_ENABLED(CONFIG_BT_HCI_VS_EXT_DETECT) &&
		    rsp->len !=
		    sizeof(struct bt_hci_rp_vs_read_supported_features)) {
			LOG_WRN("Invalid Vendor HCI extensions");
			net_buf_unref(rsp);
			return;
		}

		rp.feat = (void *)rsp->data;
		memcpy(bt_dev.vs_features, rp.feat->features,
		       BT_DEV_VS_FEAT_MAX);
		net_buf_unref(rsp);
	}
}
3600 #endif /* CONFIG_BT_HCI_VS_EXT */
3601 
/* Full HCI initialization sequence: driver setup hook, common init,
 * LE init, BR/EDR init (or the LE-only buffer fallback), event mask
 * programming, vendor extension probing and identity setup.
 *
 * Returns 0 on success or a negative error from the failing step.
 */
static int hci_init(void)
{
	int err;
#if defined(CONFIG_BT_HCI_SETUP)
	/* Give the driver a chance to do pre-HCI hardware setup. */
	if (bt_dev.drv->setup) {
		err = bt_dev.drv->setup();
		if (err) {
			return err;
		}
	}
#endif /* defined(CONFIG_BT_HCI_SETUP) */

	err = common_init();
	if (err) {
		return err;
	}

	err = le_init();
	if (err) {
		return err;
	}

	if (BT_FEAT_BREDR(bt_dev.features)) {
		err = bt_br_init();
		if (err) {
			return err;
		}
	} else if (IS_ENABLED(CONFIG_BT_BREDR)) {
		LOG_ERR("Non-BR/EDR controller detected");
		return -EIO;
	}
#if defined(CONFIG_BT_CONN)
	/* LE-only controller and LE-only host: by this point LE init must
	 * have produced usable ACL buffers.
	 */
	else if (!bt_dev.le.acl_mtu) {
		LOG_ERR("ACL BR/EDR buffers not initialized");
		return -EIO;
	}
#endif

	err = set_event_mask();
	if (err) {
		return err;
	}

#if defined(CONFIG_BT_HCI_VS_EXT)
	hci_vs_init();
#endif
	err = bt_id_init();
	if (err) {
		return err;
	}

	return 0;
}
3655 
/* Hand an outgoing HCI buffer to the controller driver, mirroring it to
 * the monitor first. With TinyCrypt ECC enabled, the buffer goes through
 * bt_hci_ecc_send() instead of directly to the driver.
 */
int bt_send(struct net_buf *buf)
{
	LOG_DBG("buf %p len %u type %u", buf, buf->len, bt_buf_get_type(buf));

	bt_monitor_send(bt_monitor_opcode(buf), buf->data, buf->len);

	if (IS_ENABLED(CONFIG_BT_TINYCRYPT_ECC)) {
		return bt_hci_ecc_send(buf);
	}

	return bt_dev.drv->send(buf);
}
3668 
/* Handlers for events dispatched by hci_event_prio(), i.e. events that
 * are processed with priority ahead of the regular RX queue.
 */
static const struct event_handler prio_events[] = {
	EVENT_HANDLER(BT_HCI_EVT_CMD_COMPLETE, hci_cmd_complete,
		      sizeof(struct bt_hci_evt_cmd_complete)),
	EVENT_HANDLER(BT_HCI_EVT_CMD_STATUS, hci_cmd_status,
		      sizeof(struct bt_hci_evt_cmd_status)),
#if defined(CONFIG_BT_CONN)
	EVENT_HANDLER(BT_HCI_EVT_DATA_BUF_OVERFLOW,
		      hci_data_buf_overflow,
		      sizeof(struct bt_hci_evt_data_buf_overflow)),
	EVENT_HANDLER(BT_HCI_EVT_DISCONN_COMPLETE, hci_disconn_complete_prio,
		      sizeof(struct bt_hci_evt_disconn_complete)),
#endif /* CONFIG_BT_CONN */
#if defined(CONFIG_BT_CONN_TX)
	EVENT_HANDLER(BT_HCI_EVT_NUM_COMPLETED_PACKETS,
		      hci_num_completed_packets,
		      sizeof(struct bt_hci_evt_num_completed_packets)),
#endif /* CONFIG_BT_CONN_TX */
};
3687 
/* Dispatch a priority (RECV_PRIO) HCI event immediately. If the event is
 * also flagged for the regular RX path, the buffer is restored (header
 * re-attached) so it can be queued afterwards; otherwise it is consumed
 * here and its reference dropped.
 */
void hci_event_prio(struct net_buf *buf)
{
	struct net_buf_simple_state state;
	struct bt_hci_evt_hdr *hdr;
	uint8_t evt_flags;

	/* Save parse state so the pulled header can be restored below. */
	net_buf_simple_save(&buf->b, &state);

	BT_ASSERT(buf->len >= sizeof(*hdr));

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
	evt_flags = bt_hci_evt_get_flags(hdr->evt);
	BT_ASSERT(evt_flags & BT_HCI_EVT_FLAG_RECV_PRIO);

	handle_event(hdr->evt, buf, prio_events, ARRAY_SIZE(prio_events));

	if (evt_flags & BT_HCI_EVT_FLAG_RECV) {
		/* Buffer will also go through the normal RX path. */
		net_buf_simple_restore(&buf->b, &state);
	} else {
		/* Event fully handled here; release the buffer. */
		net_buf_unref(buf);
	}
}
3710 
3711 #if !defined(CONFIG_BT_RECV_BLOCKING)
/* Append an incoming buffer to the RX queue and submit the RX work item
 * to the configured work queue (system queue or dedicated BT queue).
 */
static void rx_queue_put(struct net_buf *buf)
{
	net_buf_slist_put(&bt_dev.rx_queue, buf);

#if defined(CONFIG_BT_RECV_WORKQ_SYS)
	const int err = k_work_submit(&rx_work);
#elif defined(CONFIG_BT_RECV_WORKQ_BT)
	const int err = k_work_submit_to_queue(&bt_workq, &rx_work);
#endif /* CONFIG_BT_RECV_WORKQ_SYS */
	/* Non-negative submit results are fine (work may already be queued);
	 * only a negative return is an error.
	 */
	if (err < 0) {
		LOG_ERR("Could not submit rx_work: %d", err);
	}
}
3725 #endif /* !CONFIG_BT_RECV_BLOCKING */
3726 
/* Driver entry point for incoming HCI traffic. Depending on the buffer
 * type and the RX configuration, the buffer is processed inline
 * (blocking RX) or queued for the RX work handler; priority events are
 * always dispatched immediately.
 *
 * Returns 0 on success or -EINVAL for an unrecognized buffer type.
 */
int bt_recv(struct net_buf *buf)
{
	bt_monitor_send(bt_monitor_opcode(buf), buf->data, buf->len);

	LOG_DBG("buf %p len %u", buf, buf->len);

	switch (bt_buf_get_type(buf)) {
#if defined(CONFIG_BT_CONN)
	case BT_BUF_ACL_IN:
#if defined(CONFIG_BT_RECV_BLOCKING)
		hci_acl(buf);
#else
		rx_queue_put(buf);
#endif
		return 0;
#endif /* BT_CONN */
	case BT_BUF_EVT:
	{
#if defined(CONFIG_BT_RECV_BLOCKING)
		hci_event(buf);
#else
		struct bt_hci_evt_hdr *hdr = (void *)buf->data;
		uint8_t evt_flags = bt_hci_evt_get_flags(hdr->evt);

		/* Priority events are handled inline; the same buffer may
		 * additionally be queued when it also has the RECV flag
		 * (hci_event_prio() restores the header in that case).
		 */
		if (evt_flags & BT_HCI_EVT_FLAG_RECV_PRIO) {
			hci_event_prio(buf);
		}

		if (evt_flags & BT_HCI_EVT_FLAG_RECV) {
			rx_queue_put(buf);
		}
#endif
		return 0;

	}
#if defined(CONFIG_BT_ISO)
	case BT_BUF_ISO_IN:
#if defined(CONFIG_BT_RECV_BLOCKING)
		hci_iso(buf);
#else
		rx_queue_put(buf);
#endif
		return 0;
#endif /* CONFIG_BT_ISO */
	default:
		LOG_ERR("Invalid buf type %u", bt_buf_get_type(buf));
		net_buf_unref(buf);
		return -EINVAL;
	}
}
3777 
/* Driver entry point for priority HCI events: the buffer must be an
 * event and is dispatched immediately in the caller's context via
 * hci_event_prio(). Always returns 0.
 */
int bt_recv_prio(struct net_buf *buf)
{
	bt_monitor_send(bt_monitor_opcode(buf), buf->data, buf->len);

	BT_ASSERT(bt_buf_get_type(buf) == BT_BUF_EVT);

	hci_event_prio(buf);

	return 0;
}
3788 
bt_hci_driver_register(const struct bt_hci_driver * drv)3789 int bt_hci_driver_register(const struct bt_hci_driver *drv)
3790 {
3791 	if (bt_dev.drv) {
3792 		return -EALREADY;
3793 	}
3794 
3795 	if (!drv->open || !drv->send) {
3796 		return -EINVAL;
3797 	}
3798 
3799 	bt_dev.drv = drv;
3800 
3801 	LOG_DBG("Registered %s", drv->name ? drv->name : "");
3802 
3803 	bt_monitor_new_index(BT_MONITOR_TYPE_PRIMARY, drv->bus,
3804 			     BT_ADDR_ANY, drv->name ? drv->name : "bt0");
3805 
3806 	return 0;
3807 }
3808 
/* Mark the stack as ready, refresh the scan state (observer role) and
 * log the controller/identity information.
 */
void bt_finalize_init(void)
{
	atomic_set_bit(bt_dev.flags, BT_DEV_READY);

	if (IS_ENABLED(CONFIG_BT_OBSERVER)) {
		bt_le_scan_update(false);
	}

	bt_dev_show_info();
}
3819 
/* Top-level stack initialization: HCI bring-up, connection and ISO
 * subsystem init, then finalization. With settings support and no
 * identity yet, finalization is deferred until settings_load() runs.
 *
 * Returns 0 on success or a negative error from the failing step.
 */
static int bt_init(void)
{
	int err;

	err = hci_init();
	if (err) {
		return err;
	}

	if (IS_ENABLED(CONFIG_BT_CONN)) {
		err = bt_conn_init();
		if (err) {
			return err;
		}
	}

	if (IS_ENABLED(CONFIG_BT_ISO)) {
		err = bt_conn_iso_init();
		if (err) {
			return err;
		}
	}

	if (IS_ENABLED(CONFIG_BT_SETTINGS)) {
		if (!bt_dev.id_count) {
			/* Finalization happens after settings_load() provides
			 * an identity address.
			 */
			LOG_INF("No ID address. App must call settings_load()");
			return 0;
		}

		atomic_set_bit(bt_dev.flags, BT_DEV_PRESET_ID);
	}

	bt_finalize_init();
	return 0;
}
3855 
init_work(struct k_work * work)3856 static void init_work(struct k_work *work)
3857 {
3858 	int err;
3859 
3860 	err = bt_init();
3861 	if (ready_cb) {
3862 		ready_cb(err);
3863 	}
3864 }
3865 
3866 #if !defined(CONFIG_BT_RECV_BLOCKING)
/* RX work handler: take one buffer from the RX queue and process it
 * according to its type, then resubmit itself if more buffers remain.
 */
static void rx_work_handler(struct k_work *work)
{
	int err;

	struct net_buf *buf;

	LOG_DBG("Getting net_buf from queue");
	buf = net_buf_slist_get(&bt_dev.rx_queue);
	if (!buf) {
		return;
	}

	LOG_DBG("buf %p type %u len %u", buf, bt_buf_get_type(buf), buf->len);

	switch (bt_buf_get_type(buf)) {
#if defined(CONFIG_BT_CONN)
	case BT_BUF_ACL_IN:
		hci_acl(buf);
		break;
#endif /* CONFIG_BT_CONN */
#if defined(CONFIG_BT_ISO)
	case BT_BUF_ISO_IN:
		hci_iso(buf);
		break;
#endif /* CONFIG_BT_ISO */
	case BT_BUF_EVT:
		hci_event(buf);
		break;
	default:
		LOG_ERR("Unknown buf type %u", bt_buf_get_type(buf));
		net_buf_unref(buf);
		break;
	}

	/* Schedule the work handler to be executed again if there are
	 * additional items in the queue. This allows for other users of the
	 * work queue to get a chance at running, which wouldn't be possible if
	 * we used a while() loop with a k_yield() statement.
	 */
	if (!sys_slist_is_empty(&bt_dev.rx_queue)) {

#if defined(CONFIG_BT_RECV_WORKQ_SYS)
		err = k_work_submit(&rx_work);
#elif defined(CONFIG_BT_RECV_WORKQ_BT)
		err = k_work_submit_to_queue(&bt_workq, &rx_work);
#endif
		if (err < 0) {
			LOG_ERR("Could not submit rx_work: %d", err);
		}
	}
}
3918 #endif /* !CONFIG_BT_RECV_BLOCKING */
3919 
/* Enable the Bluetooth stack: initialize command transport state, start the
 * HCI TX thread (and the dedicated RX work queue when configured), open the
 * HCI driver, then run host initialization.
 *
 * @param cb Callback invoked with the initialization result. If NULL,
 *           initialization runs synchronously in the caller's context.
 *
 * @return 0 on success, -ENODEV if no HCI driver is registered, -EALREADY if
 *         already enabled, or a negative error from settings init / driver
 *         open / bt_init().
 */
int bt_enable(bt_ready_cb_t cb)
{
	int err;

	if (!bt_dev.drv) {
		LOG_ERR("No HCI driver registered");
		return -ENODEV;
	}

	atomic_clear_bit(bt_dev.flags, BT_DEV_DISABLE);

	if (atomic_test_and_set_bit(bt_dev.flags, BT_DEV_ENABLE)) {
		return -EALREADY;
	}

	if (IS_ENABLED(CONFIG_BT_SETTINGS)) {
		err = bt_settings_init();
		if (err) {
			/* Roll back the enable flag so a later bt_enable()
			 * retry is not rejected with -EALREADY.
			 */
			atomic_clear_bit(bt_dev.flags, BT_DEV_ENABLE);
			return err;
		}
	} else if (IS_ENABLED(CONFIG_BT_DEVICE_NAME_DYNAMIC)) {
		err = bt_set_name(CONFIG_BT_DEVICE_NAME);
		if (err) {
			/* Non-fatal: continue with the default name. */
			LOG_WRN("Failed to set device name (%d)", err);
		}
	}

	ready_cb = cb;

	/* Give cmd_sem allowing to send first HCI_Reset cmd, the only
	 * exception is if the controller requests to wait for an
	 * initial Command Complete for NOP.
	 */
	if (!IS_ENABLED(CONFIG_BT_WAIT_NOP)) {
		k_sem_init(&bt_dev.ncmd_sem, 1, 1);
	} else {
		k_sem_init(&bt_dev.ncmd_sem, 0, 1);
	}
	k_fifo_init(&bt_dev.cmd_tx_queue);

	/* TX thread */
	k_thread_create(&tx_thread_data, tx_thread_stack,
			K_KERNEL_STACK_SIZEOF(tx_thread_stack),
			hci_tx_thread, NULL, NULL, NULL,
			K_PRIO_COOP(CONFIG_BT_HCI_TX_PRIO),
			0, K_NO_WAIT);
	k_thread_name_set(&tx_thread_data, "BT TX");

#if defined(CONFIG_BT_RECV_WORKQ_BT)
	/* RX thread */
	k_work_queue_init(&bt_workq);
	k_work_queue_start(&bt_workq, rx_thread_stack,
			   CONFIG_BT_RX_STACK_SIZE,
			   K_PRIO_COOP(CONFIG_BT_RX_PRIO), NULL);
	k_thread_name_set(&bt_workq.thread, "BT RX");
#endif

	err = bt_dev.drv->open();
	if (err) {
		LOG_ERR("HCI driver open failed (%d)", err);

		/* Undo the partial bring-up so bt_enable() can be retried:
		 * stop the TX thread created above and clear the enable flag
		 * (otherwise a retry would return -EALREADY and re-create an
		 * already-running thread).
		 * NOTE(review): the CONFIG_BT_RECV_WORKQ_BT work queue cannot
		 * be stopped here; a retry re-runs k_work_queue_start() on it
		 * — confirm this is tolerated or guard it separately.
		 */
		k_thread_abort(&tx_thread_data);
		atomic_clear_bit(bt_dev.flags, BT_DEV_ENABLE);

		return err;
	}

	bt_monitor_send(BT_MONITOR_OPEN_INDEX, NULL, 0);

	if (!cb) {
		/* Synchronous initialization requested. */
		return bt_init();
	}

	/* Defer initialization to the work queue; init_work() invokes
	 * ready_cb with the result.
	 */
	k_work_submit(&bt_dev.init);
	return 0;
}
3991 
/* Disable the Bluetooth stack: close the HCI driver, reset host state and
 * stop the TX (and optional RX) threads. Teardown order matters — BT_DEV_READY
 * is cleared before closing the link, and BT_DEV_ENABLE is cleared only at
 * the very end so a concurrent bt_enable() cannot start mid-teardown.
 *
 * @return 0 on success, -ENODEV if no driver is registered, -ENOTSUP if the
 *         driver has no close op, -EALREADY if a disable is already in
 *         progress, or the driver's close error.
 */
int bt_disable(void)
{
	int err;

	if (!bt_dev.drv) {
		LOG_ERR("No HCI driver registered");
		return -ENODEV;
	}

	if (!bt_dev.drv->close) {
		return -ENOTSUP;
	}

	if (atomic_test_and_set_bit(bt_dev.flags, BT_DEV_DISABLE)) {
		return -EALREADY;
	}

	/* Clear BT_DEV_READY before disabling HCI link */
	atomic_clear_bit(bt_dev.flags, BT_DEV_READY);

	err = bt_dev.drv->close();
	if (err) {
		LOG_ERR("HCI driver close failed (%d)", err);

		/* Re-enable BT_DEV_READY to avoid inconsistent stack state */
		atomic_set_bit(bt_dev.flags, BT_DEV_READY);

		return err;
	}

	/* Some functions rely on checking this bitfield */
	memset(bt_dev.supported_commands, 0x00, sizeof(bt_dev.supported_commands));

	/* If random address was set up - clear it */
	bt_addr_le_copy(&bt_dev.random_addr, BT_ADDR_LE_ANY);

#if defined(CONFIG_BT_BROADCASTER)
	bt_adv_reset_adv_pool();
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_PRIVACY)
	/* Stop periodic RPA rotation; it would touch freed state otherwise. */
	k_work_cancel_delayable(&bt_dev.rpa_update);
#endif /* CONFIG_BT_PRIVACY */

#if defined(CONFIG_BT_PER_ADV_SYNC)
	bt_periodic_sync_disable();
#endif /* CONFIG_BT_PER_ADV_SYNC */

#if defined(CONFIG_BT_CONN)
	if (IS_ENABLED(CONFIG_BT_SMP)) {
		bt_pub_key_hci_disrupted();
	}
	bt_conn_cleanup_all();
	disconnected_handles_reset();
#endif /* CONFIG_BT_CONN */

	/* Abort TX thread */
	k_thread_abort(&tx_thread_data);

#if defined(CONFIG_BT_RECV_WORKQ_BT)
	/* Abort RX thread */
	k_thread_abort(&bt_workq.thread);
#endif

	bt_monitor_send(BT_MONITOR_CLOSE_INDEX, NULL, 0);

	/* Clear BT_DEV_ENABLE here to prevent early bt_enable() calls, before disable is
	 * completed.
	 */
	atomic_clear_bit(bt_dev.flags, BT_DEV_ENABLE);

	return 0;
}
4065 
bt_is_ready(void)4066 bool bt_is_ready(void)
4067 {
4068 	return atomic_test_bit(bt_dev.flags, BT_DEV_READY);
4069 }
4070 
4071 #define DEVICE_NAME_LEN (sizeof(CONFIG_BT_DEVICE_NAME) - 1)
4072 #if defined(CONFIG_BT_DEVICE_NAME_DYNAMIC)
4073 BUILD_ASSERT(DEVICE_NAME_LEN < CONFIG_BT_DEVICE_NAME_MAX);
4074 #else
4075 BUILD_ASSERT(DEVICE_NAME_LEN < 248);
4076 #endif
4077 
bt_set_name(const char * name)4078 int bt_set_name(const char *name)
4079 {
4080 #if defined(CONFIG_BT_DEVICE_NAME_DYNAMIC)
4081 	size_t len = strlen(name);
4082 	int err;
4083 
4084 	if (len > CONFIG_BT_DEVICE_NAME_MAX) {
4085 		return -ENOMEM;
4086 	}
4087 
4088 	if (!strcmp(bt_dev.name, name)) {
4089 		return 0;
4090 	}
4091 
4092 	strncpy(bt_dev.name, name, len);
4093 	bt_dev.name[len] = '\0';
4094 
4095 	if (IS_ENABLED(CONFIG_BT_SETTINGS)) {
4096 		err = settings_save_one("bt/name", bt_dev.name, len);
4097 		if (err) {
4098 			LOG_WRN("Unable to store name");
4099 		}
4100 	}
4101 
4102 	return 0;
4103 #else
4104 	return -ENOMEM;
4105 #endif
4106 }
4107 
/* Return the current device name: the runtime-modifiable buffer when
 * CONFIG_BT_DEVICE_NAME_DYNAMIC is enabled, otherwise the compile-time
 * CONFIG_BT_DEVICE_NAME string.
 */
const char *bt_get_name(void)
{
#if defined(CONFIG_BT_DEVICE_NAME_DYNAMIC)
	return bt_dev.name;
#else
	return CONFIG_BT_DEVICE_NAME;
#endif
}
4116 
/* Return the current GAP appearance value: the runtime value when
 * CONFIG_BT_DEVICE_APPEARANCE_DYNAMIC is enabled, otherwise the
 * compile-time CONFIG_BT_DEVICE_APPEARANCE constant.
 */
uint16_t bt_get_appearance(void)
{
#if defined(CONFIG_BT_DEVICE_APPEARANCE_DYNAMIC)
	return bt_dev.appearance;
#else
	return CONFIG_BT_DEVICE_APPEARANCE;
#endif
}
4125 
4126 #if defined(CONFIG_BT_DEVICE_APPEARANCE_DYNAMIC)
/* Update the GAP appearance value. When CONFIG_BT_SETTINGS is enabled the
 * new value is persisted first; the in-memory value is only updated after
 * a successful store, so a settings failure leaves state unchanged.
 *
 * @return 0 on success (or no-op), or the settings error code.
 */
int bt_set_appearance(uint16_t appearance)
{
	if (bt_dev.appearance == appearance) {
		/* No change: nothing to store. */
		return 0;
	}

	if (IS_ENABLED(CONFIG_BT_SETTINGS)) {
		int save_err = settings_save_one("bt/appearance", &appearance,
						 sizeof(appearance));

		if (save_err) {
			LOG_ERR("Unable to save setting 'bt/appearance' (err %d).", save_err);
			return save_err;
		}
	}

	bt_dev.appearance = appearance;

	return 0;
}
4145 #endif
4146 
bt_addr_le_is_bonded(uint8_t id,const bt_addr_le_t * addr)4147 bool bt_addr_le_is_bonded(uint8_t id, const bt_addr_le_t *addr)
4148 {
4149 	if (IS_ENABLED(CONFIG_BT_SMP)) {
4150 		struct bt_keys *keys = bt_keys_find_addr(id, addr);
4151 
4152 		/* if there are any keys stored then device is bonded */
4153 		return keys && keys->keys;
4154 	} else {
4155 		return false;
4156 	}
4157 }
4158 
4159 #if defined(CONFIG_BT_FILTER_ACCEPT_LIST)
bt_le_filter_accept_list_add(const bt_addr_le_t * addr)4160 int bt_le_filter_accept_list_add(const bt_addr_le_t *addr)
4161 {
4162 	struct bt_hci_cp_le_add_dev_to_fal *cp;
4163 	struct net_buf *buf;
4164 	int err;
4165 
4166 	if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
4167 		return -EAGAIN;
4168 	}
4169 
4170 	buf = bt_hci_cmd_create(BT_HCI_OP_LE_ADD_DEV_TO_FAL, sizeof(*cp));
4171 	if (!buf) {
4172 		return -ENOBUFS;
4173 	}
4174 
4175 	cp = net_buf_add(buf, sizeof(*cp));
4176 	bt_addr_le_copy(&cp->addr, addr);
4177 
4178 	err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_ADD_DEV_TO_FAL, buf, NULL);
4179 	if (err) {
4180 		LOG_ERR("Failed to add device to filter accept list");
4181 
4182 		return err;
4183 	}
4184 
4185 	return 0;
4186 }
4187 
bt_le_filter_accept_list_remove(const bt_addr_le_t * addr)4188 int bt_le_filter_accept_list_remove(const bt_addr_le_t *addr)
4189 {
4190 	struct bt_hci_cp_le_rem_dev_from_fal *cp;
4191 	struct net_buf *buf;
4192 	int err;
4193 
4194 	if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
4195 		return -EAGAIN;
4196 	}
4197 
4198 	buf = bt_hci_cmd_create(BT_HCI_OP_LE_REM_DEV_FROM_FAL, sizeof(*cp));
4199 	if (!buf) {
4200 		return -ENOBUFS;
4201 	}
4202 
4203 	cp = net_buf_add(buf, sizeof(*cp));
4204 	bt_addr_le_copy(&cp->addr, addr);
4205 
4206 	err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_REM_DEV_FROM_FAL, buf, NULL);
4207 	if (err) {
4208 		LOG_ERR("Failed to remove device from filter accept list");
4209 		return err;
4210 	}
4211 
4212 	return 0;
4213 }
4214 
bt_le_filter_accept_list_clear(void)4215 int bt_le_filter_accept_list_clear(void)
4216 {
4217 	int err;
4218 
4219 	if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
4220 		return -EAGAIN;
4221 	}
4222 
4223 	err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_CLEAR_FAL, NULL, NULL);
4224 	if (err) {
4225 		LOG_ERR("Failed to clear filter accept list");
4226 		return err;
4227 	}
4228 
4229 	return 0;
4230 }
4231 #endif /* defined(CONFIG_BT_FILTER_ACCEPT_LIST) */
4232 
/* Send the LE Set Host Channel Classification command with the given
 * 37-bit channel map (5 bytes, 3 top bits of the last byte unused).
 * Central-only; requires controller support for the command.
 *
 * @return 0 on success, -ENOTSUP if unsupported, -ENOBUFS if no command
 *         buffer is available, or the HCI command error.
 */
int bt_le_set_chan_map(uint8_t chan_map[5])
{
	struct net_buf *cmd;
	struct bt_hci_cp_le_set_host_chan_classif *classif;

	if (!IS_ENABLED(CONFIG_BT_CENTRAL)) {
		return -ENOTSUP;
	}

	/* Supported-commands octet 27, bit 3. */
	if (!BT_CMD_TEST(bt_dev.supported_commands, 27, 3)) {
		LOG_WRN("Set Host Channel Classification command is "
			"not supported");
		return -ENOTSUP;
	}

	cmd = bt_hci_cmd_create(BT_HCI_OP_LE_SET_HOST_CHAN_CLASSIF,
				sizeof(*classif));
	if (cmd == NULL) {
		return -ENOBUFS;
	}

	classif = net_buf_add(cmd, sizeof(*classif));

	/* Copy the first 32 channel bits, then mask the final byte down to
	 * its 5 valid bits (channels 32-36).
	 */
	memcpy(classif->ch_map, chan_map, 4);
	classif->ch_map[4] = chan_map[4] & BIT_MASK(5);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_HOST_CHAN_CLASSIF,
				    cmd, NULL);
}
4262 
4263 #if defined(CONFIG_BT_RPA_TIMEOUT_DYNAMIC)
/* Set the resolvable private address rotation timeout.
 *
 * @param new_rpa_timeout Timeout in seconds; valid range 1..3600.
 *
 * @return 0 on success (including no-op), -EINVAL if out of range.
 */
int bt_le_set_rpa_timeout(uint16_t new_rpa_timeout)
{
	if ((new_rpa_timeout < 1) || (new_rpa_timeout > 3600)) {
		return -EINVAL;
	}

	if (bt_dev.rpa_timeout != new_rpa_timeout) {
		bt_dev.rpa_timeout = new_rpa_timeout;
		/* Signal that the new timeout must be pushed to the RPA
		 * machinery.
		 */
		atomic_set_bit(bt_dev.flags, BT_DEV_RPA_TIMEOUT_CHANGED);
	}

	return 0;
}
4279 #endif
4280 
/* Send the HCI Configure Data Path command, optionally with vendor-specific
 * configuration data appended.
 *
 * @param dir           Data path direction.
 * @param id            Data path ID.
 * @param vs_config_len Length of @p vs_config in bytes (may be 0).
 * @param vs_config     Vendor-specific configuration, or NULL when length is 0.
 *
 * @return 0 on success, -ENOBUFS if no command buffer is available, -EIO if
 *         the controller reported a non-zero status, or the HCI command error.
 */
int bt_configure_data_path(uint8_t dir, uint8_t id, uint8_t vs_config_len,
			   const uint8_t *vs_config)
{
	struct net_buf *cmd;
	struct net_buf *rsp;
	struct bt_hci_cp_configure_data_path *params;
	struct bt_hci_rp_configure_data_path *result;
	int ret;

	cmd = bt_hci_cmd_create(BT_HCI_OP_CONFIGURE_DATA_PATH,
				sizeof(*params) + vs_config_len);
	if (cmd == NULL) {
		return -ENOBUFS;
	}

	params = net_buf_add(cmd, sizeof(*params));
	params->data_path_dir = dir;
	params->data_path_id  = id;
	params->vs_config_len = vs_config_len;
	if (vs_config_len > 0U) {
		(void)memcpy(params->vs_config, vs_config, vs_config_len);
	}

	ret = bt_hci_cmd_send_sync(BT_HCI_OP_CONFIGURE_DATA_PATH, cmd, &rsp);
	if (ret) {
		return ret;
	}

	/* Map a non-zero controller status to -EIO. */
	result = (void *)rsp->data;
	ret = result->status ? -EIO : 0;
	net_buf_unref(rsp);

	return ret;
}
4317