1 /* hci_core.c - HCI core Bluetooth handling */
2 
3 /*
4  * Copyright (c) 2017-2021 Nordic Semiconductor ASA
5  * Copyright (c) 2015-2016 Intel Corporation
6  *
7  * SPDX-License-Identifier: Apache-2.0
8  */
9 
10 #include <zephyr/kernel.h>
11 #include <string.h>
12 #include <stdio.h>
13 #include <errno.h>
14 #include <zephyr/net/buf.h>
15 #include <zephyr/sys/atomic.h>
16 #include <zephyr/sys/check.h>
17 #include <zephyr/sys/util.h>
18 #include <zephyr/sys/slist.h>
19 #include <zephyr/sys/byteorder.h>
20 #include <zephyr/debug/stack.h>
21 #include <zephyr/sys/__assert.h>
22 #include <soc.h>
23 
24 #include <zephyr/settings/settings.h>
25 
26 #include <zephyr/bluetooth/bluetooth.h>
27 #include <zephyr/bluetooth/conn.h>
28 #include <zephyr/bluetooth/l2cap.h>
29 #include <zephyr/bluetooth/hci.h>
30 #include <zephyr/bluetooth/hci_vs.h>
31 #if DT_HAS_CHOSEN(zephyr_bt_hci)
32 #include <zephyr/drivers/bluetooth.h>
33 #else
34 #include <zephyr/drivers/bluetooth/hci_driver.h>
35 #endif
36 
37 #include "common/bt_str.h"
38 #include "common/assert.h"
39 
40 #include "common/rpa.h"
41 #include "keys.h"
42 #include "monitor.h"
43 #include "hci_core.h"
44 #include "hci_ecc.h"
45 #include "ecc.h"
46 #include "id.h"
47 #include "adv.h"
48 #include "scan.h"
49 
50 #include "addr_internal.h"
51 #include "conn_internal.h"
52 #include "iso_internal.h"
53 #include "l2cap_internal.h"
54 #include "gatt_internal.h"
55 #include "smp.h"
56 #include "crypto.h"
57 #include "settings.h"
58 
59 #if defined(CONFIG_BT_CLASSIC)
60 #include "classic/br.h"
61 #endif
62 
63 #if defined(CONFIG_BT_DF)
64 #include "direction_internal.h"
65 #endif /* CONFIG_BT_DF */
66 
67 #define LOG_LEVEL CONFIG_BT_HCI_CORE_LOG_LEVEL
68 #include <zephyr/logging/log.h>
69 LOG_MODULE_REGISTER(bt_hci_core);
70 
71 #define BT_HCI_DEV  DT_CHOSEN(zephyr_bt_hci)
72 #define BT_HCI_BUS  BT_DT_HCI_BUS_GET(BT_HCI_DEV)
73 #define BT_HCI_NAME BT_DT_HCI_NAME_GET(BT_HCI_DEV)
74 
75 void bt_tx_irq_raise(void);
76 
77 #define HCI_CMD_TIMEOUT      K_SECONDS(10)
78 
79 /* Stacks for the threads */
80 static void rx_work_handler(struct k_work *work);
81 static K_WORK_DEFINE(rx_work, rx_work_handler);
82 #if defined(CONFIG_BT_RECV_WORKQ_BT)
83 static struct k_work_q bt_workq;
84 static K_KERNEL_STACK_DEFINE(rx_thread_stack, CONFIG_BT_RX_STACK_SIZE);
85 #endif /* CONFIG_BT_RECV_WORKQ_BT */
86 
87 static void init_work(struct k_work *work);
88 
/* The single global Bluetooth host state instance. Most fields are
 * zero-initialized; only the entries below need explicit values.
 */
struct bt_dev bt_dev = {
	.init          = Z_WORK_INITIALIZER(init_work),
#if defined(CONFIG_BT_PRIVACY)
	/* Resolvable Private Address rotation period, in seconds. */
	.rpa_timeout   = CONFIG_BT_RPA_TIMEOUT,
#endif
#if defined(CONFIG_BT_DEVICE_APPEARANCE_DYNAMIC)
	/* Runtime-changeable GAP Appearance value, seeded from Kconfig. */
	.appearance = CONFIG_BT_DEVICE_APPEARANCE,
#endif
#if DT_HAS_CHOSEN(zephyr_bt_hci)
	/* HCI transport device resolved from the devicetree chosen node. */
	.hci = DEVICE_DT_GET(BT_HCI_DEV),
#endif
};
101 
102 static bt_ready_cb_t ready_cb;
103 
104 #if defined(CONFIG_BT_HCI_VS_EVT_USER)
105 static bt_hci_vnd_evt_cb_t *hci_vnd_evt_cb;
106 #endif /* CONFIG_BT_HCI_VS_EVT_USER */
107 
/* Out-of-band metadata for each in-flight HCI command buffer. One entry
 * exists per buffer in hci_cmd_pool; entries are looked up by buffer id
 * via the cmd() macro below.
 */
struct cmd_data {
	/** HCI status of the command completion */
	uint8_t  status;

	/** The command OpCode that the buffer contains */
	uint16_t opcode;

	/** The state to update when command completes with success. */
	struct bt_hci_cmd_state_set *state;

	/** Used by bt_hci_cmd_send_sync. */
	struct k_sem *sync;
};
121 
122 static struct cmd_data cmd_data[CONFIG_BT_BUF_CMD_TX_COUNT];
123 
124 #define cmd(buf) (&cmd_data[net_buf_id(buf)])
125 #define acl(buf) ((struct acl_data *)net_buf_user_data(buf))
126 
#if DT_HAS_CHOSEN(zephyr_bt_hci)
/* Devicetree-described HCI device: quirks come from the DT node. */

/* True if the controller must not be sent HCI_Reset during init. */
static bool drv_quirk_no_reset(void)
{
	return  ((BT_DT_HCI_QUIRKS_GET(DT_CHOSEN(zephyr_bt_hci)) & BT_HCI_QUIRK_NO_RESET) != 0);
}

/* True if the host must not trigger the Data Length Update procedure
 * automatically (the controller handles it itself).
 */
__maybe_unused static bool drv_quirk_no_auto_dle(void)
{
	return  ((BT_DT_HCI_QUIRKS_GET(DT_CHOSEN(zephyr_bt_hci)) & BT_HCI_QUIRK_NO_AUTO_DLE) != 0);
}
#else
/* Legacy registered-driver API: quirks come from the driver struct. */

static bool drv_quirk_no_reset(void)
{
	return  ((bt_dev.drv->quirks & BT_QUIRK_NO_RESET) != 0);
}

__maybe_unused static bool drv_quirk_no_auto_dle(void)
{
	return  ((bt_dev.drv->quirks & BT_QUIRK_NO_AUTO_DLE) != 0);
}
#endif
148 
/* Arm a one-shot state update: when the command carried in `buf`
 * completes successfully, bit `bit` of `target` will be set to `val`.
 * The descriptor is attached to the command's metadata slot.
 */
void bt_hci_cmd_state_set_init(struct net_buf *buf,
			       struct bt_hci_cmd_state_set *state,
			       atomic_t *target, int bit, bool val)
{
	state->val = val;
	state->bit = bit;
	state->target = target;

	cmd(buf)->state = state;
}
158 
159 /* HCI command buffers. Derive the needed size from both Command and Event
160  * buffer length since the buffer is also used for the response event i.e
161  * command complete or command status.
162  */
163 #define CMD_BUF_SIZE MAX(BT_BUF_EVT_RX_SIZE, BT_BUF_CMD_TX_SIZE)
164 NET_BUF_POOL_FIXED_DEFINE(hci_cmd_pool, CONFIG_BT_BUF_CMD_TX_COUNT,
165 			  CMD_BUF_SIZE, sizeof(struct bt_buf_data), NULL);
166 
/* Table entry mapping one HCI event code to its handler. */
struct event_handler {
	uint8_t event;                        /* HCI event code */
	uint8_t min_len;                      /* minimum valid payload length */
	void (*handler)(struct net_buf *buf); /* invoked with the event payload */
};

/* Convenience initializer for an event_handler table entry. */
#define EVENT_HANDLER(_evt, _handler, _min_len) \
{ \
	.event = _evt, \
	.handler = _handler, \
	.min_len = _min_len, \
}
179 
/* Dispatch `event` to the first matching entry of `handlers`.
 *
 * Returns 0 when a handler consumed the event, -EINVAL when the payload
 * in `buf` is shorter than the handler's declared minimum, and
 * -EOPNOTSUPP when no table entry matches the event code.
 */
static int handle_event_common(uint8_t event, struct net_buf *buf,
			       const struct event_handler *handlers, size_t num_handlers)
{
	for (size_t idx = 0; idx < num_handlers; idx++) {
		const struct event_handler *entry = &handlers[idx];

		if (entry->event != event) {
			continue;
		}

		if (buf->len < entry->min_len) {
			LOG_ERR("Too small (%u bytes) event 0x%02x", buf->len, event);
			return -EINVAL;
		}

		entry->handler(buf);
		return 0;
	}

	return -EOPNOTSUPP;
}
203 
/* Dispatch a standard HCI event; unhandled events are only logged.
 * Length violations are already reported inside handle_event_common().
 */
static void handle_event(uint8_t event, struct net_buf *buf, const struct event_handler *handlers,
			 size_t num_handlers)
{
	const int res = handle_event_common(event, buf, handlers, num_handlers);

	if (res == -EOPNOTSUPP) {
		LOG_WRN("Unhandled event 0x%02x len %u: %s", event, buf->len,
			bt_hex(buf->data, buf->len));
	}
}
217 
/* Dispatch a vendor-specific HCI event; unhandled events are only
 * logged. Length violations are reported inside handle_event_common().
 */
static void handle_vs_event(uint8_t event, struct net_buf *buf,
			    const struct event_handler *handlers, size_t num_handlers)
{
	const int res = handle_event_common(event, buf, handlers, num_handlers);

	if (res == -EOPNOTSUPP) {
		LOG_WRN("Unhandled vendor-specific event: %s", bt_hex(buf->data, buf->len));
	}
}
230 
/* Record in the ACL buffer's user data whether a Host Number of
 * Completed Packets credit has already been sent for this buffer.
 */
void bt_acl_set_ncp_sent(struct net_buf *packet, bool value)
{
	acl(packet)->host_ncp_sent = value;
}
235 
/* Send a Host_Number_Of_Completed_Packets command crediting exactly one
 * packet on `handle`. No-op unless controller-to-host ACL flow control
 * is enabled in Kconfig.
 */
void bt_send_one_host_num_completed_packets(uint16_t handle)
{
	if (!IS_ENABLED(CONFIG_BT_HCI_ACL_FLOW_CONTROL)) {
		ARG_UNUSED(handle);
		return;
	}

	struct bt_hci_cp_host_num_completed_packets *cp;
	struct bt_hci_handle_count *hc;
	struct net_buf *buf;
	int err;

	LOG_DBG("Reporting completed packet for handle %u", handle);

	buf = bt_hci_cmd_create(BT_HCI_OP_HOST_NUM_COMPLETED_PACKETS,
				sizeof(*cp) + sizeof(*hc));
	/* Losing this credit would stall the controller's flow control
	 * permanently, hence hard asserts instead of error returns.
	 */
	BT_ASSERT_MSG(buf, "Unable to alloc for Host NCP");

	cp = net_buf_add(buf, sizeof(*cp));
	cp->num_handles = sys_cpu_to_le16(1);

	hc = net_buf_add(buf, sizeof(*hc));
	hc->handle = sys_cpu_to_le16(handle);
	hc->count  = sys_cpu_to_le16(1);

	err = bt_hci_cmd_send(BT_HCI_OP_HOST_NUM_COMPLETED_PACKETS, buf);
	BT_ASSERT_MSG(err == 0, "Unable to send Host NCP (err %d)", err);
}
264 
265 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
bt_hci_host_num_completed_packets(struct net_buf * buf)266 void bt_hci_host_num_completed_packets(struct net_buf *buf)
267 {
268 	uint16_t handle = acl(buf)->handle;
269 	struct bt_conn *conn;
270 	uint8_t index = acl(buf)->index;
271 
272 	net_buf_destroy(buf);
273 
274 	if (acl(buf)->host_ncp_sent) {
275 		return;
276 	}
277 
278 	/* Do nothing if controller to host flow control is not supported */
279 	if (!BT_CMD_TEST(bt_dev.supported_commands, 10, 5)) {
280 		return;
281 	}
282 
283 	conn = bt_conn_lookup_index(index);
284 	if (!conn) {
285 		LOG_WRN("Unable to look up conn with index 0x%02x", index);
286 		return;
287 	}
288 
289 	if (conn->state != BT_CONN_CONNECTED &&
290 	    conn->state != BT_CONN_DISCONNECTING) {
291 		LOG_WRN("Not reporting packet for non-connected conn");
292 		bt_conn_unref(conn);
293 		return;
294 	}
295 
296 	bt_conn_unref(conn);
297 
298 	bt_send_one_host_num_completed_packets(handle);
299 }
300 #endif /* defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL) */
301 
/* Allocate an HCI command buffer, initialize its metadata slot, and add
 * the command header for `opcode` with `param_len` parameter bytes.
 * Returns NULL on allocation failure.
 */
struct net_buf *bt_hci_cmd_create(uint16_t opcode, uint8_t param_len)
{
	struct bt_hci_cmd_hdr *hdr;
	struct net_buf *buf;

	LOG_DBG("opcode 0x%04x param_len %u", opcode, param_len);

	/* net_buf_alloc(K_FOREVER) can fail when run from the syswq */
	buf = net_buf_alloc(&hci_cmd_pool, K_FOREVER);
	if (!buf) {
		LOG_DBG("Unable to allocate a command buffer");
		return NULL;
	}

	LOG_DBG("buf %p", buf);

	net_buf_reserve(buf, BT_BUF_RESERVE);

	bt_buf_set_type(buf, BT_BUF_CMD);

	/* Reset the per-buffer metadata; previous users of this pool slot
	 * may have left stale sync/state pointers behind.
	 */
	cmd(buf)->opcode = opcode;
	cmd(buf)->sync = NULL;
	cmd(buf)->state = NULL;

	hdr = net_buf_add(buf, sizeof(*hdr));
	hdr->opcode = sys_cpu_to_le16(opcode);
	hdr->param_len = param_len;

	return buf;
}
332 
/* Queue an HCI command for asynchronous transmission. If `buf` is NULL a
 * parameterless command buffer is allocated for `opcode`. Ownership of
 * `buf` transfers to this function on success and on failure.
 */
int bt_hci_cmd_send(uint16_t opcode, struct net_buf *buf)
{
	if (!buf) {
		buf = bt_hci_cmd_create(opcode, 0);
		if (!buf) {
			return -ENOBUFS;
		}
	}

	LOG_DBG("opcode 0x%04x len %u", opcode, buf->len);

	/* Host Number of Completed Packets can ignore the ncmd value
	 * and does not generate any cmd complete/status events.
	 */
	if (opcode == BT_HCI_OP_HOST_NUM_COMPLETED_PACKETS) {
		int err;

		/* Bypass the command queue and hand it to the driver now. */
		err = bt_send(buf);
		if (err) {
			LOG_ERR("Unable to send to driver (err %d)", err);
			net_buf_unref(buf);
		}

		return err;
	}

	net_buf_put(&bt_dev.cmd_tx_queue, buf);
	bt_tx_irq_raise();

	return 0;
}
364 
365 static bool process_pending_cmd(k_timeout_t timeout);
/* Send an HCI command and block until its Command Complete/Status event
 * arrives. If `buf` is NULL an empty command for `opcode` is allocated;
 * a caller-supplied `buf` MUST come from hci_cmd_pool. On success with a
 * non-NULL `rsp`, ownership of the response buffer passes to the caller.
 * Returns 0 or a negative errno mapped from the HCI status code.
 */
int bt_hci_cmd_send_sync(uint16_t opcode, struct net_buf *buf,
			 struct net_buf **rsp)
{
	struct k_sem sync_sem;
	uint8_t status;
	int err;

	if (!buf) {
		buf = bt_hci_cmd_create(opcode, 0);
		if (!buf) {
			return -ENOBUFS;
		}
	} else {
		/* `cmd(buf)` depends on this  */
		if (net_buf_pool_get(buf->pool_id) != &hci_cmd_pool) {
			__ASSERT_NO_MSG(false);
			return -EINVAL;
		}
	}

	LOG_DBG("buf %p opcode 0x%04x len %u", buf, opcode, buf->len);

	/* This local sem is just for suspending the current thread until the
	 * command is processed by the LL. It is given (and we are awaken) by
	 * the cmd_complete/status handlers.
	 */
	k_sem_init(&sync_sem, 0, 1);
	cmd(buf)->sync = &sync_sem;

	/* Extra ref so the buffer survives until we read the status below. */
	net_buf_put(&bt_dev.cmd_tx_queue, net_buf_ref(buf));
	bt_tx_irq_raise();

	/* TODO: disallow sending sync commands from syswq altogether */

	/* Since the commands are now processed in the syswq, we cannot suspend
	 * and wait. We have to send the command from the current context.
	 */
	if (k_current_get() == &k_sys_work_q.thread) {
		/* drain the command queue until we get to send the command of interest. */
		struct net_buf *cmd = NULL;

		do {
			cmd = k_fifo_peek_head(&bt_dev.cmd_tx_queue);
			LOG_DBG("process cmd %p want %p", cmd, buf);

			/* Wait for a response from the Bluetooth Controller.
			 * The Controller may fail to respond if:
			 *  - It was never programmed or connected.
			 *  - There was a fatal error.
			 *
			 * See the `BT_HCI_OP_` macros in hci_types.h or
			 * Core_v5.4, Vol 4, Part E, Section 5.4.1 and Section 7
			 * to map the opcode to the HCI command documentation.
			 * Example: 0x0c03 represents HCI_Reset command.
			 */
			__maybe_unused bool success = process_pending_cmd(HCI_CMD_TIMEOUT);

			BT_ASSERT_MSG(success, "command opcode 0x%04x timeout", opcode);
		} while (buf != cmd);
	}

	/* Now that we have sent the command, suspend until the LL replies */
	err = k_sem_take(&sync_sem, HCI_CMD_TIMEOUT);
	BT_ASSERT_MSG(err == 0,
		      "Controller unresponsive, command opcode 0x%04x timeout with err %d",
		      opcode, err);

	status = cmd(buf)->status;
	if (status) {
		LOG_WRN("opcode 0x%04x status 0x%02x %s", opcode,
			status, bt_hci_err_to_str(status));
		net_buf_unref(buf);

		/* Map well-known HCI status codes onto errnos. */
		switch (status) {
		case BT_HCI_ERR_CONN_LIMIT_EXCEEDED:
			return -ECONNREFUSED;
		case BT_HCI_ERR_INSUFFICIENT_RESOURCES:
			return -ENOMEM;
		case BT_HCI_ERR_INVALID_PARAM:
			return -EINVAL;
		case BT_HCI_ERR_CMD_DISALLOWED:
			return -EACCES;
		default:
			return -EIO;
		}
	}

	LOG_DBG("rsp %p opcode 0x%04x len %u", buf, opcode, buf->len);

	if (rsp) {
		*rsp = buf;
	} else {
		net_buf_unref(buf);
	}

	return 0;
}
463 
bt_hci_le_rand(void * buffer,size_t len)464 int bt_hci_le_rand(void *buffer, size_t len)
465 {
466 	struct bt_hci_rp_le_rand *rp;
467 	struct net_buf *rsp;
468 	size_t count;
469 	int err;
470 
471 	/* Check first that HCI_LE_Rand is supported */
472 	if (!BT_CMD_TEST(bt_dev.supported_commands, 27, 7)) {
473 		return -ENOTSUP;
474 	}
475 
476 	while (len > 0) {
477 		/* Number of bytes to fill on this iteration */
478 		count = MIN(len, sizeof(rp->rand));
479 		/* Request the next 8 bytes over HCI */
480 		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_RAND, NULL, &rsp);
481 		if (err) {
482 			return err;
483 		}
484 		/* Copy random data into buffer */
485 		rp = (void *)rsp->data;
486 		memcpy(buffer, rp->rand, count);
487 
488 		net_buf_unref(rsp);
489 		buffer = (uint8_t *)buffer + count;
490 		len -= count;
491 	}
492 
493 	return 0;
494 }
495 
/* Read the controller's maximum supported Data Length Extension TX
 * parameters (HCI_LE_Read_Maximum_Data_Length) into *tx_octets and
 * *tx_time. Out-of-spec values are only warned about, not rejected.
 */
static int hci_le_read_max_data_len(uint16_t *tx_octets, uint16_t *tx_time)
{
	struct bt_hci_rp_le_read_max_data_len *rp;
	struct net_buf *rsp;
	int err;

	err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_MAX_DATA_LEN, NULL, &rsp);
	if (err) {
		LOG_ERR("Failed to read DLE max data len");
		return err;
	}

	rp = (void *)rsp->data;
	*tx_octets = sys_le16_to_cpu(rp->max_tx_octets);
	*tx_time = sys_le16_to_cpu(rp->max_tx_time);
	net_buf_unref(rsp);

	if (!IN_RANGE(*tx_octets, BT_HCI_LE_MAX_TX_OCTETS_MIN, BT_HCI_LE_MAX_TX_OCTETS_MAX)) {
		LOG_WRN("tx_octets exceeds the valid range %u", *tx_octets);
	}
	if (!IN_RANGE(*tx_time, BT_HCI_LE_MAX_TX_TIME_MIN, BT_HCI_LE_MAX_TX_TIME_MAX)) {
		LOG_WRN("tx_time exceeds the valid range %u", *tx_time);
	}

	return 0;
}
522 
/* Translate an HCI PHY code into the corresponding GAP PHY bit.
 * Unrecognized codes map to 0.
 */
uint8_t bt_get_phy(uint8_t hci_phy)
{
	if (hci_phy == BT_HCI_LE_PHY_1M) {
		return BT_GAP_LE_PHY_1M;
	}

	if (hci_phy == BT_HCI_LE_PHY_2M) {
		return BT_GAP_LE_PHY_2M;
	}

	if (hci_phy == BT_HCI_LE_PHY_CODED) {
		return BT_GAP_LE_PHY_CODED;
	}

	return 0;
}
536 
/* Map an HCI CTE type code onto the direction-finding API enum. Both
 * BT_HCI_LE_NO_CTE and any unknown code collapse to BT_DF_CTE_TYPE_NONE.
 */
int bt_get_df_cte_type(uint8_t hci_cte_type)
{
	if (hci_cte_type == BT_HCI_LE_AOA_CTE) {
		return BT_DF_CTE_TYPE_AOA;
	}

	if (hci_cte_type == BT_HCI_LE_AOD_CTE_1US) {
		return BT_DF_CTE_TYPE_AOD_1US;
	}

	if (hci_cte_type == BT_HCI_LE_AOD_CTE_2US) {
		return BT_DF_CTE_TYPE_AOD_2US;
	}

	return BT_DF_CTE_TYPE_NONE;
}
552 
553 #if defined(CONFIG_BT_CONN_TX)
hci_num_completed_packets(struct net_buf * buf)554 static void hci_num_completed_packets(struct net_buf *buf)
555 {
556 	struct bt_hci_evt_num_completed_packets *evt = (void *)buf->data;
557 	int i;
558 
559 	if (sizeof(*evt) + sizeof(evt->h[0]) * evt->num_handles > buf->len) {
560 		LOG_ERR("evt num_handles (=%u) too large (%u > %u)",
561 			evt->num_handles,
562 			sizeof(*evt) + sizeof(evt->h[0]) * evt->num_handles,
563 			buf->len);
564 		return;
565 	}
566 
567 	LOG_DBG("num_handles %u", evt->num_handles);
568 
569 	for (i = 0; i < evt->num_handles; i++) {
570 		uint16_t handle, count;
571 		struct bt_conn *conn;
572 
573 		handle = sys_le16_to_cpu(evt->h[i].handle);
574 		count = sys_le16_to_cpu(evt->h[i].count);
575 
576 		LOG_DBG("handle %u count %u", handle, count);
577 
578 		conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_ALL);
579 		if (!conn) {
580 			LOG_ERR("No connection for handle %u", handle);
581 			continue;
582 		}
583 
584 		while (count--) {
585 			sys_snode_t *node;
586 
587 			k_sem_give(bt_conn_get_pkts(conn));
588 
589 			/* move the next TX context from the `pending` list to
590 			 * the `complete` list.
591 			 */
592 			node = sys_slist_get(&conn->tx_pending);
593 
594 			if (!node) {
595 				LOG_ERR("packets count mismatch");
596 				__ASSERT_NO_MSG(0);
597 				break;
598 			}
599 
600 			sys_slist_append(&conn->tx_complete, node);
601 
602 			/* align the `pending` value */
603 			__ASSERT_NO_MSG(atomic_get(&conn->in_ll));
604 			atomic_dec(&conn->in_ll);
605 
606 			/* TX context free + callback happens in there */
607 			k_work_submit(&conn->tx_complete_work);
608 		}
609 
610 		bt_conn_unref(conn);
611 	}
612 }
613 #endif /* CONFIG_BT_CONN_TX */
614 
615 #if defined(CONFIG_BT_CONN)
/* Process an inbound HCI ACL data packet: parse and validate the header,
 * stash handle/index in the buffer's user data, and hand the payload to
 * the connection layer. Consumes `buf` on every error path;
 * bt_conn_recv() takes ownership otherwise.
 */
static void hci_acl(struct net_buf *buf)
{
	struct bt_hci_acl_hdr *hdr;
	uint16_t handle, len;
	struct bt_conn *conn;
	uint8_t flags;

	LOG_DBG("buf %p", buf);
	if (buf->len < sizeof(*hdr)) {
		LOG_ERR("Invalid HCI ACL packet size (%u)", buf->len);
		net_buf_unref(buf);
		return;
	}

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
	len = sys_le16_to_cpu(hdr->len);
	handle = sys_le16_to_cpu(hdr->handle);
	/* The 16-bit handle field carries PB/BC flags in its top bits. */
	flags = bt_acl_flags(handle);

	acl(buf)->handle = bt_acl_handle(handle);
	acl(buf)->index = BT_CONN_INDEX_INVALID;

	LOG_DBG("handle %u len %u flags %u", acl(buf)->handle, len, flags);

	if (buf->len != len) {
		LOG_ERR("ACL data length mismatch (%u != %u)", buf->len, len);
		net_buf_unref(buf);
		return;
	}

	conn = bt_conn_lookup_handle(acl(buf)->handle, BT_CONN_TYPE_ALL);
	if (!conn) {
		LOG_ERR("Unable to find conn for handle %u", acl(buf)->handle);
		net_buf_unref(buf);
		return;
	}

	acl(buf)->index = bt_conn_index(conn);

	bt_conn_recv(conn, buf, flags);
	bt_conn_unref(conn);
}
658 
/* Handle the Data_Buffer_Overflow event: nothing to recover, just log
 * which link type (ACL/SCO) overflowed the controller's buffers.
 */
static void hci_data_buf_overflow(struct net_buf *buf)
{
	struct bt_hci_evt_data_buf_overflow *evt = (void *)buf->data;

	LOG_WRN("Data buffer overflow (link type 0x%02x)", evt->link_type);
}
665 
666 #if defined(CONFIG_BT_CENTRAL)
set_phy_conn_param(const struct bt_conn * conn,struct bt_hci_ext_conn_phy * phy)667 static void set_phy_conn_param(const struct bt_conn *conn,
668 			       struct bt_hci_ext_conn_phy *phy)
669 {
670 	phy->conn_interval_min = sys_cpu_to_le16(conn->le.interval_min);
671 	phy->conn_interval_max = sys_cpu_to_le16(conn->le.interval_max);
672 	phy->conn_latency = sys_cpu_to_le16(conn->le.latency);
673 	phy->supervision_timeout = sys_cpu_to_le16(conn->le.timeout);
674 
675 	phy->min_ce_len = 0;
676 	phy->max_ce_len = 0;
677 }
678 
/* Initiate a connection using HCI_LE_Extended_Create_Connection, with a
 * per-PHY parameter section for each enabled initiating PHY (1M and/or
 * Coded). Sets BT_DEV_INITIATING on command success.
 */
int bt_le_create_conn_ext(const struct bt_conn *conn)
{
	struct bt_hci_cp_le_ext_create_conn *cp;
	struct bt_hci_ext_conn_phy *phy;
	struct bt_hci_cmd_state_set state;
	bool use_filter = false;
	struct net_buf *buf;
	uint8_t own_addr_type;
	uint8_t num_phys;
	int err;

	if (IS_ENABLED(CONFIG_BT_FILTER_ACCEPT_LIST)) {
		use_filter = atomic_test_bit(conn->flags, BT_CONN_AUTO_CONNECT);
	}

	err = bt_id_set_create_conn_own_addr(use_filter, &own_addr_type);
	if (err) {
		return err;
	}

	/* One parameter section per initiating PHY: 1M unless disabled,
	 * plus Coded when requested.
	 */
	num_phys = (!(bt_dev.create_param.options &
		      BT_CONN_LE_OPT_NO_1M) ? 1 : 0) +
		   ((bt_dev.create_param.options &
		      BT_CONN_LE_OPT_CODED) ? 1 : 0);

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_EXT_CREATE_CONN, sizeof(*cp) +
				num_phys * sizeof(*phy));
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	(void)memset(cp, 0, sizeof(*cp));

	if (use_filter) {
		/* User Initiated procedure use fast scan parameters. */
		bt_addr_le_copy(&cp->peer_addr, BT_ADDR_LE_ANY);
		cp->filter_policy = BT_HCI_LE_CREATE_CONN_FP_FILTER;
	} else {
		const bt_addr_le_t *peer_addr = &conn->le.dst;

#if defined(CONFIG_BT_SMP)
		if (bt_dev.le.rl_entries > bt_dev.le.rl_size) {
			/* Host resolving is used, use the RPA directly. */
			peer_addr = &conn->le.resp_addr;
		}
#endif
		bt_addr_le_copy(&cp->peer_addr, peer_addr);
		cp->filter_policy = BT_HCI_LE_CREATE_CONN_FP_NO_FILTER;
	}

	cp->own_addr_type = own_addr_type;
	cp->phys = 0;

	if (!(bt_dev.create_param.options & BT_CONN_LE_OPT_NO_1M)) {
		cp->phys |= BT_HCI_LE_EXT_SCAN_PHY_1M;
		phy = net_buf_add(buf, sizeof(*phy));
		phy->scan_interval = sys_cpu_to_le16(
			bt_dev.create_param.interval);
		phy->scan_window = sys_cpu_to_le16(
			bt_dev.create_param.window);
		set_phy_conn_param(conn, phy);
	}

	if (bt_dev.create_param.options & BT_CONN_LE_OPT_CODED) {
		cp->phys |= BT_HCI_LE_EXT_SCAN_PHY_CODED;
		phy = net_buf_add(buf, sizeof(*phy));
		phy->scan_interval = sys_cpu_to_le16(
			bt_dev.create_param.interval_coded);
		phy->scan_window = sys_cpu_to_le16(
			bt_dev.create_param.window_coded);
		set_phy_conn_param(conn, phy);
	}

	bt_hci_cmd_state_set_init(buf, &state, bt_dev.flags,
				  BT_DEV_INITIATING, true);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_EXT_CREATE_CONN, buf, NULL);
}
758 
/* Initiate a connection to a synced device via PAwR using
 * HCI_LE_Extended_Create_Connection [v2], targeting `subevent` of the
 * periodic advertising train on `adv`. Sets BT_DEV_INITIATING on
 * command success.
 */
int bt_le_create_conn_synced(const struct bt_conn *conn, const struct bt_le_ext_adv *adv,
			     uint8_t subevent)
{
	struct bt_hci_cp_le_ext_create_conn_v2 *cp;
	struct bt_hci_ext_conn_phy *phy;
	struct bt_hci_cmd_state_set state;
	struct net_buf *buf;
	uint8_t own_addr_type;
	int err;

	err = bt_id_set_create_conn_own_addr(false, &own_addr_type);
	if (err) {
		return err;
	}

	/* There shall only be one Initiating_PHYs */
	buf = bt_hci_cmd_create(BT_HCI_OP_LE_EXT_CREATE_CONN_V2, sizeof(*cp) + sizeof(*phy));
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	(void)memset(cp, 0, sizeof(*cp));

	cp->subevent = subevent;
	cp->adv_handle = adv->handle;
	bt_addr_le_copy(&cp->peer_addr, &conn->le.dst);
	cp->filter_policy = BT_HCI_LE_CREATE_CONN_FP_NO_FILTER;
	cp->own_addr_type = own_addr_type;

	/* The Initiating_PHY is the secondary phy of the corresponding ext adv set */
	if (adv->options & BT_LE_ADV_OPT_CODED) {
		cp->phys = BT_HCI_LE_EXT_SCAN_PHY_CODED;
	} else if (adv->options & BT_LE_ADV_OPT_NO_2M) {
		cp->phys = BT_HCI_LE_EXT_SCAN_PHY_1M;
	} else {
		cp->phys = BT_HCI_LE_EXT_SCAN_PHY_2M;
	}

	phy = net_buf_add(buf, sizeof(*phy));
	(void)memset(phy, 0, sizeof(*phy));
	set_phy_conn_param(conn, phy);

	bt_hci_cmd_state_set_init(buf, &state, bt_dev.flags, BT_DEV_INITIATING, true);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_EXT_CREATE_CONN_V2, buf, NULL);
}
806 
/* Initiate a connection using the legacy HCI_LE_Create_Connection
 * command (1M PHY only). Sets BT_DEV_INITIATING on command success.
 */
static int bt_le_create_conn_legacy(const struct bt_conn *conn)
{
	struct bt_hci_cp_le_create_conn *cp;
	struct bt_hci_cmd_state_set state;
	bool use_filter = false;
	struct net_buf *buf;
	uint8_t own_addr_type;
	int err;

	if (IS_ENABLED(CONFIG_BT_FILTER_ACCEPT_LIST)) {
		use_filter = atomic_test_bit(conn->flags, BT_CONN_AUTO_CONNECT);
	}

	err = bt_id_set_create_conn_own_addr(use_filter, &own_addr_type);
	if (err) {
		return err;
	}

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_CREATE_CONN, sizeof(*cp));
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	memset(cp, 0, sizeof(*cp));
	cp->own_addr_type = own_addr_type;

	if (use_filter) {
		/* User Initiated procedure use fast scan parameters. */
		bt_addr_le_copy(&cp->peer_addr, BT_ADDR_LE_ANY);
		cp->filter_policy = BT_HCI_LE_CREATE_CONN_FP_FILTER;
	} else {
		const bt_addr_le_t *peer_addr = &conn->le.dst;

#if defined(CONFIG_BT_SMP)
		if (bt_dev.le.rl_entries > bt_dev.le.rl_size) {
			/* Host resolving is used, use the RPA directly. */
			peer_addr = &conn->le.resp_addr;
		}
#endif
		bt_addr_le_copy(&cp->peer_addr, peer_addr);
		cp->filter_policy = BT_HCI_LE_CREATE_CONN_FP_NO_FILTER;
	}

	cp->scan_interval = sys_cpu_to_le16(bt_dev.create_param.interval);
	cp->scan_window = sys_cpu_to_le16(bt_dev.create_param.window);

	cp->conn_interval_min = sys_cpu_to_le16(conn->le.interval_min);
	cp->conn_interval_max = sys_cpu_to_le16(conn->le.interval_max);
	cp->conn_latency = sys_cpu_to_le16(conn->le.latency);
	cp->supervision_timeout = sys_cpu_to_le16(conn->le.timeout);

	bt_hci_cmd_state_set_init(buf, &state, bt_dev.flags,
				  BT_DEV_INITIATING, true);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_CREATE_CONN, buf, NULL);
}
864 
bt_le_create_conn(const struct bt_conn * conn)865 int bt_le_create_conn(const struct bt_conn *conn)
866 {
867 	if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
868 	    BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
869 		return bt_le_create_conn_ext(conn);
870 	}
871 
872 	return bt_le_create_conn_legacy(conn);
873 }
874 
bt_le_create_conn_cancel(void)875 int bt_le_create_conn_cancel(void)
876 {
877 	struct net_buf *buf;
878 	struct bt_hci_cmd_state_set state;
879 
880 	buf = bt_hci_cmd_create(BT_HCI_OP_LE_CREATE_CONN_CANCEL, 0);
881 
882 	bt_hci_cmd_state_set_init(buf, &state, bt_dev.flags,
883 				  BT_DEV_INITIATING, false);
884 
885 	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_CREATE_CONN_CANCEL, buf, NULL);
886 }
887 #endif /* CONFIG_BT_CENTRAL */
888 
bt_hci_disconnect(uint16_t handle,uint8_t reason)889 int bt_hci_disconnect(uint16_t handle, uint8_t reason)
890 {
891 	struct net_buf *buf;
892 	struct bt_hci_cp_disconnect *disconn;
893 
894 	buf = bt_hci_cmd_create(BT_HCI_OP_DISCONNECT, sizeof(*disconn));
895 	if (!buf) {
896 		return -ENOBUFS;
897 	}
898 
899 	disconn = net_buf_add(buf, sizeof(*disconn));
900 	disconn->handle = sys_cpu_to_le16(handle);
901 	disconn->reason = reason;
902 
903 	return bt_hci_cmd_send_sync(BT_HCI_OP_DISCONNECT, buf, NULL);
904 }
905 
906 static uint16_t disconnected_handles[CONFIG_BT_MAX_CONN];
907 static uint8_t disconnected_handles_reason[CONFIG_BT_MAX_CONN];
908 
/* Forget all pending early-disconnect records. The reasons array is left
 * as-is: a reason is only ever read when its handle slot matches.
 */
static void disconnected_handles_reset(void)
{
	(void)memset(disconnected_handles, 0, sizeof(disconnected_handles));
}
913 
/* Record that `handle` disconnected before its connection object
 * existed, so the reason can be delivered once the connection-complete
 * event arrives (see conn_handle_is_disconnected()).
 */
static void conn_handle_disconnected(uint16_t handle, uint8_t disconnect_reason)
{
	for (int i = 0; i < ARRAY_SIZE(disconnected_handles); i++) {
		if (!disconnected_handles[i]) {
			/* Use invalid connection handle bits so that connection
			 * handle 0 can be used as a valid non-zero handle.
			 */
			disconnected_handles[i] = ~BT_ACL_HANDLE_MASK | handle;
			disconnected_handles_reason[i] = disconnect_reason;

			/* Claim exactly one slot. Without this early return
			 * the loop would fill every remaining free slot with
			 * the same handle, exhausting the table and creating
			 * duplicate matches.
			 */
			return;
		}
	}
}
926 
927 /** @returns the disconnect reason. */
conn_handle_is_disconnected(uint16_t handle)928 static uint8_t conn_handle_is_disconnected(uint16_t handle)
929 {
930 	handle |= ~BT_ACL_HANDLE_MASK;
931 
932 	for (int i = 0; i < ARRAY_SIZE(disconnected_handles); i++) {
933 		if (disconnected_handles[i] == handle) {
934 			disconnected_handles[i] = 0;
935 			return disconnected_handles_reason[i];
936 		}
937 	}
938 
939 	return 0;
940 }
941 
/* High-priority handler for the Disconnection_Complete event: record the
 * reason and move the connection to DISCONNECT_COMPLETE state as early
 * as possible. If the event beats the connection-complete event, stash
 * the handle+reason for later (conn_handle_disconnected()).
 */
static void hci_disconn_complete_prio(struct net_buf *buf)
{
	struct bt_hci_evt_disconn_complete *evt = (void *)buf->data;
	uint16_t handle = sys_le16_to_cpu(evt->handle);
	struct bt_conn *conn;

	LOG_DBG("status 0x%02x %s handle %u reason 0x%02x",
		evt->status, bt_hci_err_to_str(evt->status), handle, evt->reason);

	if (evt->status) {
		return;
	}

	conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_ALL);
	if (!conn) {
		/* Priority disconnect complete event received before normal
		 * connection complete event.
		 */
		conn_handle_disconnected(handle, evt->reason);
		return;
	}

	conn->err = evt->reason;

	bt_conn_set_state(conn, BT_CONN_DISCONNECT_COMPLETE);
	bt_conn_unref(conn);
}
969 
/* Normal-priority handler for the Disconnection_Complete event: finish
 * the state transition to DISCONNECTED and perform per-type cleanup
 * (SCO teardown, one-shot BR bond removal, auto-connect rescan).
 */
static void hci_disconn_complete(struct net_buf *buf)
{
	struct bt_hci_evt_disconn_complete *evt = (void *)buf->data;
	uint16_t handle = sys_le16_to_cpu(evt->handle);
	struct bt_conn *conn;

	LOG_DBG("status 0x%02x %s handle %u reason 0x%02x",
		evt->status, bt_hci_err_to_str(evt->status), handle, evt->reason);

	if (evt->status) {
		return;
	}

	conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_ALL);
	if (!conn) {
		LOG_ERR("Unable to look up conn with handle %u", handle);
		return;
	}

	bt_conn_set_state(conn, BT_CONN_DISCONNECTED);

	if (conn->type != BT_CONN_TYPE_LE) {
#if defined(CONFIG_BT_CLASSIC)
		if (conn->type == BT_CONN_TYPE_SCO) {
			bt_sco_cleanup(conn);
			return;
		}
		/*
		 * If only for one connection session bond was set, clear keys
		 * database row for this connection.
		 */
		if (conn->type == BT_CONN_TYPE_BR &&
		    atomic_test_and_clear_bit(conn->flags, BT_CONN_BR_NOBOND)) {
			bt_keys_link_key_clear(conn->br.link_key);
		}
#endif
		bt_conn_unref(conn);
		return;
	}

#if defined(CONFIG_BT_CENTRAL) && !defined(CONFIG_BT_FILTER_ACCEPT_LIST)
	/* Re-arm scanning so the auto-connect peer can be reconnected. */
	if (atomic_test_bit(conn->flags, BT_CONN_AUTO_CONNECT)) {
		bt_conn_set_state(conn, BT_CONN_SCAN_BEFORE_INITIATING);
		bt_le_scan_update(false);
	}
#endif /* defined(CONFIG_BT_CENTRAL) && !defined(CONFIG_BT_FILTER_ACCEPT_LIST) */

	bt_conn_unref(conn);
}
1019 
hci_le_read_remote_features(struct bt_conn * conn)1020 static int hci_le_read_remote_features(struct bt_conn *conn)
1021 {
1022 	struct bt_hci_cp_le_read_remote_features *cp;
1023 	struct net_buf *buf;
1024 
1025 	buf = bt_hci_cmd_create(BT_HCI_OP_LE_READ_REMOTE_FEATURES,
1026 				sizeof(*cp));
1027 	if (!buf) {
1028 		return -ENOBUFS;
1029 	}
1030 
1031 	cp = net_buf_add(buf, sizeof(*cp));
1032 	cp->handle = sys_cpu_to_le16(conn->handle);
1033 	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_REMOTE_FEATURES, buf, NULL);
1034 }
1035 
hci_read_remote_version(struct bt_conn * conn)1036 static int hci_read_remote_version(struct bt_conn *conn)
1037 {
1038 	struct bt_hci_cp_read_remote_version_info *cp;
1039 	struct net_buf *buf;
1040 
1041 	if (conn->state != BT_CONN_CONNECTED) {
1042 		return -ENOTCONN;
1043 	}
1044 
1045 	/* Remote version cannot change. */
1046 	if (atomic_test_bit(conn->flags, BT_CONN_AUTO_VERSION_INFO)) {
1047 		return 0;
1048 	}
1049 
1050 	buf = bt_hci_cmd_create(BT_HCI_OP_READ_REMOTE_VERSION_INFO,
1051 				sizeof(*cp));
1052 	if (!buf) {
1053 		return -ENOBUFS;
1054 	}
1055 
1056 	cp = net_buf_add(buf, sizeof(*cp));
1057 	cp->handle = sys_cpu_to_le16(conn->handle);
1058 
1059 	return bt_hci_cmd_send_sync(BT_HCI_OP_READ_REMOTE_VERSION_INFO, buf,
1060 				    NULL);
1061 }
1062 
1063 /* LE Data Length Change Event is optional so this function just ignore
1064  * error and stack will continue to use default values.
1065  */
bt_le_set_data_len(struct bt_conn * conn,uint16_t tx_octets,uint16_t tx_time)1066 int bt_le_set_data_len(struct bt_conn *conn, uint16_t tx_octets, uint16_t tx_time)
1067 {
1068 	struct bt_hci_cp_le_set_data_len *cp;
1069 	struct net_buf *buf;
1070 
1071 	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_DATA_LEN, sizeof(*cp));
1072 	if (!buf) {
1073 		return -ENOBUFS;
1074 	}
1075 
1076 	cp = net_buf_add(buf, sizeof(*cp));
1077 	cp->handle = sys_cpu_to_le16(conn->handle);
1078 	cp->tx_octets = sys_cpu_to_le16(tx_octets);
1079 	cp->tx_time = sys_cpu_to_le16(tx_time);
1080 
1081 	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_DATA_LEN, buf, NULL);
1082 }
1083 
1084 #if defined(CONFIG_BT_USER_PHY_UPDATE)
hci_le_read_phy(struct bt_conn * conn)1085 static int hci_le_read_phy(struct bt_conn *conn)
1086 {
1087 	struct bt_hci_cp_le_read_phy *cp;
1088 	struct bt_hci_rp_le_read_phy *rp;
1089 	struct net_buf *buf, *rsp;
1090 	int err;
1091 
1092 	buf = bt_hci_cmd_create(BT_HCI_OP_LE_READ_PHY, sizeof(*cp));
1093 	if (!buf) {
1094 		return -ENOBUFS;
1095 	}
1096 
1097 	cp = net_buf_add(buf, sizeof(*cp));
1098 	cp->handle = sys_cpu_to_le16(conn->handle);
1099 
1100 	err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_PHY, buf, &rsp);
1101 	if (err) {
1102 		return err;
1103 	}
1104 
1105 	rp = (void *)rsp->data;
1106 	conn->le.phy.tx_phy = bt_get_phy(rp->tx_phy);
1107 	conn->le.phy.rx_phy = bt_get_phy(rp->rx_phy);
1108 	net_buf_unref(rsp);
1109 
1110 	return 0;
1111 }
1112 #endif /* defined(CONFIG_BT_USER_PHY_UPDATE) */
1113 
/* Issue HCI LE Set PHY for @p conn with the given preferences.
 *
 * Returns 0 on success, -ENOBUFS when no command buffer is available,
 * or a negative error from the synchronous command send.
 */
int bt_le_set_phy(struct bt_conn *conn, uint8_t all_phys,
		  uint8_t pref_tx_phy, uint8_t pref_rx_phy, uint8_t phy_opts)
{
	struct net_buf *cmd;
	struct bt_hci_cp_le_set_phy *cp;

	cmd = bt_hci_cmd_create(BT_HCI_OP_LE_SET_PHY, sizeof(*cp));
	if (cmd == NULL) {
		return -ENOBUFS;
	}

	cp = net_buf_add(cmd, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(conn->handle);
	cp->all_phys = all_phys;
	cp->tx_phys = pref_tx_phy;
	cp->rx_phys = pref_rx_phy;
	cp->phy_opts = phy_opts;

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_PHY, cmd, NULL);
}
1134 
/* Find the connection object pending establishment for the given role.
 *
 * Looks for a connection in CONNECT or CONNECT_AUTO state associated
 * with @p peer_addr (or with the filter accept list / connectable
 * advertiser when no per-peer entry exists). Returns a new reference,
 * or NULL when nothing is pending for that role.
 */
static struct bt_conn *find_pending_connect(uint8_t role, bt_addr_le_t *peer_addr)
{
	struct bt_conn *conn = NULL;

	if (IS_ENABLED(CONFIG_BT_CENTRAL) && role == BT_HCI_ROLE_CENTRAL) {
		conn = bt_conn_lookup_state_le(BT_ID_DEFAULT, peer_addr,
					       BT_CONN_INITIATING);
		if (conn == NULL && IS_ENABLED(CONFIG_BT_FILTER_ACCEPT_LIST)) {
			/* Initiating via the filter accept list has no
			 * per-peer address.
			 */
			conn = bt_conn_lookup_state_le(BT_ID_DEFAULT,
						       BT_ADDR_LE_NONE,
						       BT_CONN_INITIATING_FILTER_LIST);
		}
	} else if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
		   role == BT_HCI_ROLE_PERIPHERAL) {
		conn = bt_conn_lookup_state_le(bt_dev.adv_conn_id, peer_addr,
					       BT_CONN_ADV_DIR_CONNECTABLE);
		if (conn == NULL) {
			/* Undirected connectable advertising has no
			 * per-peer address either.
			 */
			conn = bt_conn_lookup_state_le(bt_dev.adv_conn_id,
						       BT_ADDR_LE_NONE,
						       BT_CONN_ADV_CONNECTABLE);
		}
	}

	return conn;
}
1169 
1170 /* We don't want the application to get a PHY update callback upon connection
1171  * establishment on 2M PHY. Therefore we must prevent issuing LE Set PHY
1172  * in this scenario.
1173  */
skip_auto_phy_update_on_conn_establishment(struct bt_conn * conn)1174 static bool skip_auto_phy_update_on_conn_establishment(struct bt_conn *conn)
1175 {
1176 #if defined(CONFIG_BT_USER_PHY_UPDATE)
1177 	if (IS_ENABLED(CONFIG_BT_AUTO_PHY_UPDATE) &&
1178 	    IS_ENABLED(CONFIG_BT_EXT_ADV) &&
1179 	    BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
1180 		if (conn->le.phy.tx_phy == BT_HCI_LE_PHY_2M &&
1181 		    conn->le.phy.rx_phy == BT_HCI_LE_PHY_2M) {
1182 			return true;
1183 		}
1184 	}
1185 #else
1186 	ARG_UNUSED(conn);
1187 #endif /* defined(CONFIG_BT_USER_PHY_UPDATE) */
1188 
1189 	return false;
1190 }
1191 
/* Start the auto-initiated post-connection procedures on @p conn:
 * remote feature exchange, remote version read, PHY update and data
 * length update. Each step is best-effort -- a failure is logged and
 * the remaining steps still run with default values.
 */
static void conn_auto_initiate(struct bt_conn *conn)
{
	int err;

	if (conn->state != BT_CONN_CONNECTED) {
		/* It is possible that connection was disconnected directly from
		 * connected callback so we must check state before doing
		 * connection parameters update.
		 */
		return;
	}

	/* Feature exchange: a peripheral may only initiate it when the
	 * controller supports the peripheral-initiated exchange feature.
	 */
	if (!atomic_test_bit(conn->flags, BT_CONN_AUTO_FEATURE_EXCH) &&
	    ((conn->role == BT_HCI_ROLE_CENTRAL) ||
	     BT_FEAT_LE_PER_INIT_FEAT_XCHG(bt_dev.le.features))) {
		err = hci_le_read_remote_features(conn);
		if (err) {
			LOG_ERR("Failed read remote features (%d)", err);
		}
	}

	if (IS_ENABLED(CONFIG_BT_REMOTE_VERSION) &&
	    !atomic_test_bit(conn->flags, BT_CONN_AUTO_VERSION_INFO)) {
		err = hci_read_remote_version(conn);
		if (err) {
			LOG_ERR("Failed read remote version (%d)", err);
		}
	}

	/* Prefer 2M PHY, unless the link already came up on 2M (see
	 * skip_auto_phy_update_on_conn_establishment()).
	 */
	if (IS_ENABLED(CONFIG_BT_AUTO_PHY_UPDATE) &&
	    BT_FEAT_LE_PHY_2M(bt_dev.le.features) &&
	    !skip_auto_phy_update_on_conn_establishment(conn)) {
		err = bt_le_set_phy(conn, 0U, BT_HCI_LE_PHY_PREFER_2M,
				    BT_HCI_LE_PHY_PREFER_2M,
				    BT_HCI_LE_PHY_CODED_ANY);
		if (err) {
			LOG_ERR("Failed LE Set PHY (%d)", err);
		}
	}

	/* Data length update: only initiated from the host when the
	 * driver reports that the controller does not do it by itself.
	 */
	if (IS_ENABLED(CONFIG_BT_AUTO_DATA_LEN_UPDATE) &&
	    BT_FEAT_LE_DLE(bt_dev.le.features)) {
		if (drv_quirk_no_auto_dle()) {
			uint16_t tx_octets, tx_time;

			err = hci_le_read_max_data_len(&tx_octets, &tx_time);
			if (!err) {
				err = bt_le_set_data_len(conn,
						tx_octets, tx_time);
				if (err) {
					LOG_ERR("Failed to set data len (%d)", err);
				}
			}
		} else {
			/* No need to auto-initiate DLE procedure.
			 * It is done by the controller.
			 */
		}
	}
}
1252 
/* Handle a connection complete event with an error status caused by a
 * cancelled or failed create-connection, and decide how to recover:
 * resume scanning, restart the initiator, or report the failure to the
 * application via the DISCONNECTED state with @p err as reason.
 */
static void le_conn_complete_cancel(uint8_t err)
{
	int ret;
	struct bt_conn *conn;

	/* Handle create connection cancel.
	 *
	 * There is no need to check ID address as only one
	 * connection in central role can be in pending state.
	 */
	conn = find_pending_connect(BT_HCI_ROLE_CENTRAL, NULL);
	if (!conn) {
		LOG_ERR("No pending central connection");
		return;
	}

	if (atomic_test_bit(conn->flags, BT_CONN_AUTO_CONNECT)) {
		if (!IS_ENABLED(CONFIG_BT_FILTER_ACCEPT_LIST)) {
			/* Restart passive scanner for device */
			bt_conn_set_state(conn, BT_CONN_SCAN_BEFORE_INITIATING);
		} else {
			/* Restart FAL initiator after RPA timeout. */
			ret = bt_le_create_conn(conn);
			if (ret) {
				LOG_ERR("Failed to restart initiator");
			}
		}
	} else {
		/* Distinguish a genuine creation timeout (deferred work
		 * already ran) from a cancel issued to refresh the RPA
		 * (deferred work still queued or delayed).
		 */
		int busy_status = k_work_delayable_busy_get(&conn->deferred_work);

		if (!(busy_status & (K_WORK_QUEUED | K_WORK_DELAYED))) {
			LOG_WRN("Connection creation timeout triggered");
			conn->err = err;
			bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
		} else {
			/* Restart initiator after RPA timeout. */
			ret = bt_le_create_conn(conn);
			if (ret) {
				LOG_ERR("Failed to restart initiator");
			}
		}
	}

	bt_conn_unref(conn);
}
1298 
/* Handle a connection complete event with status BT_HCI_ERR_ADV_TIMEOUT,
 * i.e. high duty cycle directed advertising expired without a
 * connection. Only relevant when the controller does not support
 * extended advertising (otherwise the adv set terminated event covers
 * this case).
 */
static void le_conn_complete_adv_timeout(void)
{
	if (!(IS_ENABLED(CONFIG_BT_EXT_ADV) &&
	      BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features))) {
		struct bt_le_ext_adv *adv = bt_le_adv_lookup_legacy();
		struct bt_conn *conn;

		/* Handle advertising timeout after high duty cycle directed
		 * advertising.
		 */

		atomic_clear_bit(adv->flags, BT_ADV_ENABLED);

		if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
		    !BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
			/* No advertising set terminated event, must be a
			 * legacy advertiser set.
			 */
			bt_le_adv_delete_legacy();
		}

		/* There is no need to check ID address as only one
		 * connection in peripheral role can be in pending state.
		 */
		conn = find_pending_connect(BT_HCI_ROLE_PERIPHERAL, NULL);
		if (!conn) {
			LOG_ERR("No pending peripheral connection");
			return;
		}

		/* Report the timeout to the application as a failed
		 * connection and drop the pending-connect reference.
		 */
		conn->err = BT_HCI_ERR_ADV_TIMEOUT;
		bt_conn_set_state(conn, BT_CONN_DISCONNECTED);

		bt_conn_unref(conn);
	}
}
1335 
/* Dispatch a successful enhanced connection complete event.
 *
 * With multiple extended advertising sets, a peripheral connection
 * complete event may need to be paired with its advertising set
 * terminated event; in that case the event is cached in
 * bt_dev.cached_conn_complete and processed later. Otherwise it is
 * handled immediately.
 */
static void enh_conn_complete(struct bt_hci_evt_le_enh_conn_complete *evt)
{
#if defined(CONFIG_BT_CONN) && (CONFIG_BT_EXT_ADV_MAX_ADV_SET > 1)
	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
		evt->role == BT_HCI_ROLE_PERIPHERAL &&
		evt->status == BT_HCI_ERR_SUCCESS &&
		(IS_ENABLED(CONFIG_BT_EXT_ADV) &&
				BT_FEAT_LE_EXT_ADV(bt_dev.le.features))) {

		/* Cache the connection complete event. Process it later.
		 * See bt_dev.cached_conn_complete.
		 */
		for (int i = 0; i < ARRAY_SIZE(bt_dev.cached_conn_complete); i++) {
			if (!bt_dev.cached_conn_complete[i].valid) {
				(void)memcpy(&bt_dev.cached_conn_complete[i].evt,
					evt,
					sizeof(struct bt_hci_evt_le_enh_conn_complete));
				bt_dev.cached_conn_complete[i].valid = true;
				return;
			}
		}

		/* The cache is dimensioned to the number of advertising
		 * sets, so exhaustion indicates a host/controller bug.
		 */
		__ASSERT(false, "No more cache entries available."
				"This should not happen by design");

		return;
	}
#endif
	bt_hci_le_enh_conn_complete(evt);
}
1366 
/* Derive the identity address and the on-air connection address from a
 * connection complete event.
 *
 * For a controller-resolved peer the identity comes straight from the
 * event and the on-air address is the peer's RPA; otherwise the
 * identity is looked up in the resolving list for @p id and the on-air
 * address is the event address itself.
 */
static void translate_addrs(bt_addr_le_t *peer_addr, bt_addr_le_t *id_addr,
			    const struct bt_hci_evt_le_enh_conn_complete *evt, uint8_t id)
{
	if (!bt_addr_le_is_resolved(&evt->peer_addr)) {
		bt_addr_le_copy(id_addr, bt_lookup_id_addr(id, &evt->peer_addr));
		bt_addr_le_copy(peer_addr, &evt->peer_addr);
		return;
	}

	bt_addr_le_copy_resolved(id_addr, &evt->peer_addr);

	bt_addr_copy(&peer_addr->a, &evt->peer_rpa);
	peer_addr->type = BT_ADDR_LE_RANDOM;
}
1380 
update_conn(struct bt_conn * conn,const bt_addr_le_t * id_addr,const struct bt_hci_evt_le_enh_conn_complete * evt)1381 static void update_conn(struct bt_conn *conn, const bt_addr_le_t *id_addr,
1382 			const struct bt_hci_evt_le_enh_conn_complete *evt)
1383 {
1384 	conn->handle = sys_le16_to_cpu(evt->handle);
1385 	bt_addr_le_copy(&conn->le.dst, id_addr);
1386 	conn->le.interval = sys_le16_to_cpu(evt->interval);
1387 	conn->le.latency = sys_le16_to_cpu(evt->latency);
1388 	conn->le.timeout = sys_le16_to_cpu(evt->supv_timeout);
1389 	conn->role = evt->role;
1390 	conn->err = 0U;
1391 
1392 #if defined(CONFIG_BT_USER_DATA_LEN_UPDATE)
1393 	conn->le.data_len.tx_max_len = BT_GAP_DATA_LEN_DEFAULT;
1394 	conn->le.data_len.tx_max_time = BT_GAP_DATA_TIME_DEFAULT;
1395 	conn->le.data_len.rx_max_len = BT_GAP_DATA_LEN_DEFAULT;
1396 	conn->le.data_len.rx_max_time = BT_GAP_DATA_TIME_DEFAULT;
1397 #endif
1398 }
1399 
/* Handle a successful LE Enhanced Connection Complete event.
 *
 * Matches the event to a pending connection object, fills it in
 * (addresses, parameters, PHY), transitions it to CONNECTED and kicks
 * off the auto-initiated procedures. If no pending connection exists,
 * the rogue link is disconnected to keep host and controller in sync.
 * Must only be called with evt->status == BT_HCI_ERR_SUCCESS.
 */
void bt_hci_le_enh_conn_complete(struct bt_hci_evt_le_enh_conn_complete *evt)
{
	__ASSERT_NO_MSG(evt->status == BT_HCI_ERR_SUCCESS);

	uint16_t handle = sys_le16_to_cpu(evt->handle);
	/* Non-zero when a priority disconnect complete for this handle
	 * already arrived before this event was processed.
	 */
	uint8_t disconnect_reason = conn_handle_is_disconnected(handle);
	bt_addr_le_t peer_addr, id_addr;
	struct bt_conn *conn;
	uint8_t id;

	LOG_DBG("status 0x%02x %s handle %u role %u peer %s peer RPA %s",
		evt->status, bt_hci_err_to_str(evt->status), handle,
		evt->role, bt_addr_le_str(&evt->peer_addr), bt_addr_str(&evt->peer_rpa));
	LOG_DBG("local RPA %s", bt_addr_str(&evt->local_rpa));

#if defined(CONFIG_BT_SMP)
	bt_id_pending_keys_update();
#endif

	id = evt->role == BT_HCI_ROLE_PERIPHERAL ? bt_dev.adv_conn_id : BT_ID_DEFAULT;
	translate_addrs(&peer_addr, &id_addr, evt, id);

	conn = find_pending_connect(evt->role, &id_addr);

	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
	    evt->role == BT_HCI_ROLE_PERIPHERAL &&
	    !(IS_ENABLED(CONFIG_BT_EXT_ADV) &&
	      BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features))) {
		struct bt_le_ext_adv *adv = bt_le_adv_lookup_legacy();
		/* Clear advertising even if we are not able to add connection
		 * object to keep host in sync with controller state.
		 */
		atomic_clear_bit(adv->flags, BT_ADV_ENABLED);
		(void)bt_le_lim_adv_cancel_timeout(adv);
	}

	if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
	    evt->role == BT_HCI_ROLE_CENTRAL) {
		/* Clear initiating even if we are not able to add connection
		 * object to keep the host in sync with controller state.
		 */
		atomic_clear_bit(bt_dev.flags, BT_DEV_INITIATING);
	}

	if (!conn) {
		LOG_ERR("No pending conn for peer %s", bt_addr_le_str(&evt->peer_addr));
		bt_hci_disconnect(handle, BT_HCI_ERR_REMOTE_USER_TERM_CONN);
		return;
	}

	update_conn(conn, &id_addr, evt);

#if defined(CONFIG_BT_USER_PHY_UPDATE)
	/* Default to 1M; may be overwritten by hci_le_read_phy() below. */
	conn->le.phy.tx_phy = BT_GAP_LE_PHY_1M;
	conn->le.phy.rx_phy = BT_GAP_LE_PHY_1M;
#endif
	/*
	 * Use connection address (instead of identity address) as initiator
	 * or responder address. Only peripheral needs to be updated. For central all
	 * was set during outgoing connection creation.
	 */
	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
	    conn->role == BT_HCI_ROLE_PERIPHERAL) {
		bt_addr_le_copy(&conn->le.init_addr, &peer_addr);

		if (!(IS_ENABLED(CONFIG_BT_EXT_ADV) &&
		      BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features))) {
			struct bt_le_ext_adv *adv = bt_le_adv_lookup_legacy();

			if (IS_ENABLED(CONFIG_BT_PRIVACY) &&
			    !atomic_test_bit(adv->flags, BT_ADV_USE_IDENTITY)) {
				conn->le.resp_addr.type = BT_ADDR_LE_RANDOM;
				/* An all-zero local RPA means the controller
				 * did not report one; fall back to the
				 * current random address.
				 */
				if (!bt_addr_eq(&evt->local_rpa, BT_ADDR_ANY)) {
					bt_addr_copy(&conn->le.resp_addr.a,
						     &evt->local_rpa);
				} else {
					bt_addr_copy(&conn->le.resp_addr.a,
						     &bt_dev.random_addr.a);
				}
			} else {
				bt_addr_le_copy(&conn->le.resp_addr,
						&bt_dev.id_addr[conn->id]);
			}
		} else {
			/* Copy the local RPA and handle this in advertising set
			 * terminated event.
			 */
			bt_addr_copy(&conn->le.resp_addr.a, &evt->local_rpa);
		}

		/* if the controller supports, lets advertise for another
		 * peripheral connection.
		 * check for connectable advertising state is sufficient as
		 * this is how this le connection complete for peripheral occurred.
		 */
		if (BT_LE_STATES_PER_CONN_ADV(bt_dev.le.states)) {
			bt_le_adv_resume();
		}

		if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
		    !BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
			struct bt_le_ext_adv *adv = bt_le_adv_lookup_legacy();
			/* No advertising set terminated event, must be a
			 * legacy advertiser set.
			 */
			if (!atomic_test_bit(adv->flags, BT_ADV_PERSIST)) {
				bt_le_adv_delete_legacy();
			}
		}
	}

	if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
	    conn->role == BT_HCI_ROLE_CENTRAL) {
		bt_addr_le_copy(&conn->le.resp_addr, &peer_addr);

		if (IS_ENABLED(CONFIG_BT_PRIVACY)) {
			conn->le.init_addr.type = BT_ADDR_LE_RANDOM;
			if (!bt_addr_eq(&evt->local_rpa, BT_ADDR_ANY)) {
				bt_addr_copy(&conn->le.init_addr.a,
					     &evt->local_rpa);
			} else {
				bt_addr_copy(&conn->le.init_addr.a,
					     &bt_dev.random_addr.a);
			}
		} else {
			bt_addr_le_copy(&conn->le.init_addr,
					&bt_dev.id_addr[conn->id]);
		}
	}

#if defined(CONFIG_BT_USER_PHY_UPDATE)
	if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
	    BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
		int err;

		err = hci_le_read_phy(conn);
		if (err) {
			LOG_WRN("Failed to read PHY (%d)", err);
		}
	}
#endif /* defined(CONFIG_BT_USER_PHY_UPDATE) */

	bt_conn_set_state(conn, BT_CONN_CONNECTED);

	if (disconnect_reason) {
		/* Mark the connection as already disconnected before calling
		 * the connected callback, so that the application cannot
		 * start sending packets
		 */
		conn->err = disconnect_reason;
		bt_conn_set_state(conn, BT_CONN_DISCONNECT_COMPLETE);
	}

	bt_conn_connected(conn);

	/* Start auto-initiated procedures */
	conn_auto_initiate(conn);

	bt_conn_unref(conn);

	/* NOTE(review): conn->role is read after the unref above. This
	 * appears to rely on the CONNECTED state transition holding its
	 * own reference so the object stays valid -- confirm against
	 * bt_conn_set_state()/bt_conn_unref() semantics.
	 */
	if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
	    conn->role == BT_HCI_ROLE_CENTRAL) {
		bt_le_scan_update(false);
	}
}
1565 
1566 #if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
/* Handle a connection established via PAwR (Periodic Advertising with
 * Responses) synchronization, reported by an Enhanced Connection
 * Complete v2 event carrying a valid sync handle. Allocates a fresh
 * connection object, fills it in from the event and transitions it to
 * CONNECTED. Must only be called with evt->status == BT_HCI_ERR_SUCCESS.
 */
void bt_hci_le_enh_conn_complete_sync(struct bt_hci_evt_le_enh_conn_complete_v2 *evt,
				      struct bt_le_per_adv_sync *sync)
{
	__ASSERT_NO_MSG(evt->status == BT_HCI_ERR_SUCCESS);

	uint16_t handle = sys_le16_to_cpu(evt->handle);
	/* Non-zero when a priority disconnect complete for this handle
	 * already arrived before this event was processed.
	 */
	uint8_t disconnect_reason = conn_handle_is_disconnected(handle);
	bt_addr_le_t peer_addr, id_addr;
	struct bt_conn *conn;

	/* Connections can only be created from a sync with subevents. */
	if (!sync->num_subevents) {
		LOG_ERR("Unexpected connection complete event");

		return;
	}

	conn = bt_conn_add_le(BT_ID_DEFAULT, BT_ADDR_LE_ANY);
	if (!conn) {
		LOG_ERR("Unable to allocate connection");
		/* Tell the controller to disconnect to keep it in sync with
		 * the host state and avoid a "rogue" connection.
		 */
		bt_hci_disconnect(handle, BT_HCI_ERR_REMOTE_USER_TERM_CONN);

		return;
	}

	LOG_DBG("status 0x%02x %s handle %u role %u peer %s peer RPA %s",
		evt->status, bt_hci_err_to_str(evt->status), handle,
		evt->role, bt_addr_le_str(&evt->peer_addr), bt_addr_str(&evt->peer_rpa));
	LOG_DBG("local RPA %s", bt_addr_str(&evt->local_rpa));

	/* NOTE(review): this early return leaves the reference obtained
	 * from bt_conn_add_le() above unreleased -- looks like a conn
	 * object leak on this error path; confirm against
	 * bt_conn_add_le() ownership semantics.
	 */
	if (evt->role != BT_HCI_ROLE_PERIPHERAL) {
		LOG_ERR("PAwR sync always becomes peripheral");

		return;
	}

#if defined(CONFIG_BT_SMP)
	bt_id_pending_keys_update();
#endif

	/* v2 event starts with the same layout as v1, so the shared
	 * helpers can consume it via a downcast.
	 */
	translate_addrs(&peer_addr, &id_addr, (const struct bt_hci_evt_le_enh_conn_complete *)evt,
			BT_ID_DEFAULT);
	update_conn(conn, &id_addr, (const struct bt_hci_evt_le_enh_conn_complete *)evt);

#if defined(CONFIG_BT_USER_PHY_UPDATE)
	/* The connection is always initiated on the same phy as the PAwR advertiser */
	conn->le.phy.tx_phy = sync->phy;
	conn->le.phy.rx_phy = sync->phy;
#endif

	bt_addr_le_copy(&conn->le.init_addr, &peer_addr);

	if (IS_ENABLED(CONFIG_BT_PRIVACY)) {
		conn->le.resp_addr.type = BT_ADDR_LE_RANDOM;
		bt_addr_copy(&conn->le.resp_addr.a, &evt->local_rpa);
	} else {
		bt_addr_le_copy(&conn->le.resp_addr, &bt_dev.id_addr[conn->id]);
	}

	bt_conn_set_state(conn, BT_CONN_CONNECTED);

	if (disconnect_reason) {
		/* Mark the connection as already disconnected before calling
		 * the connected callback, so that the application cannot
		 * start sending packets
		 */
		conn->err = disconnect_reason;
		bt_conn_set_state(conn, BT_CONN_DISCONNECT_COMPLETE);
	}

	bt_conn_connected(conn);

	/* Since we don't give the application a reference to manage
	 * for peripheral connections, we need to release this reference here.
	 */
	bt_conn_unref(conn);

	/* NOTE(review): conn is used after the unref above. This appears
	 * to rely on the CONNECTED state transition holding its own
	 * reference -- confirm against bt_conn_set_state() semantics.
	 */
	/* Start auto-initiated procedures */
	conn_auto_initiate(conn);
}
1649 #endif /* CONFIG_BT_PER_ADV_SYNC_RSP */
1650 
/* Handle a connection complete event that carries a non-success status
 * and trigger the role-appropriate recovery action.
 */
static void enh_conn_complete_error_handle(uint8_t status)
{
	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
	    status == BT_HCI_ERR_ADV_TIMEOUT) {
		/* High duty cycle directed advertising expired. */
		le_conn_complete_adv_timeout();
	} else if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
		   status == BT_HCI_ERR_UNKNOWN_CONN_ID) {
		/* Create Connection was cancelled (e.g. RPA refresh or
		 * creation timeout).
		 */
		le_conn_complete_cancel(status);
		bt_le_scan_update(false);
	} else if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
		   IS_ENABLED(CONFIG_BT_PER_ADV_RSP) &&
		   status == BT_HCI_ERR_CONN_FAIL_TO_ESTAB) {
		/* Connection via PAwR failed to establish. */
		le_conn_complete_cancel(status);

		atomic_clear_bit(bt_dev.flags, BT_DEV_INITIATING);
	} else {
		LOG_WRN("Unexpected status 0x%02x %s", status, bt_hci_err_to_str(status));
	}
}
1675 
le_enh_conn_complete(struct net_buf * buf)1676 static void le_enh_conn_complete(struct net_buf *buf)
1677 {
1678 	struct bt_hci_evt_le_enh_conn_complete *evt =
1679 		(struct bt_hci_evt_le_enh_conn_complete *)buf->data;
1680 
1681 	if (evt->status != BT_HCI_ERR_SUCCESS) {
1682 		enh_conn_complete_error_handle(evt->status);
1683 		return;
1684 	}
1685 
1686 	enh_conn_complete(evt);
1687 }
1688 
1689 #if defined(CONFIG_BT_PER_ADV_RSP) || defined(CONFIG_BT_PER_ADV_SYNC_RSP)
/* Entry point for the LE Enhanced Connection Complete v2 event.
 *
 * Dispatches on the adv/sync handle pair: both invalid means an
 * ordinary connection (handled like v1); a valid adv handle means a
 * connection via a PAwR advertiser (also v1-compatible); a valid sync
 * handle means a connection via PAwR sync, which needs the dedicated
 * handler since no advertising set terminated event will follow.
 */
static void le_enh_conn_complete_v2(struct net_buf *buf)
{
	struct bt_hci_evt_le_enh_conn_complete_v2 *evt =
		(struct bt_hci_evt_le_enh_conn_complete_v2 *)buf->data;

	if (evt->status != BT_HCI_ERR_SUCCESS) {
		enh_conn_complete_error_handle(evt->status);
		return;
	}

	if (evt->adv_handle == BT_HCI_ADV_HANDLE_INVALID &&
	    evt->sync_handle == BT_HCI_SYNC_HANDLE_INVALID) {
		/* The connection was not created via PAwR, handle the event like v1 */
		enh_conn_complete((struct bt_hci_evt_le_enh_conn_complete *)evt);
	}
#if defined(CONFIG_BT_PER_ADV_RSP)
	else if (evt->adv_handle != BT_HCI_ADV_HANDLE_INVALID &&
		 evt->sync_handle == BT_HCI_SYNC_HANDLE_INVALID) {
		/* The connection was created via PAwR advertiser, it can be handled like v1 */
		enh_conn_complete((struct bt_hci_evt_le_enh_conn_complete *)evt);
	}
#endif /* CONFIG_BT_PER_ADV_RSP */
#if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
	else if (evt->adv_handle == BT_HCI_ADV_HANDLE_INVALID &&
		 evt->sync_handle != BT_HCI_SYNC_HANDLE_INVALID) {
		/* Created via PAwR sync, no adv set terminated event, needs separate handling */
		struct bt_le_per_adv_sync *sync;

		sync = bt_hci_per_adv_sync_lookup_handle(evt->sync_handle);
		if (!sync) {
			LOG_ERR("Unknown sync handle %d", evt->sync_handle);

			return;
		}

		bt_hci_le_enh_conn_complete_sync(evt, sync);
	}
#endif /* CONFIG_BT_PER_ADV_SYNC_RSP */
	else {
		/* Both handles valid, or a combination the enabled Kconfig
		 * options cannot produce.
		 */
		LOG_ERR("Invalid connection complete event");
	}
}
1732 #endif /* CONFIG_BT_PER_ADV_RSP || CONFIG_BT_PER_ADV_SYNC_RSP */
1733 
le_legacy_conn_complete(struct net_buf * buf)1734 static void le_legacy_conn_complete(struct net_buf *buf)
1735 {
1736 	struct bt_hci_evt_le_conn_complete *evt = (void *)buf->data;
1737 	struct bt_hci_evt_le_enh_conn_complete enh;
1738 
1739 	if (evt->status != BT_HCI_ERR_SUCCESS) {
1740 		enh_conn_complete_error_handle(evt->status);
1741 		return;
1742 	}
1743 
1744 	LOG_DBG("status 0x%02x %s role %u %s",
1745 		evt->status, bt_hci_err_to_str(evt->status), evt->role,
1746 		bt_addr_le_str(&evt->peer_addr));
1747 
1748 	enh.status         = evt->status;
1749 	enh.handle         = evt->handle;
1750 	enh.role           = evt->role;
1751 	enh.interval       = evt->interval;
1752 	enh.latency        = evt->latency;
1753 	enh.supv_timeout   = evt->supv_timeout;
1754 	enh.clock_accuracy = evt->clock_accuracy;
1755 
1756 	bt_addr_le_copy(&enh.peer_addr, &evt->peer_addr);
1757 
1758 	if (IS_ENABLED(CONFIG_BT_PRIVACY)) {
1759 		bt_addr_copy(&enh.local_rpa, &bt_dev.random_addr.a);
1760 	} else {
1761 		bt_addr_copy(&enh.local_rpa, BT_ADDR_ANY);
1762 	}
1763 
1764 	bt_addr_copy(&enh.peer_rpa, BT_ADDR_ANY);
1765 
1766 	enh_conn_complete(&enh);
1767 }
1768 
le_remote_feat_complete(struct net_buf * buf)1769 static void le_remote_feat_complete(struct net_buf *buf)
1770 {
1771 	struct bt_hci_evt_le_remote_feat_complete *evt = (void *)buf->data;
1772 	uint16_t handle = sys_le16_to_cpu(evt->handle);
1773 	struct bt_conn *conn;
1774 
1775 	conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_LE);
1776 	if (!conn) {
1777 		LOG_ERR("Unable to lookup conn for handle %u", handle);
1778 		return;
1779 	}
1780 
1781 	if (!evt->status) {
1782 		memcpy(conn->le.features, evt->features,
1783 		       sizeof(conn->le.features));
1784 	}
1785 
1786 	atomic_set_bit(conn->flags, BT_CONN_AUTO_FEATURE_EXCH);
1787 
1788 	if (IS_ENABLED(CONFIG_BT_REMOTE_INFO) &&
1789 	    !IS_ENABLED(CONFIG_BT_REMOTE_VERSION)) {
1790 		notify_remote_info(conn);
1791 	}
1792 
1793 	bt_conn_unref(conn);
1794 }
1795 
1796 #if defined(CONFIG_BT_DATA_LEN_UPDATE)
/* Handle the LE Data Length Change event: cache the new TX/RX octet and
 * time limits on the connection (when user data length updates are
 * enabled) and notify the application. Out-of-range controller values
 * are logged but still applied.
 */
static void le_data_len_change(struct net_buf *buf)
{
	struct bt_hci_evt_le_data_len_change *evt = (void *)buf->data;
	uint16_t handle = sys_le16_to_cpu(evt->handle);
	struct bt_conn *conn;

	conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_LE);
	if (!conn) {
		LOG_ERR("Unable to lookup conn for handle %u", handle);
		return;
	}

#if defined(CONFIG_BT_USER_DATA_LEN_UPDATE)
	uint16_t max_tx_octets = sys_le16_to_cpu(evt->max_tx_octets);
	uint16_t max_rx_octets = sys_le16_to_cpu(evt->max_rx_octets);
	uint16_t max_tx_time = sys_le16_to_cpu(evt->max_tx_time);
	uint16_t max_rx_time = sys_le16_to_cpu(evt->max_rx_time);

	/* Sanity-check against the spec-defined ranges; warn only, since
	 * the controller is the authority on the values in use.
	 */
	if (!IN_RANGE(max_tx_octets, BT_HCI_LE_MAX_TX_OCTETS_MIN, BT_HCI_LE_MAX_TX_OCTETS_MAX)) {
		LOG_WRN("max_tx_octets exceeds the valid range %u", max_tx_octets);
	}
	if (!IN_RANGE(max_rx_octets, BT_HCI_LE_MAX_RX_OCTETS_MIN, BT_HCI_LE_MAX_RX_OCTETS_MAX)) {
		LOG_WRN("max_rx_octets exceeds the valid range %u", max_rx_octets);
	}
	if (!IN_RANGE(max_tx_time, BT_HCI_LE_MAX_TX_TIME_MIN, BT_HCI_LE_MAX_TX_TIME_MAX)) {
		LOG_WRN("max_tx_time exceeds the valid range %u", max_tx_time);
	}
	if (!IN_RANGE(max_rx_time, BT_HCI_LE_MAX_RX_TIME_MIN, BT_HCI_LE_MAX_RX_TIME_MAX)) {
		LOG_WRN("max_rx_time exceeds the valid range %u", max_rx_time);
	}

	LOG_DBG("max. tx: %u (%uus), max. rx: %u (%uus)", max_tx_octets, max_tx_time, max_rx_octets,
		max_rx_time);

	conn->le.data_len.tx_max_len = max_tx_octets;
	conn->le.data_len.tx_max_time = max_tx_time;
	conn->le.data_len.rx_max_len = max_rx_octets;
	conn->le.data_len.rx_max_time = max_rx_time;
	notify_le_data_len_updated(conn);
#endif

	bt_conn_unref(conn);
}
1840 #endif /* CONFIG_BT_DATA_LEN_UPDATE */
1841 
1842 #if defined(CONFIG_BT_PHY_UPDATE)
le_phy_update_complete(struct net_buf * buf)1843 static void le_phy_update_complete(struct net_buf *buf)
1844 {
1845 	struct bt_hci_evt_le_phy_update_complete *evt = (void *)buf->data;
1846 	uint16_t handle = sys_le16_to_cpu(evt->handle);
1847 	struct bt_conn *conn;
1848 
1849 	conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_LE);
1850 	if (!conn) {
1851 		LOG_ERR("Unable to lookup conn for handle %u", handle);
1852 		return;
1853 	}
1854 
1855 	LOG_DBG("PHY updated: status: 0x%02x %s, tx: %u, rx: %u",
1856 		evt->status, bt_hci_err_to_str(evt->status), evt->tx_phy,
1857 		evt->rx_phy);
1858 
1859 #if defined(CONFIG_BT_USER_PHY_UPDATE)
1860 	conn->le.phy.tx_phy = bt_get_phy(evt->tx_phy);
1861 	conn->le.phy.rx_phy = bt_get_phy(evt->rx_phy);
1862 	notify_le_phy_updated(conn);
1863 #endif
1864 
1865 	bt_conn_unref(conn);
1866 }
1867 #endif /* CONFIG_BT_PHY_UPDATE */
1868 
bt_le_conn_params_valid(const struct bt_le_conn_param * param)1869 bool bt_le_conn_params_valid(const struct bt_le_conn_param *param)
1870 {
1871 	if (IS_ENABLED(CONFIG_BT_CONN_PARAM_ANY)) {
1872 		return true;
1873 	}
1874 
1875 	/* All limits according to BT Core spec 5.0 [Vol 2, Part E, 7.8.12] */
1876 
1877 	if (param->interval_min > param->interval_max ||
1878 	    param->interval_min < 6 || param->interval_max > 3200) {
1879 		return false;
1880 	}
1881 
1882 	if (param->latency > 499) {
1883 		return false;
1884 	}
1885 
1886 	if (param->timeout < 10 || param->timeout > 3200 ||
1887 	    ((param->timeout * 4U) <=
1888 	     ((1U + param->latency) * param->interval_max))) {
1889 		return false;
1890 	}
1891 
1892 	return true;
1893 }
1894 
/* Send HCI LE Remote Connection Parameter Request Negative Reply for
 * @p handle with the given HCI error @p reason. Best-effort: failure to
 * allocate a command buffer is only logged.
 */
static void le_conn_param_neg_reply(uint16_t handle, uint8_t reason)
{
	struct bt_hci_cp_le_conn_param_req_neg_reply *cp;
	struct net_buf *buf;

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY,
				sizeof(*cp));
	if (!buf) {
		LOG_ERR("Unable to allocate buffer");
		return;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(handle);
	/* Reason is a single octet in the command, so no endian conversion
	 * applies. The previous sys_cpu_to_le16(reason) would byte-swap on
	 * big-endian hosts, and the store into the uint8_t field would then
	 * truncate the reason to 0.
	 */
	cp->reason = reason;

	bt_hci_cmd_send(BT_HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, buf);
}
1913 
/* Send HCI LE Remote Connection Parameter Request Reply for @p handle,
 * accepting the parameters in @p param.
 *
 * Returns 0 on success or -ENOBUFS when no HCI command buffer is
 * available.
 */
static int le_conn_param_req_reply(uint16_t handle,
				   const struct bt_le_conn_param *param)
{
	struct net_buf *cmd;
	struct bt_hci_cp_le_conn_param_req_reply *cp;

	cmd = bt_hci_cmd_create(BT_HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(*cp));
	if (cmd == NULL) {
		return -ENOBUFS;
	}

	/* Zero-fill so the min/max CE length fields default to 0. */
	cp = net_buf_add(cmd, sizeof(*cp));
	(void)memset(cp, 0, sizeof(*cp));

	cp->handle = sys_cpu_to_le16(handle);
	cp->interval_min = sys_cpu_to_le16(param->interval_min);
	cp->interval_max = sys_cpu_to_le16(param->interval_max);
	cp->latency = sys_cpu_to_le16(param->latency);
	cp->timeout = sys_cpu_to_le16(param->timeout);

	return bt_hci_cmd_send(BT_HCI_OP_LE_CONN_PARAM_REQ_REPLY, cmd);
}
1936 
le_conn_param_req(struct net_buf * buf)1937 static void le_conn_param_req(struct net_buf *buf)
1938 {
1939 	struct bt_hci_evt_le_conn_param_req *evt = (void *)buf->data;
1940 	struct bt_le_conn_param param;
1941 	struct bt_conn *conn;
1942 	uint16_t handle;
1943 
1944 	handle = sys_le16_to_cpu(evt->handle);
1945 	param.interval_min = sys_le16_to_cpu(evt->interval_min);
1946 	param.interval_max = sys_le16_to_cpu(evt->interval_max);
1947 	param.latency = sys_le16_to_cpu(evt->latency);
1948 	param.timeout = sys_le16_to_cpu(evt->timeout);
1949 
1950 	conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_LE);
1951 	if (!conn) {
1952 		LOG_ERR("Unable to lookup conn for handle %u", handle);
1953 		le_conn_param_neg_reply(handle, BT_HCI_ERR_UNKNOWN_CONN_ID);
1954 		return;
1955 	}
1956 
1957 	if (!le_param_req(conn, &param)) {
1958 		le_conn_param_neg_reply(handle, BT_HCI_ERR_INVALID_LL_PARAM);
1959 	} else {
1960 		le_conn_param_req_reply(handle, &param);
1961 	}
1962 
1963 	bt_conn_unref(conn);
1964 }
1965 
/* Handle the HCI LE Connection Update Complete event.
 *
 * On success the new parameters are stored on the connection object.
 * If the remote rejected the LL Connection Parameter Request procedure
 * as unsupported, fall back to the L2CAP Connection Parameter Update
 * procedure (peripheral role only, attempted at most once per
 * connection via the BT_CONN_PERIPHERAL_PARAM_L2CAP flag).
 */
static void le_conn_update_complete(struct net_buf *buf)
{
	struct bt_hci_evt_le_conn_update_complete *evt = (void *)buf->data;
	struct bt_conn *conn;
	uint16_t handle;

	handle = sys_le16_to_cpu(evt->handle);

	LOG_DBG("status 0x%02x %s, handle %u",
		evt->status, bt_hci_err_to_str(evt->status), handle);

	conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_LE);
	if (!conn) {
		LOG_ERR("Unable to lookup conn for handle %u", handle);
		return;
	}

	if (evt->status == BT_HCI_ERR_UNSUPP_REMOTE_FEATURE &&
	    conn->role == BT_HCI_ROLE_PERIPHERAL &&
	    !atomic_test_and_set_bit(conn->flags,
				     BT_CONN_PERIPHERAL_PARAM_L2CAP)) {
		/* CPR not supported, let's try L2CAP CPUP instead */
		struct bt_le_conn_param param;

		param.interval_min = conn->le.interval_min;
		param.interval_max = conn->le.interval_max;
		param.latency = conn->le.pending_latency;
		param.timeout = conn->le.pending_timeout;

		bt_l2cap_update_conn_param(conn, &param);
	} else {
		if (!evt->status) {
			/* Record the parameters the controller applied */
			conn->le.interval = sys_le16_to_cpu(evt->interval);
			conn->le.latency = sys_le16_to_cpu(evt->latency);
			conn->le.timeout = sys_le16_to_cpu(evt->supv_timeout);

#if defined(CONFIG_BT_GAP_AUTO_UPDATE_CONN_PARAMS)
			/* Update succeeded: stop any automatic retries */
			atomic_clear_bit(conn->flags,
					 BT_CONN_PERIPHERAL_PARAM_AUTO_UPDATE);
		} else if (atomic_test_bit(conn->flags,
					   BT_CONN_PERIPHERAL_PARAM_AUTO_UPDATE) &&
			   evt->status == BT_HCI_ERR_UNSUPP_LL_PARAM_VAL &&
			   conn->le.conn_param_retry_countdown) {
			/* Automatic update was rejected: retry after a
			 * delay while the retry budget lasts.
			 */
			conn->le.conn_param_retry_countdown--;
			k_work_schedule(&conn->deferred_work,
					K_MSEC(CONFIG_BT_CONN_PARAM_RETRY_TIMEOUT));
		} else {
			/* Out of retries (or a different failure): give
			 * up on automatic updates for this connection.
			 */
			atomic_clear_bit(conn->flags,
					 BT_CONN_PERIPHERAL_PARAM_AUTO_UPDATE);
#endif /* CONFIG_BT_GAP_AUTO_UPDATE_CONN_PARAMS */

		}

		notify_le_param_updated(conn);
	}

	bt_conn_unref(conn);
}
2024 
2025 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
set_flow_control(void)2026 static int set_flow_control(void)
2027 {
2028 	struct bt_hci_cp_host_buffer_size *hbs;
2029 	struct net_buf *buf;
2030 	int err;
2031 
2032 	/* Check if host flow control is actually supported */
2033 	if (!BT_CMD_TEST(bt_dev.supported_commands, 10, 5)) {
2034 		LOG_WRN("Controller to host flow control not supported");
2035 		return 0;
2036 	}
2037 
2038 	buf = bt_hci_cmd_create(BT_HCI_OP_HOST_BUFFER_SIZE,
2039 				sizeof(*hbs));
2040 	if (!buf) {
2041 		return -ENOBUFS;
2042 	}
2043 
2044 	hbs = net_buf_add(buf, sizeof(*hbs));
2045 	(void)memset(hbs, 0, sizeof(*hbs));
2046 	hbs->acl_mtu = sys_cpu_to_le16(CONFIG_BT_BUF_ACL_RX_SIZE);
2047 	hbs->acl_pkts = sys_cpu_to_le16(CONFIG_BT_BUF_ACL_RX_COUNT);
2048 
2049 	err = bt_hci_cmd_send_sync(BT_HCI_OP_HOST_BUFFER_SIZE, buf, NULL);
2050 	if (err) {
2051 		return err;
2052 	}
2053 
2054 	buf = bt_hci_cmd_create(BT_HCI_OP_SET_CTL_TO_HOST_FLOW, 1);
2055 	if (!buf) {
2056 		return -ENOBUFS;
2057 	}
2058 
2059 	net_buf_add_u8(buf, BT_HCI_CTL_TO_HOST_FLOW_ENABLE);
2060 	return bt_hci_cmd_send_sync(BT_HCI_OP_SET_CTL_TO_HOST_FLOW, buf, NULL);
2061 }
2062 #endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */
2063 
/* Remove all pairing state for a peer: disconnects an active link,
 * clears SMP/link keys and GATT data, and notifies bond-deleted
 * listeners.
 *
 * @param id   Local identity the bond belongs to.
 * @param addr Peer LE address.
 */
static void unpair(uint8_t id, const bt_addr_le_t *addr)
{
	struct bt_keys *keys = NULL;
	struct bt_conn *conn = bt_conn_lookup_addr_le(id, addr);

	if (conn) {
		/* Clear the conn->le.keys pointer since we'll invalidate it,
		 * and don't want any subsequent code (like disconnected
		 * callbacks) accessing it.
		 */
		if (conn->type == BT_CONN_TYPE_LE) {
			keys = conn->le.keys;
			conn->le.keys = NULL;
		}

		bt_conn_disconnect(conn, BT_HCI_ERR_REMOTE_USER_TERM_CONN);
		bt_conn_unref(conn);
	}

	if (IS_ENABLED(CONFIG_BT_CLASSIC)) {
		/* LE Public may indicate BR/EDR as well */
		if (addr->type == BT_ADDR_LE_PUBLIC) {
			bt_keys_link_key_clear_addr(&addr->a);
		}
	}

	if (IS_ENABLED(CONFIG_BT_SMP)) {
		/* Fall back to the key store if the keys were not taken
		 * from the connection object above.
		 */
		if (!keys) {
			keys = bt_keys_find_addr(id, addr);
		}

		if (keys) {
			bt_keys_clear(keys);
		}
	}

	bt_gatt_clear(id, addr);

#if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CLASSIC)
	struct bt_conn_auth_info_cb *listener, *next;

	/* Let all registered auth-info listeners know the bond is gone */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&bt_auth_info_cbs, listener,
					  next, node) {
		if (listener->bond_deleted) {
			listener->bond_deleted(id, addr);
		}
	}
#endif /* defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CLASSIC) */
}
2113 
unpair_remote(const struct bt_bond_info * info,void * data)2114 static void unpair_remote(const struct bt_bond_info *info, void *data)
2115 {
2116 	uint8_t *id = (uint8_t *) data;
2117 
2118 	unpair(*id, &info->addr);
2119 }
2120 
/* Public API: remove the pairing with a peer, or with all peers.
 *
 * @param id   Local identity; must be below CONFIG_BT_ID_MAX.
 * @param addr Peer address. With SMP enabled, NULL or BT_ADDR_LE_ANY
 *             removes every bond stored for the identity.
 *
 * @return 0 on success, -EINVAL on invalid identity or address.
 */
int bt_unpair(uint8_t id, const bt_addr_le_t *addr)
{
	if (id >= CONFIG_BT_ID_MAX) {
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_BT_SMP)) {
		CHECKIF(addr == NULL) {
			LOG_DBG("addr is NULL");
			return -EINVAL;
		}

		unpair(id, addr);
		return 0;
	}

	if (addr == NULL || bt_addr_le_eq(addr, BT_ADDR_LE_ANY)) {
		/* Wildcard: walk every stored bond for this identity */
		bt_foreach_bond(id, unpair_remote, &id);
	} else {
		unpair(id, addr);
	}

	return 0;
}
2144 
2145 #endif /* CONFIG_BT_CONN */
2146 
2147 #if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CLASSIC)
bt_security_err_get(uint8_t hci_err)2148 enum bt_security_err bt_security_err_get(uint8_t hci_err)
2149 {
2150 	switch (hci_err) {
2151 	case BT_HCI_ERR_SUCCESS:
2152 		return BT_SECURITY_ERR_SUCCESS;
2153 	case BT_HCI_ERR_AUTH_FAIL:
2154 		return BT_SECURITY_ERR_AUTH_FAIL;
2155 	case BT_HCI_ERR_PIN_OR_KEY_MISSING:
2156 		return BT_SECURITY_ERR_PIN_OR_KEY_MISSING;
2157 	case BT_HCI_ERR_PAIRING_NOT_SUPPORTED:
2158 		return BT_SECURITY_ERR_PAIR_NOT_SUPPORTED;
2159 	case BT_HCI_ERR_PAIRING_NOT_ALLOWED:
2160 		return BT_SECURITY_ERR_PAIR_NOT_ALLOWED;
2161 	case BT_HCI_ERR_INVALID_PARAM:
2162 		return BT_SECURITY_ERR_INVALID_PARAM;
2163 	default:
2164 		return BT_SECURITY_ERR_UNSPECIFIED;
2165 	}
2166 }
2167 #endif /* defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CLASSIC) */
2168 
2169 #if defined(CONFIG_BT_SMP)
update_sec_level(struct bt_conn * conn)2170 static bool update_sec_level(struct bt_conn *conn)
2171 {
2172 	if (conn->le.keys && (conn->le.keys->flags & BT_KEYS_AUTHENTICATED)) {
2173 		if (conn->le.keys->flags & BT_KEYS_SC &&
2174 		    conn->le.keys->enc_size == BT_SMP_MAX_ENC_KEY_SIZE) {
2175 			conn->sec_level = BT_SECURITY_L4;
2176 		} else {
2177 			conn->sec_level = BT_SECURITY_L3;
2178 		}
2179 	} else {
2180 		conn->sec_level = BT_SECURITY_L2;
2181 	}
2182 
2183 	return !(conn->required_sec_level > conn->sec_level);
2184 }
2185 #endif /* CONFIG_BT_SMP */
2186 
2187 #if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CLASSIC)
/* Handle the HCI Encryption Change event.
 *
 * On success, records the new encryption state, re-evaluates the
 * security level (SMP keys for LE, link keys for BR/EDR) and notifies
 * bt_conn_security_changed(). If the required security level cannot be
 * met after the change, the connection is disconnected.
 */
static void hci_encrypt_change(struct net_buf *buf)
{
	struct bt_hci_evt_encrypt_change *evt = (void *)buf->data;
	uint16_t handle = sys_le16_to_cpu(evt->handle);
	uint8_t status = evt->status;
	struct bt_conn *conn;

	LOG_DBG("status 0x%02x %s handle %u encrypt 0x%02x",
		evt->status, bt_hci_err_to_str(evt->status), handle, evt->encrypt);

	conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_ALL);
	if (!conn) {
		LOG_ERR("Unable to look up conn with handle %u", handle);
		return;
	}

	if (status) {
		/* Encryption failed: report the failure and stop here */
		bt_conn_security_changed(conn, status,
					 bt_security_err_get(status));
		bt_conn_unref(conn);
		return;
	}

	if (conn->encrypt == evt->encrypt) {
		LOG_WRN("No change to encryption state (encrypt 0x%02x)", evt->encrypt);
		bt_conn_unref(conn);
		return;
	}

	conn->encrypt = evt->encrypt;

#if defined(CONFIG_BT_SMP)
	if (conn->type == BT_CONN_TYPE_LE) {
		/*
		 * we update keys properties only on successful encryption to
		 * avoid losing valid keys if encryption was not successful.
		 *
		 * Update keys with last pairing info for proper sec level
		 * update. This is done only for LE transport, for BR/EDR keys
		 * are updated on HCI 'Link Key Notification Event'
		 */
		if (conn->encrypt) {
			bt_smp_update_keys(conn);
		}

		if (!update_sec_level(conn)) {
			/* Required level not reached; reported below */
			status = BT_HCI_ERR_AUTH_FAIL;
		}
	}
#endif /* CONFIG_BT_SMP */
#if defined(CONFIG_BT_CLASSIC)
	if (conn->type == BT_CONN_TYPE_BR) {
		if (!bt_br_update_sec_level(conn)) {
			bt_conn_unref(conn);
			return;
		}

		if (IS_ENABLED(CONFIG_BT_SMP)) {
			/*
			 * Start SMP over BR/EDR if we are pairing and are
			 * central on the link
			 */
			if (atomic_test_bit(conn->flags, BT_CONN_BR_PAIRING) &&
			    conn->role == BT_CONN_ROLE_CENTRAL) {
				bt_smp_br_send_pairing_req(conn);
			}
		}
	}
#endif /* CONFIG_BT_CLASSIC */

	bt_conn_security_changed(conn, status, bt_security_err_get(status));

	if (status) {
		LOG_ERR("Failed to set required security level");
		bt_conn_disconnect(conn, status);
	}

	bt_conn_unref(conn);
}
2267 
/* Handle the HCI Encryption Key Refresh Complete event.
 *
 * Re-evaluates the connection's security level after a key refresh and
 * notifies bt_conn_security_changed(); disconnects if the required
 * security level can no longer be met.
 */
static void hci_encrypt_key_refresh_complete(struct net_buf *buf)
{
	struct bt_hci_evt_encrypt_key_refresh_complete *evt = (void *)buf->data;
	uint8_t status = evt->status;
	struct bt_conn *conn;
	uint16_t handle;

	handle = sys_le16_to_cpu(evt->handle);

	LOG_DBG("status 0x%02x %s handle %u",
		evt->status, bt_hci_err_to_str(evt->status), handle);

	conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_ALL);
	if (!conn) {
		LOG_ERR("Unable to look up conn with handle %u", handle);
		return;
	}

	if (status) {
		/* Refresh failed: report the failure and stop here */
		bt_conn_security_changed(conn, status,
					 bt_security_err_get(status));
		bt_conn_unref(conn);
		return;
	}

	/*
	 * Update keys with last pairing info for proper sec level update.
	 * This is done only for LE transport. For BR/EDR transport keys are
	 * updated on HCI 'Link Key Notification Event', therefore update here
	 * only security level based on available keys and encryption state.
	 */
#if defined(CONFIG_BT_SMP)
	if (conn->type == BT_CONN_TYPE_LE) {
		bt_smp_update_keys(conn);

		if (!update_sec_level(conn)) {
			/* Required level not reached; reported below */
			status = BT_HCI_ERR_AUTH_FAIL;
		}
	}
#endif /* CONFIG_BT_SMP */
#if defined(CONFIG_BT_CLASSIC)
	if (conn->type == BT_CONN_TYPE_BR) {
		if (!bt_br_update_sec_level(conn)) {
			bt_conn_unref(conn);
			return;
		}
	}
#endif /* CONFIG_BT_CLASSIC */

	bt_conn_security_changed(conn, status, bt_security_err_get(status));
	if (status) {
		LOG_ERR("Failed to set required security level");
		bt_conn_disconnect(conn, status);
	}

	bt_conn_unref(conn);
}
2325 #endif /* CONFIG_BT_SMP || CONFIG_BT_CLASSIC */
2326 
2327 #if defined(CONFIG_BT_REMOTE_VERSION)
bt_hci_evt_read_remote_version_complete(struct net_buf * buf)2328 static void bt_hci_evt_read_remote_version_complete(struct net_buf *buf)
2329 {
2330 	struct bt_hci_evt_remote_version_info *evt;
2331 	struct bt_conn *conn;
2332 	uint16_t handle;
2333 
2334 	evt = net_buf_pull_mem(buf, sizeof(*evt));
2335 	handle = sys_le16_to_cpu(evt->handle);
2336 	conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_ALL);
2337 	if (!conn) {
2338 		LOG_ERR("No connection for handle %u", handle);
2339 		return;
2340 	}
2341 
2342 	if (!evt->status) {
2343 		conn->rv.version = evt->version;
2344 		conn->rv.manufacturer = sys_le16_to_cpu(evt->manufacturer);
2345 		conn->rv.subversion = sys_le16_to_cpu(evt->subversion);
2346 	}
2347 
2348 	atomic_set_bit(conn->flags, BT_CONN_AUTO_VERSION_INFO);
2349 
2350 	if (IS_ENABLED(CONFIG_BT_REMOTE_INFO)) {
2351 		/* Remote features is already present */
2352 		notify_remote_info(conn);
2353 	}
2354 
2355 	bt_conn_unref(conn);
2356 }
2357 #endif /* CONFIG_BT_REMOTE_VERSION */
2358 
hci_hardware_error(struct net_buf * buf)2359 static void hci_hardware_error(struct net_buf *buf)
2360 {
2361 	struct bt_hci_evt_hardware_error *evt;
2362 
2363 	evt = net_buf_pull_mem(buf, sizeof(*evt));
2364 
2365 	LOG_ERR("Hardware error, hardware code: %d", evt->hardware_code);
2366 }
2367 
2368 #if defined(CONFIG_BT_SMP)
/* Tell the controller that no LTK is available for this connection. */
static void le_ltk_neg_reply(uint16_t handle)
{
	struct bt_hci_cp_le_ltk_req_neg_reply *cp;
	struct net_buf *cmd_buf;

	cmd_buf = bt_hci_cmd_create(BT_HCI_OP_LE_LTK_REQ_NEG_REPLY, sizeof(*cp));
	if (cmd_buf == NULL) {
		LOG_ERR("Out of command buffers");

		return;
	}

	cp = net_buf_add(cmd_buf, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(handle);

	bt_hci_cmd_send(BT_HCI_OP_LE_LTK_REQ_NEG_REPLY, cmd_buf);
}
2386 
/* Answer an LTK request with the given key material.
 *
 * @param ltk Key of sizeof(cp->ltk) bytes, copied into the command.
 */
static void le_ltk_reply(uint16_t handle, uint8_t *ltk)
{
	struct bt_hci_cp_le_ltk_req_reply *cp;
	struct net_buf *cmd_buf;

	cmd_buf = bt_hci_cmd_create(BT_HCI_OP_LE_LTK_REQ_REPLY, sizeof(*cp));
	if (cmd_buf == NULL) {
		LOG_ERR("Out of command buffers");
		return;
	}

	cp = net_buf_add(cmd_buf, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(handle);
	memcpy(cp->ltk, ltk, sizeof(cp->ltk));

	bt_hci_cmd_send(BT_HCI_OP_LE_LTK_REQ_REPLY, cmd_buf);
}
2405 
le_ltk_request(struct net_buf * buf)2406 static void le_ltk_request(struct net_buf *buf)
2407 {
2408 	struct bt_hci_evt_le_ltk_request *evt = (void *)buf->data;
2409 	struct bt_conn *conn;
2410 	uint16_t handle;
2411 	uint8_t ltk[16];
2412 
2413 	handle = sys_le16_to_cpu(evt->handle);
2414 
2415 	LOG_DBG("handle %u", handle);
2416 
2417 	conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_LE);
2418 	if (!conn) {
2419 		LOG_ERR("Unable to lookup conn for handle %u", handle);
2420 		return;
2421 	}
2422 
2423 	if (bt_smp_request_ltk(conn, evt->rand, evt->ediv, ltk)) {
2424 		le_ltk_reply(handle, ltk);
2425 	} else {
2426 		le_ltk_neg_reply(handle);
2427 	}
2428 
2429 	bt_conn_unref(conn);
2430 }
2431 #endif /* CONFIG_BT_SMP */
2432 
hci_reset_complete(struct net_buf * buf)2433 static void hci_reset_complete(struct net_buf *buf)
2434 {
2435 	uint8_t status = buf->data[0];
2436 	atomic_t flags;
2437 
2438 	LOG_DBG("status 0x%02x %s", status, bt_hci_err_to_str(status));
2439 
2440 	if (status) {
2441 		return;
2442 	}
2443 
2444 	if (IS_ENABLED(CONFIG_BT_OBSERVER)) {
2445 		bt_scan_reset();
2446 	}
2447 
2448 #if defined(CONFIG_BT_CLASSIC)
2449 	bt_br_discovery_reset();
2450 #endif /* CONFIG_BT_CLASSIC */
2451 
2452 	flags = (atomic_get(bt_dev.flags) & BT_DEV_PERSISTENT_FLAGS);
2453 	atomic_set(bt_dev.flags, flags);
2454 }
2455 
/* Complete the in-flight HCI command identified by @p opcode.
 *
 * Atomically claims the buffer stored in bt_dev.sent_cmd, copies the
 * event payload into it, applies any deferred state-bit update and
 * wakes a waiting bt_hci_cmd_send_sync() caller. Opcode 0 (unsolicited
 * completions sent only for their ncmd credit) completes nothing.
 */
static void hci_cmd_done(uint16_t opcode, uint8_t status, struct net_buf *evt_buf)
{
	/* Original command buffer. */
	struct net_buf *buf = NULL;

	LOG_DBG("opcode 0x%04x status 0x%02x %s buf %p", opcode,
		status, bt_hci_err_to_str(status), evt_buf);

	/* Unsolicited cmd complete. This does not complete a command.
	 * The controller can send these for effect of the `ncmd` field.
	 */
	if (opcode == 0) {
		goto exit;
	}

	/* Take the original command buffer reference. The atomic
	 * exchange ensures only one context can claim it.
	 */
	buf = atomic_ptr_clear((atomic_ptr_t *)&bt_dev.sent_cmd);

	if (!buf) {
		LOG_ERR("No command sent for cmd complete 0x%04x", opcode);
		goto exit;
	}

	if (cmd(buf)->opcode != opcode) {
		/* Not the command we sent: put it back untouched. The
		 * assert checks nothing raced us into the slot.
		 */
		LOG_ERR("OpCode 0x%04x completed instead of expected 0x%04x", opcode,
			cmd(buf)->opcode);
		buf = atomic_ptr_set((atomic_ptr_t *)&bt_dev.sent_cmd, buf);
		__ASSERT_NO_MSG(!buf);
		goto exit;
	}

	/* Response data is to be delivered in the original command
	 * buffer.
	 */
	if (evt_buf != buf) {
		net_buf_reset(buf);
		bt_buf_set_type(buf, BT_BUF_EVT);
		net_buf_reserve(buf, BT_BUF_RESERVE);
		net_buf_add_mem(buf, evt_buf->data, evt_buf->len);
	}

	/* Apply the deferred flag change requested via cmd(buf)->state,
	 * but only if the command succeeded.
	 */
	if (cmd(buf)->state && !status) {
		struct bt_hci_cmd_state_set *update = cmd(buf)->state;

		atomic_set_bit_to(update->target, update->bit, update->val);
	}

	/* If the command was synchronous wake up bt_hci_cmd_send_sync() */
	if (cmd(buf)->sync) {
		LOG_DBG("sync cmd released");
		cmd(buf)->status = status;
		k_sem_give(cmd(buf)->sync);
	}

exit:
	if (buf) {
		net_buf_unref(buf);
	}
}
2515 
hci_cmd_complete(struct net_buf * buf)2516 static void hci_cmd_complete(struct net_buf *buf)
2517 {
2518 	struct bt_hci_evt_cmd_complete *evt;
2519 	uint8_t status, ncmd;
2520 	uint16_t opcode;
2521 
2522 	evt = net_buf_pull_mem(buf, sizeof(*evt));
2523 	ncmd = evt->ncmd;
2524 	opcode = sys_le16_to_cpu(evt->opcode);
2525 
2526 	LOG_DBG("opcode 0x%04x", opcode);
2527 
2528 	/* All command return parameters have a 1-byte status in the
2529 	 * beginning, so we can safely make this generalization.
2530 	 */
2531 	status = buf->data[0];
2532 
2533 	/* HOST_NUM_COMPLETED_PACKETS should not generate a response under normal operation.
2534 	 * The generation of this command ignores `ncmd_sem`, so should not be given here.
2535 	 */
2536 	if (opcode == BT_HCI_OP_HOST_NUM_COMPLETED_PACKETS) {
2537 		LOG_WRN("Unexpected HOST_NUM_COMPLETED_PACKETS, status 0x%02x %s",
2538 			status, bt_hci_err_to_str(status));
2539 		return;
2540 	}
2541 
2542 	hci_cmd_done(opcode, status, buf);
2543 
2544 	/* Allow next command to be sent */
2545 	if (ncmd) {
2546 		k_sem_give(&bt_dev.ncmd_sem);
2547 		bt_tx_irq_raise();
2548 	}
2549 }
2550 
hci_cmd_status(struct net_buf * buf)2551 static void hci_cmd_status(struct net_buf *buf)
2552 {
2553 	struct bt_hci_evt_cmd_status *evt;
2554 	uint16_t opcode;
2555 	uint8_t ncmd;
2556 
2557 	evt = net_buf_pull_mem(buf, sizeof(*evt));
2558 	opcode = sys_le16_to_cpu(evt->opcode);
2559 	ncmd = evt->ncmd;
2560 
2561 	LOG_DBG("opcode 0x%04x", opcode);
2562 
2563 	hci_cmd_done(opcode, evt->status, buf);
2564 
2565 	/* Allow next command to be sent */
2566 	if (ncmd) {
2567 		k_sem_give(&bt_dev.ncmd_sem);
2568 		bt_tx_irq_raise();
2569 	}
2570 }
2571 
/* Fetch the HCI handle of an established connection.
 *
 * @return 0 on success, -ENOTCONN if the connection is not active.
 */
int bt_hci_get_conn_handle(const struct bt_conn *conn, uint16_t *conn_handle)
{
	if (conn->state == BT_CONN_CONNECTED) {
		*conn_handle = conn->handle;
		return 0;
	}

	return -ENOTCONN;
}
2581 
2582 #if defined(CONFIG_BT_EXT_ADV)
/* Fetch the HCI advertising handle of a created advertising set.
 *
 * @return 0 on success, -EINVAL if the set has not been created.
 */
int bt_hci_get_adv_handle(const struct bt_le_ext_adv *adv, uint8_t *adv_handle)
{
	if (atomic_test_bit(adv->flags, BT_ADV_CREATED)) {
		*adv_handle = adv->handle;
		return 0;
	}

	return -EINVAL;
}
2592 #endif /* CONFIG_BT_EXT_ADV */
2593 
2594 #if defined(CONFIG_BT_PER_ADV_SYNC)
/* Fetch the HCI sync handle of a created periodic advertising sync.
 *
 * @return 0 on success, -EINVAL if the sync object has not been created.
 */
int bt_hci_get_adv_sync_handle(const struct bt_le_per_adv_sync *sync, uint16_t *sync_handle)
{
	if (atomic_test_bit(sync->flags, BT_PER_ADV_SYNC_CREATED)) {
		*sync_handle = sync->handle;
		return 0;
	}

	return -EINVAL;
}
2605 #endif
2606 
2607 #if defined(CONFIG_BT_HCI_VS_EVT_USER)
/* Register a user callback that gets first look at vendor-specific HCI
 * events (consumed in hci_vendor_event()). Only one callback is
 * stored; a later call replaces the earlier one. Always returns 0.
 */
int bt_hci_register_vnd_evt_cb(bt_hci_vnd_evt_cb_t cb)
{
	hci_vnd_evt_cb = cb;
	return 0;
}
2613 #endif /* CONFIG_BT_HCI_VS_EVT_USER */
2614 
2615 #if defined(CONFIG_BT_TRANSMIT_POWER_CONTROL)
bt_hci_le_transmit_power_report(struct net_buf * buf)2616 void bt_hci_le_transmit_power_report(struct net_buf *buf)
2617 {
2618 	struct bt_hci_evt_le_transmit_power_report *evt;
2619 	struct bt_conn_le_tx_power_report report;
2620 	struct bt_conn *conn;
2621 
2622 	evt = net_buf_pull_mem(buf, sizeof(*evt));
2623 	conn = bt_conn_lookup_handle(sys_le16_to_cpu(evt->handle), BT_CONN_TYPE_LE);
2624 	if (!conn) {
2625 		LOG_ERR("Unknown conn handle 0x%04X for transmit power report",
2626 		       sys_le16_to_cpu(evt->handle));
2627 		return;
2628 	}
2629 
2630 	report.reason = evt->reason;
2631 	report.phy = evt->phy;
2632 	report.tx_power_level = evt->tx_power_level;
2633 	report.tx_power_level_flag = evt->tx_power_level_flag;
2634 	report.delta = evt->delta;
2635 
2636 	notify_tx_power_report(conn, report);
2637 
2638 	bt_conn_unref(conn);
2639 }
2640 #endif /* CONFIG_BT_TRANSMIT_POWER_CONTROL */
2641 
2642 #if defined(CONFIG_BT_PATH_LOSS_MONITORING)
bt_hci_le_path_loss_threshold_event(struct net_buf * buf)2643 void bt_hci_le_path_loss_threshold_event(struct net_buf *buf)
2644 {
2645 	struct bt_hci_evt_le_path_loss_threshold *evt;
2646 	struct bt_conn_le_path_loss_threshold_report report;
2647 	struct bt_conn *conn;
2648 
2649 	evt = net_buf_pull_mem(buf, sizeof(*evt));
2650 
2651 	if (evt->zone_entered > BT_CONN_LE_PATH_LOSS_ZONE_ENTERED_HIGH) {
2652 		LOG_ERR("Invalid zone %u in bt_hci_evt_le_path_loss_threshold",
2653 			evt->zone_entered);
2654 		return;
2655 	}
2656 
2657 	conn = bt_conn_lookup_handle(sys_le16_to_cpu(evt->handle), BT_CONN_TYPE_LE);
2658 	if (!conn) {
2659 		LOG_ERR("Unknown conn handle 0x%04X for path loss threshold report",
2660 		       sys_le16_to_cpu(evt->handle));
2661 		return;
2662 	}
2663 
2664 	if (evt->current_path_loss == BT_HCI_LE_PATH_LOSS_UNAVAILABLE) {
2665 		report.zone = BT_CONN_LE_PATH_LOSS_ZONE_UNAVAILABLE;
2666 		report.path_loss = BT_HCI_LE_PATH_LOSS_UNAVAILABLE;
2667 	} else {
2668 		report.zone = evt->zone_entered;
2669 		report.path_loss = evt->current_path_loss;
2670 	}
2671 
2672 	notify_path_loss_threshold_report(conn, report);
2673 
2674 	bt_conn_unref(conn);
2675 }
2676 #endif /* CONFIG_BT_PATH_LOSS_MONITORING */
2677 
/* Vendor-specific subevent dispatch table, consumed by
 * hci_vendor_event() via handle_vs_event(). Each entry pairs a
 * subevent code with its handler and minimum payload length.
 */
static const struct event_handler vs_events[] = {
#if defined(CONFIG_BT_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES)
	EVENT_HANDLER(BT_HCI_EVT_VS_LE_CONNECTIONLESS_IQ_REPORT,
		      bt_hci_le_vs_df_connectionless_iq_report,
		      sizeof(struct bt_hci_evt_vs_le_connectionless_iq_report)),
#endif /* CONFIG_BT_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES */
#if defined(CONFIG_BT_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES)
	EVENT_HANDLER(BT_HCI_EVT_VS_LE_CONNECTION_IQ_REPORT, bt_hci_le_vs_df_connection_iq_report,
		      sizeof(struct bt_hci_evt_vs_le_connection_iq_report)),
#endif /* CONFIG_BT_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES */
};
2689 
/* Handle a vendor-specific HCI event.
 *
 * A user-registered callback (bt_hci_register_vnd_evt_cb()) gets first
 * chance to consume the event; the buffer state is saved and restored
 * around the call so the built-in handlers still see the untouched
 * event. Events not consumed by the callback are dispatched through
 * the vs_events table when CONFIG_BT_HCI_VS is enabled.
 */
static void hci_vendor_event(struct net_buf *buf)
{
	bool handled = false;

#if defined(CONFIG_BT_HCI_VS_EVT_USER)
	if (hci_vnd_evt_cb) {
		struct net_buf_simple_state state;

		net_buf_simple_save(&buf->b, &state);

		handled = hci_vnd_evt_cb(&buf->b);

		net_buf_simple_restore(&buf->b, &state);
	}
#endif /* CONFIG_BT_HCI_VS_EVT_USER */

	if (IS_ENABLED(CONFIG_BT_HCI_VS) && !handled) {
		struct bt_hci_evt_vs *evt;

		evt = net_buf_pull_mem(buf, sizeof(*evt));

		LOG_DBG("subevent 0x%02x", evt->subevent);

		handle_vs_event(evt->subevent, buf, vs_events, ARRAY_SIZE(vs_events));
	}
}
2716 
/* LE Meta event subevent dispatch table, consumed by
 * hci_le_meta_event(). Each entry pairs a subevent code with its
 * handler and the minimum payload length handle_event() enforces.
 */
static const struct event_handler meta_events[] = {
#if defined(CONFIG_BT_OBSERVER)
	EVENT_HANDLER(BT_HCI_EVT_LE_ADVERTISING_REPORT, bt_hci_le_adv_report,
		      sizeof(struct bt_hci_evt_le_advertising_report)),
#endif /* CONFIG_BT_OBSERVER */
#if defined(CONFIG_BT_CONN)
	EVENT_HANDLER(BT_HCI_EVT_LE_CONN_COMPLETE, le_legacy_conn_complete,
		      sizeof(struct bt_hci_evt_le_conn_complete)),
	EVENT_HANDLER(BT_HCI_EVT_LE_ENH_CONN_COMPLETE, le_enh_conn_complete,
		      sizeof(struct bt_hci_evt_le_enh_conn_complete)),
	EVENT_HANDLER(BT_HCI_EVT_LE_CONN_UPDATE_COMPLETE,
		      le_conn_update_complete,
		      sizeof(struct bt_hci_evt_le_conn_update_complete)),
	EVENT_HANDLER(BT_HCI_EVT_LE_REMOTE_FEAT_COMPLETE,
		      le_remote_feat_complete,
		      sizeof(struct bt_hci_evt_le_remote_feat_complete)),
	EVENT_HANDLER(BT_HCI_EVT_LE_CONN_PARAM_REQ, le_conn_param_req,
		      sizeof(struct bt_hci_evt_le_conn_param_req)),
#if defined(CONFIG_BT_DATA_LEN_UPDATE)
	EVENT_HANDLER(BT_HCI_EVT_LE_DATA_LEN_CHANGE, le_data_len_change,
		      sizeof(struct bt_hci_evt_le_data_len_change)),
#endif /* CONFIG_BT_DATA_LEN_UPDATE */
#if defined(CONFIG_BT_PHY_UPDATE)
	EVENT_HANDLER(BT_HCI_EVT_LE_PHY_UPDATE_COMPLETE,
		      le_phy_update_complete,
		      sizeof(struct bt_hci_evt_le_phy_update_complete)),
#endif /* CONFIG_BT_PHY_UPDATE */
#endif /* CONFIG_BT_CONN */
#if defined(CONFIG_BT_SMP)
	EVENT_HANDLER(BT_HCI_EVT_LE_LTK_REQUEST, le_ltk_request,
		      sizeof(struct bt_hci_evt_le_ltk_request)),
#endif /* CONFIG_BT_SMP */
#if defined(CONFIG_BT_ECC)
	EVENT_HANDLER(BT_HCI_EVT_LE_P256_PUBLIC_KEY_COMPLETE,
		      bt_hci_evt_le_pkey_complete,
		      sizeof(struct bt_hci_evt_le_p256_public_key_complete)),
	EVENT_HANDLER(BT_HCI_EVT_LE_GENERATE_DHKEY_COMPLETE,
		      bt_hci_evt_le_dhkey_complete,
		      sizeof(struct bt_hci_evt_le_generate_dhkey_complete)),
#endif /* CONFIG_BT_ECC */
#if defined(CONFIG_BT_EXT_ADV)
#if defined(CONFIG_BT_BROADCASTER)
	EVENT_HANDLER(BT_HCI_EVT_LE_ADV_SET_TERMINATED, bt_hci_le_adv_set_terminated,
		      sizeof(struct bt_hci_evt_le_adv_set_terminated)),
	EVENT_HANDLER(BT_HCI_EVT_LE_SCAN_REQ_RECEIVED, bt_hci_le_scan_req_received,
		      sizeof(struct bt_hci_evt_le_scan_req_received)),
#endif /* CONFIG_BT_BROADCASTER */
#if defined(CONFIG_BT_OBSERVER)
	EVENT_HANDLER(BT_HCI_EVT_LE_SCAN_TIMEOUT, bt_hci_le_scan_timeout,
		      0),
	EVENT_HANDLER(BT_HCI_EVT_LE_EXT_ADVERTISING_REPORT, bt_hci_le_adv_ext_report,
		      sizeof(struct bt_hci_evt_le_ext_advertising_report)),
#endif /* defined(CONFIG_BT_OBSERVER) */
#if defined(CONFIG_BT_PER_ADV_SYNC)
	EVENT_HANDLER(BT_HCI_EVT_LE_PER_ADV_SYNC_ESTABLISHED,
		      bt_hci_le_per_adv_sync_established,
		      sizeof(struct bt_hci_evt_le_per_adv_sync_established)),
	EVENT_HANDLER(BT_HCI_EVT_LE_PER_ADVERTISING_REPORT, bt_hci_le_per_adv_report,
		      sizeof(struct bt_hci_evt_le_per_advertising_report)),
	EVENT_HANDLER(BT_HCI_EVT_LE_PER_ADV_SYNC_LOST, bt_hci_le_per_adv_sync_lost,
		      sizeof(struct bt_hci_evt_le_per_adv_sync_lost)),
#if defined(CONFIG_BT_PER_ADV_SYNC_TRANSFER_RECEIVER)
	EVENT_HANDLER(BT_HCI_EVT_LE_PAST_RECEIVED, bt_hci_le_past_received,
		      sizeof(struct bt_hci_evt_le_past_received)),
#endif /* CONFIG_BT_PER_ADV_SYNC_TRANSFER_RECEIVER */
#endif /* defined(CONFIG_BT_PER_ADV_SYNC) */
#endif /* defined(CONFIG_BT_EXT_ADV) */
#if defined(CONFIG_BT_ISO_UNICAST)
	EVENT_HANDLER(BT_HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_established,
		      sizeof(struct bt_hci_evt_le_cis_established)),
#if defined(CONFIG_BT_ISO_PERIPHERAL)
	EVENT_HANDLER(BT_HCI_EVT_LE_CIS_REQ, hci_le_cis_req,
		      sizeof(struct bt_hci_evt_le_cis_req)),
#endif /* (CONFIG_BT_ISO_PERIPHERAL) */
#endif /* (CONFIG_BT_ISO_UNICAST) */
#if defined(CONFIG_BT_ISO_BROADCASTER)
	EVENT_HANDLER(BT_HCI_EVT_LE_BIG_COMPLETE,
		      hci_le_big_complete,
		      sizeof(struct bt_hci_evt_le_big_complete)),
	EVENT_HANDLER(BT_HCI_EVT_LE_BIG_TERMINATE,
		      hci_le_big_terminate,
		      sizeof(struct bt_hci_evt_le_big_terminate)),
#endif /* CONFIG_BT_ISO_BROADCASTER */
#if defined(CONFIG_BT_ISO_SYNC_RECEIVER)
	EVENT_HANDLER(BT_HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
		      hci_le_big_sync_established,
		      sizeof(struct bt_hci_evt_le_big_sync_established)),
	EVENT_HANDLER(BT_HCI_EVT_LE_BIG_SYNC_LOST,
		      hci_le_big_sync_lost,
		      sizeof(struct bt_hci_evt_le_big_sync_lost)),
	EVENT_HANDLER(BT_HCI_EVT_LE_BIGINFO_ADV_REPORT,
		      bt_hci_le_biginfo_adv_report,
		      sizeof(struct bt_hci_evt_le_biginfo_adv_report)),
#endif /* CONFIG_BT_ISO_SYNC_RECEIVER */
#if defined(CONFIG_BT_DF_CONNECTIONLESS_CTE_RX)
	EVENT_HANDLER(BT_HCI_EVT_LE_CONNECTIONLESS_IQ_REPORT, bt_hci_le_df_connectionless_iq_report,
		      sizeof(struct bt_hci_evt_le_connectionless_iq_report)),
#endif /* CONFIG_BT_DF_CONNECTIONLESS_CTE_RX */
#if defined(CONFIG_BT_DF_CONNECTION_CTE_RX)
	EVENT_HANDLER(BT_HCI_EVT_LE_CONNECTION_IQ_REPORT, bt_hci_le_df_connection_iq_report,
		      sizeof(struct bt_hci_evt_le_connection_iq_report)),
#endif /* CONFIG_BT_DF_CONNECTION_CTE_RX */
#if defined(CONFIG_BT_DF_CONNECTION_CTE_REQ)
	EVENT_HANDLER(BT_HCI_EVT_LE_CTE_REQUEST_FAILED, bt_hci_le_df_cte_req_failed,
		      sizeof(struct bt_hci_evt_le_cte_req_failed)),
#endif /* CONFIG_BT_DF_CONNECTION_CTE_REQ */
#if defined(CONFIG_BT_TRANSMIT_POWER_CONTROL)
	EVENT_HANDLER(BT_HCI_EVT_LE_TRANSMIT_POWER_REPORT, bt_hci_le_transmit_power_report,
		      sizeof(struct bt_hci_evt_le_transmit_power_report)),
#endif /* CONFIG_BT_TRANSMIT_POWER_CONTROL */
#if defined(CONFIG_BT_PATH_LOSS_MONITORING)
	EVENT_HANDLER(BT_HCI_EVT_LE_PATH_LOSS_THRESHOLD, bt_hci_le_path_loss_threshold_event,
		      sizeof(struct bt_hci_evt_le_path_loss_threshold)),
#endif /* CONFIG_BT_PATH_LOSS_MONITORING */
#if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
	EVENT_HANDLER(BT_HCI_EVT_LE_PER_ADVERTISING_REPORT_V2, bt_hci_le_per_adv_report_v2,
		      sizeof(struct bt_hci_evt_le_per_advertising_report_v2)),
#if defined(CONFIG_BT_PER_ADV_SYNC_TRANSFER_RECEIVER)
	EVENT_HANDLER(BT_HCI_EVT_LE_PAST_RECEIVED_V2, bt_hci_le_past_received_v2,
		      sizeof(struct bt_hci_evt_le_past_received_v2)),
#endif /* CONFIG_BT_PER_ADV_SYNC_TRANSFER_RECEIVER */
	EVENT_HANDLER(BT_HCI_EVT_LE_PER_ADV_SYNC_ESTABLISHED_V2,
		      bt_hci_le_per_adv_sync_established_v2,
		      sizeof(struct bt_hci_evt_le_per_adv_sync_established_v2)),
#endif /* CONFIG_BT_PER_ADV_SYNC_RSP */
#if defined(CONFIG_BT_PER_ADV_RSP)
	EVENT_HANDLER(BT_HCI_EVT_LE_PER_ADV_SUBEVENT_DATA_REQUEST,
		      bt_hci_le_per_adv_subevent_data_request,
		      sizeof(struct bt_hci_evt_le_per_adv_subevent_data_request)),
	EVENT_HANDLER(BT_HCI_EVT_LE_PER_ADV_RESPONSE_REPORT, bt_hci_le_per_adv_response_report,
		      sizeof(struct bt_hci_evt_le_per_adv_response_report)),
#endif /* CONFIG_BT_PER_ADV_RSP */
#if defined(CONFIG_BT_CONN)
#if defined(CONFIG_BT_PER_ADV_RSP) || defined(CONFIG_BT_PER_ADV_SYNC_RSP)
	EVENT_HANDLER(BT_HCI_EVT_LE_ENH_CONN_COMPLETE_V2, le_enh_conn_complete_v2,
		      sizeof(struct bt_hci_evt_le_enh_conn_complete_v2)),
#endif /* CONFIG_BT_PER_ADV_RSP || CONFIG_BT_PER_ADV_SYNC_RSP */
#endif /* CONFIG_BT_CONN */

};
2857 
hci_le_meta_event(struct net_buf * buf)2858 static void hci_le_meta_event(struct net_buf *buf)
2859 {
2860 	struct bt_hci_evt_le_meta_event *evt;
2861 
2862 	evt = net_buf_pull_mem(buf, sizeof(*evt));
2863 
2864 	LOG_DBG("subevent 0x%02x", evt->subevent);
2865 
2866 	handle_event(evt->subevent, buf, meta_events, ARRAY_SIZE(meta_events));
2867 }
2868 
/* Dispatch table for HCI events handled in the normal (non-priority)
 * RX context. LE events arrive wrapped in BT_HCI_EVT_LE_META_EVENT and
 * are further dispatched through the meta_events table above. The size
 * passed to EVENT_HANDLER is presumably the minimum expected event
 * parameter length validated by handle_event (see EVENT_HANDLER macro).
 */
static const struct event_handler normal_events[] = {
	EVENT_HANDLER(BT_HCI_EVT_VENDOR, hci_vendor_event,
		      sizeof(struct bt_hci_evt_vs)),
	EVENT_HANDLER(BT_HCI_EVT_LE_META_EVENT, hci_le_meta_event,
		      sizeof(struct bt_hci_evt_le_meta_event)),
#if defined(CONFIG_BT_CLASSIC)
	EVENT_HANDLER(BT_HCI_EVT_CONN_REQUEST, bt_hci_conn_req,
		      sizeof(struct bt_hci_evt_conn_request)),
	EVENT_HANDLER(BT_HCI_EVT_CONN_COMPLETE, bt_hci_conn_complete,
		      sizeof(struct bt_hci_evt_conn_complete)),
	EVENT_HANDLER(BT_HCI_EVT_PIN_CODE_REQ, bt_hci_pin_code_req,
		      sizeof(struct bt_hci_evt_pin_code_req)),
	EVENT_HANDLER(BT_HCI_EVT_LINK_KEY_NOTIFY, bt_hci_link_key_notify,
		      sizeof(struct bt_hci_evt_link_key_notify)),
	EVENT_HANDLER(BT_HCI_EVT_LINK_KEY_REQ, bt_hci_link_key_req,
		      sizeof(struct bt_hci_evt_link_key_req)),
	EVENT_HANDLER(BT_HCI_EVT_IO_CAPA_RESP, bt_hci_io_capa_resp,
		      sizeof(struct bt_hci_evt_io_capa_resp)),
	EVENT_HANDLER(BT_HCI_EVT_IO_CAPA_REQ, bt_hci_io_capa_req,
		      sizeof(struct bt_hci_evt_io_capa_req)),
	EVENT_HANDLER(BT_HCI_EVT_SSP_COMPLETE, bt_hci_ssp_complete,
		      sizeof(struct bt_hci_evt_ssp_complete)),
	EVENT_HANDLER(BT_HCI_EVT_USER_CONFIRM_REQ, bt_hci_user_confirm_req,
		      sizeof(struct bt_hci_evt_user_confirm_req)),
	EVENT_HANDLER(BT_HCI_EVT_USER_PASSKEY_NOTIFY,
		      bt_hci_user_passkey_notify,
		      sizeof(struct bt_hci_evt_user_passkey_notify)),
	EVENT_HANDLER(BT_HCI_EVT_USER_PASSKEY_REQ, bt_hci_user_passkey_req,
		      sizeof(struct bt_hci_evt_user_passkey_req)),
	EVENT_HANDLER(BT_HCI_EVT_INQUIRY_COMPLETE, bt_hci_inquiry_complete,
		      sizeof(struct bt_hci_evt_inquiry_complete)),
	EVENT_HANDLER(BT_HCI_EVT_INQUIRY_RESULT_WITH_RSSI,
		      bt_hci_inquiry_result_with_rssi,
		      sizeof(struct bt_hci_evt_inquiry_result_with_rssi)),
	EVENT_HANDLER(BT_HCI_EVT_EXTENDED_INQUIRY_RESULT,
		      bt_hci_extended_inquiry_result,
		      sizeof(struct bt_hci_evt_extended_inquiry_result)),
	EVENT_HANDLER(BT_HCI_EVT_REMOTE_NAME_REQ_COMPLETE,
		      bt_hci_remote_name_request_complete,
		      sizeof(struct bt_hci_evt_remote_name_req_complete)),
	EVENT_HANDLER(BT_HCI_EVT_AUTH_COMPLETE, bt_hci_auth_complete,
		      sizeof(struct bt_hci_evt_auth_complete)),
	EVENT_HANDLER(BT_HCI_EVT_REMOTE_FEATURES,
		      bt_hci_read_remote_features_complete,
		      sizeof(struct bt_hci_evt_remote_features)),
	EVENT_HANDLER(BT_HCI_EVT_REMOTE_EXT_FEATURES,
		      bt_hci_read_remote_ext_features_complete,
		      sizeof(struct bt_hci_evt_remote_ext_features)),
	EVENT_HANDLER(BT_HCI_EVT_ROLE_CHANGE, bt_hci_role_change,
		      sizeof(struct bt_hci_evt_role_change)),
	EVENT_HANDLER(BT_HCI_EVT_SYNC_CONN_COMPLETE, bt_hci_synchronous_conn_complete,
		      sizeof(struct bt_hci_evt_sync_conn_complete)),
#endif /* CONFIG_BT_CLASSIC */
#if defined(CONFIG_BT_CONN)
	EVENT_HANDLER(BT_HCI_EVT_DISCONN_COMPLETE, hci_disconn_complete,
		      sizeof(struct bt_hci_evt_disconn_complete)),
#endif /* CONFIG_BT_CONN */
#if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_CLASSIC)
	EVENT_HANDLER(BT_HCI_EVT_ENCRYPT_CHANGE, hci_encrypt_change,
		      sizeof(struct bt_hci_evt_encrypt_change)),
	EVENT_HANDLER(BT_HCI_EVT_ENCRYPT_KEY_REFRESH_COMPLETE,
		      hci_encrypt_key_refresh_complete,
		      sizeof(struct bt_hci_evt_encrypt_key_refresh_complete)),
#endif /* CONFIG_BT_SMP || CONFIG_BT_CLASSIC */
#if defined(CONFIG_BT_REMOTE_VERSION)
	EVENT_HANDLER(BT_HCI_EVT_REMOTE_VERSION_INFO,
		      bt_hci_evt_read_remote_version_complete,
		      sizeof(struct bt_hci_evt_remote_version_info)),
#endif /* CONFIG_BT_REMOTE_VERSION */
	EVENT_HANDLER(BT_HCI_EVT_HARDWARE_ERROR, hci_hardware_error,
		      sizeof(struct bt_hci_evt_hardware_error)),
};
2941 
2942 
/* Flags describing in which RX context(s) an HCI event is processed. */
#define BT_HCI_EVT_FLAG_RECV_PRIO BIT(0)
#define BT_HCI_EVT_FLAG_RECV      BIT(1)

/** @brief Get HCI event flags.
 *
 * Helper for the HCI driver to get HCI event flags that describe the
 * rules that must be followed.
 *
 * @param evt HCI event code.
 *
 * @return HCI event flags for the specified event.
 */
static inline uint8_t bt_hci_evt_get_flags(uint8_t evt)
{
	switch (evt) {
	case BT_HCI_EVT_DISCONN_COMPLETE:
		/* Delivered to both the priority and the normal RX path. */
		return BT_HCI_EVT_FLAG_RECV | BT_HCI_EVT_FLAG_RECV_PRIO;
#if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_ISO)
	case BT_HCI_EVT_NUM_COMPLETED_PACKETS:
#if defined(CONFIG_BT_CONN)
	case BT_HCI_EVT_DATA_BUF_OVERFLOW:
		__fallthrough;
#endif /* defined(CONFIG_BT_CONN) */
#endif /* CONFIG_BT_CONN ||  CONFIG_BT_ISO */
	case BT_HCI_EVT_CMD_COMPLETE:
	case BT_HCI_EVT_CMD_STATUS:
		/* Flow-control and command-response events are handled
		 * exclusively in the priority RX context.
		 */
		return BT_HCI_EVT_FLAG_RECV_PRIO;
	default:
		return BT_HCI_EVT_FLAG_RECV;
	}
}
2975 
hci_event(struct net_buf * buf)2976 static void hci_event(struct net_buf *buf)
2977 {
2978 	struct bt_hci_evt_hdr *hdr;
2979 
2980 	if (buf->len < sizeof(*hdr)) {
2981 		LOG_ERR("Invalid HCI event size (%u)", buf->len);
2982 		net_buf_unref(buf);
2983 		return;
2984 	}
2985 
2986 	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
2987 	LOG_DBG("event 0x%02x", hdr->evt);
2988 	BT_ASSERT(bt_hci_evt_get_flags(hdr->evt) & BT_HCI_EVT_FLAG_RECV);
2989 
2990 	handle_event(hdr->evt, buf, normal_events, ARRAY_SIZE(normal_events));
2991 
2992 	net_buf_unref(buf);
2993 }
2994 
/* Dequeue the next pending HCI command and hand it to the driver.
 *
 * A reference to the buffer is stored in bt_dev.sent_cmd so the
 * eventual Command Complete/Status event can be matched back to it.
 * The caller is expected to hold a slot of ncmd_sem; it is given back
 * here on send failure.
 */
static void hci_core_send_cmd(void)
{
	struct net_buf *buf;
	int err;

	/* Get next command */
	LOG_DBG("fetch cmd");
	buf = net_buf_get(&bt_dev.cmd_tx_queue, K_NO_WAIT);
	BT_ASSERT(buf);

	/* Clear out any existing sent command */
	if (bt_dev.sent_cmd) {
		LOG_ERR("Uncleared pending sent_cmd");
		net_buf_unref(bt_dev.sent_cmd);
		bt_dev.sent_cmd = NULL;
	}

	bt_dev.sent_cmd = net_buf_ref(buf);

	LOG_DBG("Sending command 0x%04x (buf %p) to driver", cmd(buf)->opcode, buf);

	err = bt_send(buf);
	if (err) {
		LOG_ERR("Unable to send to driver (err %d)", err);
		k_sem_give(&bt_dev.ncmd_sem);
		/* Complete the command locally with an error status so
		 * any synchronous waiter is unblocked.
		 */
		hci_cmd_done(cmd(buf)->opcode, BT_HCI_ERR_UNSPECIFIED, buf);
		net_buf_unref(buf);
		bt_tx_irq_raise();
	}
}
3025 
/* EV_COUNT: number of events to monitor; the breakdown for each
 * configuration is given by the per-case comments below.
 */
#if defined(CONFIG_BT_CONN)
#if defined(CONFIG_BT_ISO)
/* command FIFO + conn_change signal + MAX_CONN + ISO_MAX_CHAN */
#define EV_COUNT (2 + CONFIG_BT_MAX_CONN + CONFIG_BT_ISO_MAX_CHAN)
#else
/* command FIFO + conn_change signal + MAX_CONN */
#define EV_COUNT (2 + CONFIG_BT_MAX_CONN)
#endif /* CONFIG_BT_ISO */
#else
#if defined(CONFIG_BT_ISO)
/* command FIFO + conn_change signal + ISO_MAX_CHAN */
#define EV_COUNT (2 + CONFIG_BT_ISO_MAX_CHAN)
#else
/* command FIFO */
#define EV_COUNT 1
#endif /* CONFIG_BT_ISO */
#endif /* CONFIG_BT_CONN */
3043 
read_local_ver_complete(struct net_buf * buf)3044 static void read_local_ver_complete(struct net_buf *buf)
3045 {
3046 	struct bt_hci_rp_read_local_version_info *rp = (void *)buf->data;
3047 
3048 	LOG_DBG("status 0x%02x %s", rp->status, bt_hci_err_to_str(rp->status));
3049 
3050 	bt_dev.hci_version = rp->hci_version;
3051 	bt_dev.hci_revision = sys_le16_to_cpu(rp->hci_revision);
3052 	bt_dev.lmp_version = rp->lmp_version;
3053 	bt_dev.lmp_subversion = sys_le16_to_cpu(rp->lmp_subversion);
3054 	bt_dev.manufacturer = sys_le16_to_cpu(rp->manufacturer);
3055 }
3056 
read_le_features_complete(struct net_buf * buf)3057 static void read_le_features_complete(struct net_buf *buf)
3058 {
3059 	struct bt_hci_rp_le_read_local_features *rp = (void *)buf->data;
3060 
3061 	LOG_DBG("status 0x%02x %s", rp->status, bt_hci_err_to_str(rp->status));
3062 
3063 	memcpy(bt_dev.le.features, rp->features, sizeof(bt_dev.le.features));
3064 }
3065 
3066 #if defined(CONFIG_BT_CONN)
3067 #if !defined(CONFIG_BT_CLASSIC)
read_buffer_size_complete(struct net_buf * buf)3068 static void read_buffer_size_complete(struct net_buf *buf)
3069 {
3070 	struct bt_hci_rp_read_buffer_size *rp = (void *)buf->data;
3071 	uint16_t pkts;
3072 
3073 	LOG_DBG("status 0x%02x %s", rp->status, bt_hci_err_to_str(rp->status));
3074 
3075 	/* If LE-side has buffers we can ignore the BR/EDR values */
3076 	if (bt_dev.le.acl_mtu) {
3077 		return;
3078 	}
3079 
3080 	bt_dev.le.acl_mtu = sys_le16_to_cpu(rp->acl_max_len);
3081 	pkts = sys_le16_to_cpu(rp->acl_max_num);
3082 
3083 	LOG_DBG("ACL BR/EDR buffers: pkts %u mtu %u", pkts, bt_dev.le.acl_mtu);
3084 
3085 	k_sem_init(&bt_dev.le.acl_pkts, pkts, pkts);
3086 }
3087 #endif /* !defined(CONFIG_BT_CLASSIC) */
3088 #endif /* CONFIG_BT_CONN */
3089 
le_read_buffer_size_complete(struct net_buf * buf)3090 static void le_read_buffer_size_complete(struct net_buf *buf)
3091 {
3092 	struct bt_hci_rp_le_read_buffer_size *rp = (void *)buf->data;
3093 
3094 	LOG_DBG("status 0x%02x %s", rp->status, bt_hci_err_to_str(rp->status));
3095 
3096 #if defined(CONFIG_BT_CONN)
3097 	uint16_t acl_mtu = sys_le16_to_cpu(rp->le_max_len);
3098 
3099 	if (!acl_mtu || !rp->le_max_num) {
3100 		return;
3101 	}
3102 
3103 	bt_dev.le.acl_mtu = acl_mtu;
3104 
3105 	LOG_DBG("ACL LE buffers: pkts %u mtu %u", rp->le_max_num, bt_dev.le.acl_mtu);
3106 
3107 	k_sem_init(&bt_dev.le.acl_pkts, rp->le_max_num, rp->le_max_num);
3108 #endif /* CONFIG_BT_CONN */
3109 }
3110 
/* LE Read Buffer Size V2 response handler: record ACL and ISO buffer
 * counts/MTUs (ISO builds only; a no-op otherwise).
 */
static void read_buffer_size_v2_complete(struct net_buf *buf)
{
#if defined(CONFIG_BT_ISO)
	struct bt_hci_rp_le_read_buffer_size_v2 *rp = (void *)buf->data;

	LOG_DBG("status %u %s", rp->status, bt_hci_err_to_str(rp->status));

#if defined(CONFIG_BT_CONN)
	uint16_t acl_mtu = sys_le16_to_cpu(rp->acl_max_len);

	/* ACL values of zero are simply skipped, unlike the ISO values
	 * below which are treated as an error.
	 */
	if (acl_mtu && rp->acl_max_num) {
		bt_dev.le.acl_mtu = acl_mtu;
		LOG_DBG("ACL LE buffers: pkts %u mtu %u", rp->acl_max_num, bt_dev.le.acl_mtu);

		k_sem_init(&bt_dev.le.acl_pkts, rp->acl_max_num, rp->acl_max_num);
	}
#endif /* CONFIG_BT_CONN */

	uint16_t iso_mtu = sys_le16_to_cpu(rp->iso_max_len);

	if (!iso_mtu || !rp->iso_max_num) {
		LOG_ERR("ISO buffer size not set");
		return;
	}

	bt_dev.le.iso_mtu = iso_mtu;

	LOG_DBG("ISO buffers: pkts %u mtu %u", rp->iso_max_num, bt_dev.le.iso_mtu);

	k_sem_init(&bt_dev.le.iso_pkts, rp->iso_max_num, rp->iso_max_num);
	bt_dev.le.iso_limit = rp->iso_max_num;
#endif /* CONFIG_BT_ISO */
}
3144 
/* Send the LE Set Host Feature command for a single feature bit.
 *
 * @param bit_number Feature bit to set/clear.
 * @param bit_value  New value for the bit (0 or 1).
 *
 * @return 0 on success, -ENOBUFS if no command buffer is available, or
 *         an error from bt_hci_cmd_send_sync().
 */
static int le_set_host_feature(uint8_t bit_number, uint8_t bit_value)
{
	struct net_buf *cmd_buf;
	struct bt_hci_cp_le_set_host_feature *cp;

	cmd_buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_HOST_FEATURE, sizeof(*cp));
	if (cmd_buf == NULL) {
		return -ENOBUFS;
	}

	cp = net_buf_add(cmd_buf, sizeof(*cp));
	cp->bit_number = bit_number;
	cp->bit_value = bit_value;

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_HOST_FEATURE, cmd_buf, NULL);
}
3161 
read_supported_commands_complete(struct net_buf * buf)3162 static void read_supported_commands_complete(struct net_buf *buf)
3163 {
3164 	struct bt_hci_rp_read_supported_commands *rp = (void *)buf->data;
3165 
3166 	LOG_DBG("status 0x%02x %s", rp->status, bt_hci_err_to_str(rp->status));
3167 
3168 	memcpy(bt_dev.supported_commands, rp->commands,
3169 	       sizeof(bt_dev.supported_commands));
3170 
3171 	/* Report additional HCI commands used for ECDH as
3172 	 * supported if TinyCrypt ECC is used for emulation.
3173 	 */
3174 	if (IS_ENABLED(CONFIG_BT_TINYCRYPT_ECC)) {
3175 		bt_hci_ecc_supported_commands(bt_dev.supported_commands);
3176 	}
3177 }
3178 
read_local_features_complete(struct net_buf * buf)3179 static void read_local_features_complete(struct net_buf *buf)
3180 {
3181 	struct bt_hci_rp_read_local_features *rp = (void *)buf->data;
3182 
3183 	LOG_DBG("status 0x%02x %s", rp->status, bt_hci_err_to_str(rp->status));
3184 
3185 	memcpy(bt_dev.features[0], rp->features, sizeof(bt_dev.features[0]));
3186 }
3187 
le_read_supp_states_complete(struct net_buf * buf)3188 static void le_read_supp_states_complete(struct net_buf *buf)
3189 {
3190 	struct bt_hci_rp_le_read_supp_states *rp = (void *)buf->data;
3191 
3192 	LOG_DBG("status 0x%02x %s", rp->status, bt_hci_err_to_str(rp->status));
3193 
3194 	bt_dev.le.states = sys_get_le64(rp->le_states);
3195 }
3196 
3197 #if defined(CONFIG_BT_BROADCASTER)
le_read_maximum_adv_data_len_complete(struct net_buf * buf)3198 static void le_read_maximum_adv_data_len_complete(struct net_buf *buf)
3199 {
3200 	struct bt_hci_rp_le_read_max_adv_data_len *rp = (void *)buf->data;
3201 
3202 	LOG_DBG("status 0x%02x %s", rp->status, bt_hci_err_to_str(rp->status));
3203 
3204 	bt_dev.le.max_adv_data_len = sys_le16_to_cpu(rp->max_adv_data_len);
3205 }
3206 #endif /* CONFIG_BT_BROADCASTER */
3207 
3208 #if defined(CONFIG_BT_SMP)
le_read_resolving_list_size_complete(struct net_buf * buf)3209 static void le_read_resolving_list_size_complete(struct net_buf *buf)
3210 {
3211 	struct bt_hci_rp_le_read_rl_size *rp = (void *)buf->data;
3212 
3213 	LOG_DBG("Resolving List size %u", rp->rl_size);
3214 
3215 	bt_dev.le.rl_size = rp->rl_size;
3216 }
3217 #endif /* defined(CONFIG_BT_SMP) */
3218 
/* Transport-agnostic controller bring-up: optional HCI Reset followed
 * by reading local features, version and supported commands, PRNG
 * initialization and (when enabled) host flow control setup.
 *
 * @return 0 on success, a negative errno/HCI-derived error otherwise.
 */
static int common_init(void)
{
	struct net_buf *rsp;
	int err;

	if (!drv_quirk_no_reset()) {
		/* Send HCI_RESET */
		err = bt_hci_cmd_send_sync(BT_HCI_OP_RESET, NULL, &rsp);
		if (err) {
			return err;
		}
		hci_reset_complete(rsp);
		net_buf_unref(rsp);
	}

	/* Read Local Supported Features */
	err = bt_hci_cmd_send_sync(BT_HCI_OP_READ_LOCAL_FEATURES, NULL, &rsp);
	if (err) {
		return err;
	}
	read_local_features_complete(rsp);
	net_buf_unref(rsp);

	/* Read Local Version Information */
	err = bt_hci_cmd_send_sync(BT_HCI_OP_READ_LOCAL_VERSION_INFO, NULL,
				   &rsp);
	if (err) {
		return err;
	}
	read_local_ver_complete(rsp);
	net_buf_unref(rsp);

	/* Read Local Supported Commands */
	err = bt_hci_cmd_send_sync(BT_HCI_OP_READ_SUPPORTED_COMMANDS, NULL,
				   &rsp);
	if (err) {
		return err;
	}
	read_supported_commands_complete(rsp);
	net_buf_unref(rsp);

	if (IS_ENABLED(CONFIG_BT_HOST_CRYPTO_PRNG)) {
		/* Initialize the PRNG so that it is safe to use it later
		 * on in the initialization process.
		 */
		err = prng_init();
		if (err) {
			return err;
		}
	}

#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
	err = set_flow_control();
	if (err) {
		return err;
	}
#endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */

	return 0;
}
3279 
/* Build and send the LE Set Event Mask command.
 *
 * Bits are only set when the corresponding host functionality is
 * enabled in Kconfig and, where applicable, the controller reports
 * support for the feature, so the controller never emits events the
 * host would not handle.
 *
 * @return 0 on success, -ENOBUFS if no command buffer is available, or
 *         an error from bt_hci_cmd_send_sync().
 */
static int le_set_event_mask(void)
{
	struct bt_hci_cp_le_set_event_mask *cp_mask;
	struct net_buf *buf;
	uint64_t mask = 0U;

	/* Set LE event mask */
	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_EVENT_MASK, sizeof(*cp_mask));
	if (!buf) {
		return -ENOBUFS;
	}

	cp_mask = net_buf_add(buf, sizeof(*cp_mask));

	mask |= BT_EVT_MASK_LE_ADVERTISING_REPORT;

	if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
	    BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
		mask |= BT_EVT_MASK_LE_ADV_SET_TERMINATED;
		mask |= BT_EVT_MASK_LE_SCAN_REQ_RECEIVED;
		mask |= BT_EVT_MASK_LE_EXT_ADVERTISING_REPORT;
		mask |= BT_EVT_MASK_LE_SCAN_TIMEOUT;
		if (IS_ENABLED(CONFIG_BT_PER_ADV_SYNC)) {
			mask |= BT_EVT_MASK_LE_PER_ADV_SYNC_ESTABLISHED;
			mask |= BT_EVT_MASK_LE_PER_ADVERTISING_REPORT;
			mask |= BT_EVT_MASK_LE_PER_ADV_SYNC_LOST;
			mask |= BT_EVT_MASK_LE_PAST_RECEIVED;
		}
	}

	if (IS_ENABLED(CONFIG_BT_CONN)) {
		if ((IS_ENABLED(CONFIG_BT_SMP) &&
		     BT_FEAT_LE_PRIVACY(bt_dev.le.features)) ||
		    (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
		     BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features))) {
			/* C24:
			 * Mandatory if the LE Controller supports Connection
			 * State and either LE Feature (LL Privacy) or
			 * LE Feature (Extended Advertising) is supported, ...
			 */
			mask |= BT_EVT_MASK_LE_ENH_CONN_COMPLETE;
		} else {
			mask |= BT_EVT_MASK_LE_CONN_COMPLETE;
		}

		mask |= BT_EVT_MASK_LE_CONN_UPDATE_COMPLETE;
		mask |= BT_EVT_MASK_LE_REMOTE_FEAT_COMPLETE;

		if (BT_FEAT_LE_CONN_PARAM_REQ_PROC(bt_dev.le.features)) {
			mask |= BT_EVT_MASK_LE_CONN_PARAM_REQ;
		}

		if (IS_ENABLED(CONFIG_BT_DATA_LEN_UPDATE) &&
		    BT_FEAT_LE_DLE(bt_dev.le.features)) {
			mask |= BT_EVT_MASK_LE_DATA_LEN_CHANGE;
		}

		if (IS_ENABLED(CONFIG_BT_PHY_UPDATE) &&
		    (BT_FEAT_LE_PHY_2M(bt_dev.le.features) ||
		     BT_FEAT_LE_PHY_CODED(bt_dev.le.features))) {
			mask |= BT_EVT_MASK_LE_PHY_UPDATE_COMPLETE;
		}
		if (IS_ENABLED(CONFIG_BT_TRANSMIT_POWER_CONTROL)) {
			mask |= BT_EVT_MASK_LE_TRANSMIT_POWER_REPORTING;
		}

		if (IS_ENABLED(CONFIG_BT_PATH_LOSS_MONITORING)) {
			mask |= BT_EVT_MASK_LE_PATH_LOSS_THRESHOLD;
		}
	}

	if (IS_ENABLED(CONFIG_BT_SMP) &&
	    BT_FEAT_LE_ENCR(bt_dev.le.features)) {
		mask |= BT_EVT_MASK_LE_LTK_REQUEST;
	}

	/*
	 * If "LE Read Local P-256 Public Key" and "LE Generate DH Key" are
	 * supported we need to enable events generated by those commands.
	 */
	if (IS_ENABLED(CONFIG_BT_ECC) &&
	    (BT_CMD_TEST(bt_dev.supported_commands, 34, 1)) &&
	    (BT_CMD_TEST(bt_dev.supported_commands, 34, 2))) {
		mask |= BT_EVT_MASK_LE_P256_PUBLIC_KEY_COMPLETE;
		mask |= BT_EVT_MASK_LE_GENERATE_DHKEY_COMPLETE;
	}

	/*
	 * Enable CIS events only if ISO connections are enabled and controller
	 * support them.
	 */
	if (IS_ENABLED(CONFIG_BT_ISO) &&
	    BT_FEAT_LE_CIS(bt_dev.le.features)) {
		mask |= BT_EVT_MASK_LE_CIS_ESTABLISHED;
		if (BT_FEAT_LE_CIS_PERIPHERAL(bt_dev.le.features)) {
			mask |= BT_EVT_MASK_LE_CIS_REQ;
		}
	}

	/* Enable BIS events for broadcaster and/or receiver */
	if (IS_ENABLED(CONFIG_BT_ISO) && BT_FEAT_LE_BIS(bt_dev.le.features)) {
		if (IS_ENABLED(CONFIG_BT_ISO_BROADCASTER) &&
		    BT_FEAT_LE_ISO_BROADCASTER(bt_dev.le.features)) {
			mask |= BT_EVT_MASK_LE_BIG_COMPLETE;
			mask |= BT_EVT_MASK_LE_BIG_TERMINATED;
		}
		if (IS_ENABLED(CONFIG_BT_ISO_SYNC_RECEIVER) &&
		    BT_FEAT_LE_SYNC_RECEIVER(bt_dev.le.features)) {
			mask |= BT_EVT_MASK_LE_BIG_SYNC_ESTABLISHED;
			mask |= BT_EVT_MASK_LE_BIG_SYNC_LOST;
			mask |= BT_EVT_MASK_LE_BIGINFO_ADV_REPORT;
		}
	}

	/* Enable IQ samples report events receiver */
	if (IS_ENABLED(CONFIG_BT_DF_CONNECTIONLESS_CTE_RX)) {
		mask |= BT_EVT_MASK_LE_CONNECTIONLESS_IQ_REPORT;
	}

	if (IS_ENABLED(CONFIG_BT_DF_CONNECTION_CTE_RX)) {
		mask |= BT_EVT_MASK_LE_CONNECTION_IQ_REPORT;
		mask |= BT_EVT_MASK_LE_CTE_REQUEST_FAILED;
	}

	if (IS_ENABLED(CONFIG_BT_PER_ADV_RSP)) {
		mask |= BT_EVT_MASK_LE_PER_ADV_SUBEVENT_DATA_REQ;
		mask |= BT_EVT_MASK_LE_PER_ADV_RESPONSE_REPORT;
	}

	if (IS_ENABLED(CONFIG_BT_PER_ADV_SYNC_RSP)) {
		mask |= BT_EVT_MASK_LE_PER_ADVERTISING_REPORT_V2;
		mask |= BT_EVT_MASK_LE_PER_ADV_SYNC_ESTABLISHED_V2;
		mask |= BT_EVT_MASK_LE_PAST_RECEIVED_V2;
	}

	if (IS_ENABLED(CONFIG_BT_CONN) &&
	    (IS_ENABLED(CONFIG_BT_PER_ADV_RSP) || IS_ENABLED(CONFIG_BT_PER_ADV_SYNC_RSP))) {
		mask |= BT_EVT_MASK_LE_ENH_CONN_COMPLETE_V2;
	}

	sys_put_le64(mask, cp_mask->events);
	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_EVENT_MASK, buf, NULL);
}
3423 
/* ISO-specific init: advertise host support for CIS (unicast builds)
 * and read buffer sizes via Read Buffer Size V2 when available, or the
 * plain LE Read Buffer Size command as an ACL-only fallback.
 *
 * @return 0 on success, or a negative error code.
 */
static int le_init_iso(void)
{
	int err;
	struct net_buf *rsp;

	if (IS_ENABLED(CONFIG_BT_ISO_UNICAST)) {
		/* Set Connected Isochronous Streams - Host support */
		err = le_set_host_feature(BT_LE_FEAT_BIT_ISO_CHANNELS, 1);
		if (err) {
			return err;
		}
	}

	/* Octet 41, bit 5 is read buffer size V2 */
	if (BT_CMD_TEST(bt_dev.supported_commands, 41, 5)) {
		/* Read ISO Buffer Size V2 */
		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_BUFFER_SIZE_V2,
					   NULL, &rsp);
		if (err) {
			return err;
		}

		read_buffer_size_v2_complete(rsp);

		net_buf_unref(rsp);
	} else if (IS_ENABLED(CONFIG_BT_CONN_TX)) {
		if (IS_ENABLED(CONFIG_BT_ISO_TX)) {
			LOG_WRN("Read Buffer Size V2 command is not supported. "
				"No ISO TX buffers will be available");
		}

		/* Read LE Buffer Size in the case that we support ACL without TX ISO (e.g. if we
		 * only support ISO sync receiver).
		 */
		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_BUFFER_SIZE,
					   NULL, &rsp);
		if (err) {
			return err;
		}

		le_read_buffer_size_complete(rsp);

		net_buf_unref(rsp);
	}

	return 0;
}
3471 
/* LE-specific controller initialization: read LE features and buffer
 * sizes, configure ISO/DF support where available, set default data
 * length and RPA timeout, read supported states/resolving list size,
 * and finally set the LE event mask.
 *
 * @return 0 on success, -ENODEV for a non-LE controller, -ENOBUFS when
 *         out of command buffers, or another negative error code.
 */
static int le_init(void)
{
	struct bt_hci_cp_write_le_host_supp *cp_le;
	struct net_buf *buf, *rsp;
	int err;

	/* For now we only support LE capable controllers */
	if (!BT_FEAT_LE(bt_dev.features)) {
		LOG_ERR("Non-LE capable controller detected!");
		return -ENODEV;
	}

	/* Read Low Energy Supported Features */
	err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_LOCAL_FEATURES, NULL,
				   &rsp);
	if (err) {
		return err;
	}

	read_le_features_complete(rsp);
	net_buf_unref(rsp);

	if (IS_ENABLED(CONFIG_BT_ISO) &&
	    BT_FEAT_LE_ISO(bt_dev.le.features)) {
		err = le_init_iso();
		if (err) {
			return err;
		}
	} else if (IS_ENABLED(CONFIG_BT_CONN)) {
		/* Read LE Buffer Size */
		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_BUFFER_SIZE,
					   NULL, &rsp);
		if (err) {
			return err;
		}

		le_read_buffer_size_complete(rsp);

		net_buf_unref(rsp);
	}

#if defined(CONFIG_BT_BROADCASTER)
	if (IS_ENABLED(CONFIG_BT_EXT_ADV) && BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
		/* Read LE Max Adv Data Len */
		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_MAX_ADV_DATA_LEN, NULL, &rsp);
		if (err == 0) {
			le_read_maximum_adv_data_len_complete(rsp);
			net_buf_unref(rsp);
		} else if (err == -EIO) {
			LOG_WRN("Controller does not support 'LE_READ_MAX_ADV_DATA_LEN'. "
				"Assuming maximum length is 31 bytes.");
			bt_dev.le.max_adv_data_len = 31;
		} else {
			return err;
		}
	} else {
		/* Legacy advertising: PDU payload is limited to 31 bytes. */
		bt_dev.le.max_adv_data_len = 31;
	}
#endif /* CONFIG_BT_BROADCASTER */

	if (BT_FEAT_BREDR(bt_dev.features)) {
		buf = bt_hci_cmd_create(BT_HCI_OP_LE_WRITE_LE_HOST_SUPP,
					sizeof(*cp_le));
		if (!buf) {
			return -ENOBUFS;
		}

		cp_le = net_buf_add(buf, sizeof(*cp_le));

		/* Explicitly enable LE for dual-mode controllers */
		cp_le->le = 0x01;
		cp_le->simul = 0x00;
		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_WRITE_LE_HOST_SUPP, buf,
					   NULL);
		if (err) {
			return err;
		}
	}

	/* Read LE Supported States */
	if (BT_CMD_LE_STATES(bt_dev.supported_commands)) {
		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_SUPP_STATES, NULL,
					   &rsp);
		if (err) {
			return err;
		}

		le_read_supp_states_complete(rsp);
		net_buf_unref(rsp);
	}

	if (IS_ENABLED(CONFIG_BT_CONN) &&
	    IS_ENABLED(CONFIG_BT_DATA_LEN_UPDATE) &&
	    IS_ENABLED(CONFIG_BT_AUTO_DATA_LEN_UPDATE) &&
	    BT_FEAT_LE_DLE(bt_dev.le.features)) {
		struct bt_hci_cp_le_write_default_data_len *cp;
		uint16_t tx_octets, tx_time;

		/* Default new connections to the maximum the controller
		 * supports.
		 */
		err = hci_le_read_max_data_len(&tx_octets, &tx_time);
		if (err) {
			return err;
		}

		buf = bt_hci_cmd_create(BT_HCI_OP_LE_WRITE_DEFAULT_DATA_LEN,
					sizeof(*cp));
		if (!buf) {
			return -ENOBUFS;
		}

		cp = net_buf_add(buf, sizeof(*cp));
		cp->max_tx_octets = sys_cpu_to_le16(tx_octets);
		cp->max_tx_time = sys_cpu_to_le16(tx_time);

		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_WRITE_DEFAULT_DATA_LEN,
					   buf, NULL);
		if (err) {
			return err;
		}
	}

#if defined(CONFIG_BT_SMP)
	if (BT_FEAT_LE_PRIVACY(bt_dev.le.features)) {
#if defined(CONFIG_BT_PRIVACY)
		struct bt_hci_cp_le_set_rpa_timeout *cp;

		buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_RPA_TIMEOUT,
					sizeof(*cp));
		if (!buf) {
			return -ENOBUFS;
		}

		cp = net_buf_add(buf, sizeof(*cp));
		cp->rpa_timeout = sys_cpu_to_le16(bt_dev.rpa_timeout);
		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_RPA_TIMEOUT, buf,
					   NULL);
		if (err) {
			return err;
		}
#endif /* defined(CONFIG_BT_PRIVACY) */

		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_RL_SIZE, NULL,
					   &rsp);
		if (err) {
			return err;
		}
		le_read_resolving_list_size_complete(rsp);
		net_buf_unref(rsp);
	}
#endif

#if defined(CONFIG_BT_DF)
	if (BT_FEAT_LE_CONNECTIONLESS_CTE_TX(bt_dev.le.features) ||
	    BT_FEAT_LE_CONNECTIONLESS_CTE_RX(bt_dev.le.features) ||
	    BT_FEAT_LE_RX_CTE(bt_dev.le.features)) {
		err = le_df_init();
		if (err) {
			return err;
		}
	}
#endif /* CONFIG_BT_DF */

	return  le_set_event_mask();
}
3635 
3636 #if !defined(CONFIG_BT_CLASSIC)
/* Stub BR/EDR init for LE-only builds: on connection-capable hosts,
 * fall back to the BR/EDR buffer sizes when LE reported none.
 */
static int bt_br_init(void)
{
#if defined(CONFIG_BT_CONN)
	int err;
	struct net_buf *rsp;

	/* Nothing to do when LE buffer sizes were already configured. */
	if (bt_dev.le.acl_mtu) {
		return 0;
	}

	/* Use BR/EDR buffer size if LE reports zero buffers */
	err = bt_hci_cmd_send_sync(BT_HCI_OP_READ_BUFFER_SIZE, NULL, &rsp);
	if (err) {
		return err;
	}

	read_buffer_size_complete(rsp);
	net_buf_unref(rsp);
#endif /* CONFIG_BT_CONN */

	return 0;
}
3659 #endif /* !defined(CONFIG_BT_CLASSIC) */
3660 
/* Build and send the (non-LE) Set Event Mask command. BR/EDR events
 * are only unmasked on dual-mode (Classic) builds; all LE traffic is
 * delivered through BT_EVT_MASK_LE_META_EVENT.
 *
 * @return 0 on success, -ENOBUFS if no command buffer is available, or
 *         an error from bt_hci_cmd_send_sync().
 */
static int set_event_mask(void)
{
	struct bt_hci_cp_set_event_mask *ev;
	struct net_buf *buf;
	uint64_t mask = 0U;

	buf = bt_hci_cmd_create(BT_HCI_OP_SET_EVENT_MASK, sizeof(*ev));
	if (!buf) {
		return -ENOBUFS;
	}

	ev = net_buf_add(buf, sizeof(*ev));

	if (IS_ENABLED(CONFIG_BT_CLASSIC)) {
		/* Since we require LE support, we can count on a
		 * Bluetooth 4.0 feature set
		 */
		mask |= BT_EVT_MASK_INQUIRY_COMPLETE;
		mask |= BT_EVT_MASK_CONN_COMPLETE;
		mask |= BT_EVT_MASK_CONN_REQUEST;
		mask |= BT_EVT_MASK_AUTH_COMPLETE;
		mask |= BT_EVT_MASK_REMOTE_NAME_REQ_COMPLETE;
		mask |= BT_EVT_MASK_REMOTE_FEATURES;
		mask |= BT_EVT_MASK_ROLE_CHANGE;
		mask |= BT_EVT_MASK_PIN_CODE_REQ;
		mask |= BT_EVT_MASK_LINK_KEY_REQ;
		mask |= BT_EVT_MASK_LINK_KEY_NOTIFY;
		mask |= BT_EVT_MASK_INQUIRY_RESULT_WITH_RSSI;
		mask |= BT_EVT_MASK_REMOTE_EXT_FEATURES;
		mask |= BT_EVT_MASK_SYNC_CONN_COMPLETE;
		mask |= BT_EVT_MASK_EXTENDED_INQUIRY_RESULT;
		mask |= BT_EVT_MASK_IO_CAPA_REQ;
		mask |= BT_EVT_MASK_IO_CAPA_RESP;
		mask |= BT_EVT_MASK_USER_CONFIRM_REQ;
		mask |= BT_EVT_MASK_USER_PASSKEY_REQ;
		mask |= BT_EVT_MASK_SSP_COMPLETE;
		mask |= BT_EVT_MASK_USER_PASSKEY_NOTIFY;
	}

	mask |= BT_EVT_MASK_HARDWARE_ERROR;
	mask |= BT_EVT_MASK_DATA_BUFFER_OVERFLOW;
	mask |= BT_EVT_MASK_LE_META_EVENT;

	if (IS_ENABLED(CONFIG_BT_CONN)) {
		mask |= BT_EVT_MASK_DISCONN_COMPLETE;
		mask |= BT_EVT_MASK_REMOTE_VERSION_INFO;
	}

	if (IS_ENABLED(CONFIG_BT_SMP) &&
	    BT_FEAT_LE_ENCR(bt_dev.le.features)) {
		mask |= BT_EVT_MASK_ENCRYPT_CHANGE;
		mask |= BT_EVT_MASK_ENCRYPT_KEY_REFRESH_COMPLETE;
	}

	sys_put_le64(mask, ev->events);
	return bt_hci_cmd_send_sync(BT_HCI_OP_SET_EVENT_MASK, buf, NULL);
}
3718 
/* Map a Bluetooth core specification version number to its textual
 * form, or "unknown" for versions newer than this table.
 */
const char *bt_hci_get_ver_str(uint8_t core_version)
{
	static const char * const version_names[] = {
		"1.0b", "1.1", "1.2", "2.0", "2.1", "3.0", "4.0", "4.1", "4.2",
		"5.0", "5.1", "5.2", "5.3", "5.4"
	};

	if (core_version >= (sizeof(version_names) / sizeof(version_names[0]))) {
		return "unknown";
	}

	return version_names[core_version];
}
3732 
/* Log the configured identity addresses, their IRKs (only when sniffer
 * logging is enabled on privacy builds) and the controller's HCI/LMP
 * version information.
 */
static void bt_dev_show_info(void)
{
	int i;

	LOG_INF("Identity%s: %s", bt_dev.id_count > 1 ? "[0]" : "",
		bt_addr_le_str(&bt_dev.id_addr[0]));

	if (IS_ENABLED(CONFIG_BT_LOG_SNIFFER_INFO)) {
#if defined(CONFIG_BT_PRIVACY)
		uint8_t irk[16];

		/* IRKs are logged byte-swapped (big-endian display order). */
		sys_memcpy_swap(irk, bt_dev.irk[0], 16);
		LOG_INF("IRK%s: 0x%s", bt_dev.id_count > 1 ? "[0]" : "", bt_hex(irk, 16));
#endif
	}

	for (i = 1; i < bt_dev.id_count; i++) {
		LOG_INF("Identity[%d]: %s", i, bt_addr_le_str(&bt_dev.id_addr[i]));

		if (IS_ENABLED(CONFIG_BT_LOG_SNIFFER_INFO)) {
#if defined(CONFIG_BT_PRIVACY)
			uint8_t irk[16];

			sys_memcpy_swap(irk, bt_dev.irk[i], 16);
			LOG_INF("IRK[%d]: 0x%s", i, bt_hex(irk, 16));
#endif
		}
	}

	if (IS_ENABLED(CONFIG_BT_SMP) &&
	    IS_ENABLED(CONFIG_BT_LOG_SNIFFER_INFO)) {
		bt_keys_foreach_type(BT_KEYS_ALL, bt_keys_show_sniffer_info, NULL);
	}

	LOG_INF("HCI: version %s (0x%02x) revision 0x%04x, manufacturer 0x%04x",
		bt_hci_get_ver_str(bt_dev.hci_version), bt_dev.hci_version, bt_dev.hci_revision,
		bt_dev.manufacturer);
	LOG_INF("LMP: version %s (0x%02x) subver 0x%04x", bt_hci_get_ver_str(bt_dev.lmp_version),
		bt_dev.lmp_version, bt_dev.lmp_subversion);
}
3773 
3774 #if defined(CONFIG_BT_HCI_VS)
/* Map a Zephyr vendor-specific HW platform ID to a vendor name string. */
static const char *vs_hw_platform(uint16_t platform)
{
	static const char * const plat_str[] = {
		"reserved", "Intel Corporation", "Nordic Semiconductor",
		"NXP Semiconductors" };

	if (platform >= (sizeof(plat_str) / sizeof(plat_str[0]))) {
		return "unknown";
	}

	return plat_str[platform];
}
3787 
/* Map a Zephyr vendor-specific HW variant ID to a SoC family name.
 * Variant names are only defined for the Nordic platform; everything
 * else reports "unknown".
 */
static const char *vs_hw_variant(uint16_t platform, uint16_t variant)
{
	static const char * const nordic_str[] = {
		"reserved", "nRF51x", "nRF52x", "nRF53x", "nRF54Hx", "nRF54Lx"
	};

	if (platform == BT_HCI_VS_HW_PLAT_NORDIC &&
	    variant < (sizeof(nordic_str) / sizeof(nordic_str[0]))) {
		return nordic_str[variant];
	}

	return "unknown";
}
3804 
/* Map a Zephyr vendor-specific firmware variant ID to a description. */
static const char *vs_fw_variant(uint8_t variant)
{
	static const char * const var_str[] = {
		"Standard Bluetooth controller",
		"Vendor specific controller",
		"Firmware loader",
		"Rescue image",
	};

	if (variant >= (sizeof(var_str) / sizeof(var_str[0]))) {
		return "unknown";
	}

	return var_str[variant];
}
3820 
/* Probe and cache the controller's Zephyr vendor-specific HCI support:
 * version info, supported VS commands, and (when advertised) supported
 * VS features. Best-effort: any failure just logs a warning and leaves
 * the corresponding bt_dev.vs_* fields untouched.
 */
static void hci_vs_init(void)
{
	union {
		struct bt_hci_rp_vs_read_version_info *info;
		struct bt_hci_rp_vs_read_supported_commands *cmds;
		struct bt_hci_rp_vs_read_supported_features *feat;
	} rp;
	struct net_buf *rsp;
	int err;

	/* If heuristics is enabled, try to guess HCI VS support by looking
	 * at the HCI version and identity address. We haven't set any addresses
	 * at this point. So we need to read the public address.
	 */
	if (IS_ENABLED(CONFIG_BT_HCI_VS_EXT_DETECT)) {
		bt_addr_le_t addr;

		if ((bt_dev.hci_version < BT_HCI_VERSION_5_0) ||
		    bt_id_read_public_addr(&addr)) {
			LOG_WRN("Controller doesn't seem to support "
				"Zephyr vendor HCI");
			return;
		}
	}

	err = bt_hci_cmd_send_sync(BT_HCI_OP_VS_READ_VERSION_INFO, NULL, &rsp);
	if (err) {
		LOG_WRN("Vendor HCI extensions not available");
		return;
	}

	/* With detection heuristics on, a response of the wrong size means
	 * the controller replied with something that is not the Zephyr VS
	 * extension; bail out rather than mis-parse it.
	 */
	if (IS_ENABLED(CONFIG_BT_HCI_VS_EXT_DETECT) &&
	    rsp->len != sizeof(struct bt_hci_rp_vs_read_version_info)) {
		LOG_WRN("Invalid Vendor HCI extensions");
		net_buf_unref(rsp);
		return;
	}

	rp.info = (void *)rsp->data;
	LOG_INF("HW Platform: %s (0x%04x)", vs_hw_platform(sys_le16_to_cpu(rp.info->hw_platform)),
		sys_le16_to_cpu(rp.info->hw_platform));
	LOG_INF("HW Variant: %s (0x%04x)",
		vs_hw_variant(sys_le16_to_cpu(rp.info->hw_platform),
			      sys_le16_to_cpu(rp.info->hw_variant)),
		sys_le16_to_cpu(rp.info->hw_variant));
	LOG_INF("Firmware: %s (0x%02x) Version %u.%u Build %u", vs_fw_variant(rp.info->fw_variant),
		rp.info->fw_variant, rp.info->fw_version, sys_le16_to_cpu(rp.info->fw_revision),
		sys_le32_to_cpu(rp.info->fw_build));

	net_buf_unref(rsp);

	err = bt_hci_cmd_send_sync(BT_HCI_OP_VS_READ_SUPPORTED_COMMANDS,
				   NULL, &rsp);
	if (err) {
		LOG_WRN("Failed to read supported vendor commands");
		return;
	}

	if (IS_ENABLED(CONFIG_BT_HCI_VS_EXT_DETECT) &&
	    rsp->len != sizeof(struct bt_hci_rp_vs_read_supported_commands)) {
		LOG_WRN("Invalid Vendor HCI extensions");
		net_buf_unref(rsp);
		return;
	}

	/* Cache the VS command bitmask; checked below and by other modules. */
	rp.cmds = (void *)rsp->data;
	memcpy(bt_dev.vs_commands, rp.cmds->commands, BT_DEV_VS_CMDS_MAX);
	net_buf_unref(rsp);

	/* Only query VS features if the command bitmask says it exists. */
	if (BT_VS_CMD_SUP_FEAT(bt_dev.vs_commands)) {
		err = bt_hci_cmd_send_sync(BT_HCI_OP_VS_READ_SUPPORTED_FEATURES,
					   NULL, &rsp);
		if (err) {
			LOG_WRN("Failed to read supported vendor features");
			return;
		}

		if (IS_ENABLED(CONFIG_BT_HCI_VS_EXT_DETECT) &&
		    rsp->len !=
		    sizeof(struct bt_hci_rp_vs_read_supported_features)) {
			LOG_WRN("Invalid Vendor HCI extensions");
			net_buf_unref(rsp);
			return;
		}

		rp.feat = (void *)rsp->data;
		memcpy(bt_dev.vs_features, rp.feat->features,
		       BT_DEV_VS_FEAT_MAX);
		net_buf_unref(rsp);
	}
}
3912 #endif /* CONFIG_BT_HCI_VS */
3913 
/* Bring up the controller over HCI: optional driver setup (public address
 * injection), common + LE initialization, BR/EDR initialization when the
 * controller supports it, event mask configuration, optional vendor
 * extensions, and finally identity setup.
 *
 * @return 0 on success, negative errno otherwise.
 */
static int hci_init(void)
{
	int err;

#if defined(CONFIG_BT_HCI_SETUP)
	struct bt_hci_setup_params setup_params = { 0 };

	bt_addr_copy(&setup_params.public_addr, BT_ADDR_ANY);
#if defined(CONFIG_BT_HCI_SET_PUBLIC_ADDR)
	/* Pass a user-preset public address down to the driver, if any. */
	if (bt_dev.id_count > 0 && bt_dev.id_addr[BT_ID_DEFAULT].type == BT_ADDR_LE_PUBLIC) {
		bt_addr_copy(&setup_params.public_addr, &bt_dev.id_addr[BT_ID_DEFAULT].a);
	}
#endif /* defined(CONFIG_BT_HCI_SET_PUBLIC_ADDR) */

#if DT_HAS_CHOSEN(zephyr_bt_hci)
	/* -ENOSYS simply means the driver has no setup hook. */
	err = bt_hci_setup(bt_dev.hci, &setup_params);
	if (err && err != -ENOSYS) {
		return err;
	}
#else
	if (bt_dev.drv->setup) {
		err = bt_dev.drv->setup(&setup_params);
		if (err) {
			return err;
		}
	}
#endif
#endif /* defined(CONFIG_BT_HCI_SETUP) */

	err = common_init();
	if (err) {
		return err;
	}

	err = le_init();
	if (err) {
		return err;
	}

	/* BR/EDR support advertised by the controller must match the host
	 * configuration (CONFIG_BT_CLASSIC), otherwise fail the init.
	 */
	if (BT_FEAT_BREDR(bt_dev.features)) {
		err = bt_br_init();
		if (err) {
			return err;
		}
	} else if (IS_ENABLED(CONFIG_BT_CLASSIC)) {
		LOG_ERR("Non-BR/EDR controller detected");
		return -EIO;
	}
#if defined(CONFIG_BT_CONN)
	/* LE-only controller: le_init() must have learned the ACL MTU. */
	else if (!bt_dev.le.acl_mtu) {
		LOG_ERR("ACL BR/EDR buffers not initialized");
		return -EIO;
	}
#endif

	err = set_event_mask();
	if (err) {
		return err;
	}

#if defined(CONFIG_BT_HCI_VS)
	hci_vs_init();
#endif
	err = bt_id_init();
	if (err) {
		return err;
	}

	return 0;
}
3984 
/* Send an outgoing HCI packet to the controller, mirroring it to the
 * monitor first. With TinyCrypt ECC enabled, the buffer is routed through
 * the host-side ECC emulation which may intercept ECC-related commands.
 *
 * @param buf HCI packet; ownership passes to the transport.
 * @return Result of the driver send (0 on success).
 */
int bt_send(struct net_buf *buf)
{
	LOG_DBG("buf %p len %u type %u", buf, buf->len, bt_buf_get_type(buf));

	bt_monitor_send(bt_monitor_opcode(buf), buf->data, buf->len);

	if (IS_ENABLED(CONFIG_BT_TINYCRYPT_ECC)) {
		return bt_hci_ecc_send(buf);
	}

#if DT_HAS_CHOSEN(zephyr_bt_hci)
	return bt_hci_send(bt_dev.hci, buf);
#else
	return bt_dev.drv->send(buf);
#endif
}
4001 
/* Events that must be handled synchronously in the driver's RX context
 * (see hci_event_prio) rather than deferred to the RX work queue, e.g.
 * command flow control and completed-packet accounting.
 */
static const struct event_handler prio_events[] = {
	EVENT_HANDLER(BT_HCI_EVT_CMD_COMPLETE, hci_cmd_complete,
		      sizeof(struct bt_hci_evt_cmd_complete)),
	EVENT_HANDLER(BT_HCI_EVT_CMD_STATUS, hci_cmd_status,
		      sizeof(struct bt_hci_evt_cmd_status)),
#if defined(CONFIG_BT_CONN)
	EVENT_HANDLER(BT_HCI_EVT_DATA_BUF_OVERFLOW,
		      hci_data_buf_overflow,
		      sizeof(struct bt_hci_evt_data_buf_overflow)),
	EVENT_HANDLER(BT_HCI_EVT_DISCONN_COMPLETE, hci_disconn_complete_prio,
		      sizeof(struct bt_hci_evt_disconn_complete)),
#endif /* CONFIG_BT_CONN */
#if defined(CONFIG_BT_CONN_TX)
	EVENT_HANDLER(BT_HCI_EVT_NUM_COMPLETED_PACKETS,
		      hci_num_completed_packets,
		      sizeof(struct bt_hci_evt_num_completed_packets)),
#endif /* CONFIG_BT_CONN_TX */
};
4020 
hci_event_prio(struct net_buf * buf)4021 void hci_event_prio(struct net_buf *buf)
4022 {
4023 	struct net_buf_simple_state state;
4024 	struct bt_hci_evt_hdr *hdr;
4025 	uint8_t evt_flags;
4026 
4027 	net_buf_simple_save(&buf->b, &state);
4028 
4029 	if (buf->len < sizeof(*hdr)) {
4030 		LOG_ERR("Invalid HCI event size (%u)", buf->len);
4031 		net_buf_unref(buf);
4032 		return;
4033 	}
4034 
4035 	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
4036 	evt_flags = bt_hci_evt_get_flags(hdr->evt);
4037 	BT_ASSERT(evt_flags & BT_HCI_EVT_FLAG_RECV_PRIO);
4038 
4039 	handle_event(hdr->evt, buf, prio_events, ARRAY_SIZE(prio_events));
4040 
4041 	if (evt_flags & BT_HCI_EVT_FLAG_RECV) {
4042 		net_buf_simple_restore(&buf->b, &state);
4043 	} else {
4044 		net_buf_unref(buf);
4045 	}
4046 }
4047 
/* Queue an incoming packet for deferred processing and kick rx_work on
 * the configured work queue. NOTE(review): exactly one of the two
 * CONFIG_BT_RECV_WORKQ_* options is expected to be enabled (Kconfig
 * choice); with neither set, `err` would be undeclared.
 */
static void rx_queue_put(struct net_buf *buf)
{
	net_buf_slist_put(&bt_dev.rx_queue, buf);

#if defined(CONFIG_BT_RECV_WORKQ_SYS)
	const int err = k_work_submit(&rx_work);
#elif defined(CONFIG_BT_RECV_WORKQ_BT)
	const int err = k_work_submit_to_queue(&bt_workq, &rx_work);
#endif /* CONFIG_BT_RECV_WORKQ_SYS */
	if (err < 0) {
		LOG_ERR("Could not submit rx_work: %d", err);
	}
}
4061 
/* Core receive path (called with the scheduler locked, see bt_recv /
 * bt_hci_recv): mirror the packet to the monitor, then either handle it
 * inline (priority events) or queue it for the RX work queue.
 *
 * @return 0 on success, -EINVAL for an unknown buffer type (buf is
 *         released in that case).
 */
static int bt_recv_unsafe(struct net_buf *buf)
{
	bt_monitor_send(bt_monitor_opcode(buf), buf->data, buf->len);

	LOG_DBG("buf %p len %u", buf, buf->len);

	switch (bt_buf_get_type(buf)) {
#if defined(CONFIG_BT_CONN)
	case BT_BUF_ACL_IN:
		rx_queue_put(buf);
		return 0;
#endif /* BT_CONN */
	case BT_BUF_EVT:
	{
		struct bt_hci_evt_hdr *hdr = (void *)buf->data;
		uint8_t evt_flags = bt_hci_evt_get_flags(hdr->evt);

		/* An event may be flagged for priority handling, deferred
		 * handling, or both; hci_event_prio() rewinds the buffer
		 * when it is both.
		 */
		if (evt_flags & BT_HCI_EVT_FLAG_RECV_PRIO) {
			hci_event_prio(buf);
		}

		if (evt_flags & BT_HCI_EVT_FLAG_RECV) {
			rx_queue_put(buf);
		}

		return 0;
	}
#if defined(CONFIG_BT_ISO)
	case BT_BUF_ISO_IN:
		rx_queue_put(buf);
		return 0;
#endif /* CONFIG_BT_ISO */
	default:
		LOG_ERR("Invalid buf type %u", bt_buf_get_type(buf));
		net_buf_unref(buf);
		return -EINVAL;
	}
}
4100 
#if DT_HAS_CHOSEN(zephyr_bt_hci)
/* Driver entry point for received HCI packets (new device-model API). */
int bt_hci_recv(const struct device *dev, struct net_buf *buf)
{
	ARG_UNUSED(dev);
#else
/* Driver entry point for received HCI packets (legacy driver API). */
int bt_recv(struct net_buf *buf)
{
#endif
	int err;

	/* Lock the scheduler so priority-event handling and queueing happen
	 * without preemption by other threads.
	 */
	k_sched_lock();
	err = bt_recv_unsafe(buf);
	k_sched_unlock();

	return err;
}
4117 
4118 /* Old-style HCI driver registration */
4119 #if !DT_HAS_CHOSEN(zephyr_bt_hci)
/* Register a legacy (non-devicetree) HCI driver. Only one driver may be
 * registered; it must at minimum provide open and send hooks.
 *
 * @return 0 on success, -EALREADY if a driver is already registered,
 *         -EINVAL if mandatory hooks are missing.
 */
int bt_hci_driver_register(const struct bt_hci_driver *drv)
{
	if (bt_dev.drv) {
		return -EALREADY;
	}

	if (!drv->open || !drv->send) {
		return -EINVAL;
	}

	bt_dev.drv = drv;

	LOG_DBG("Registered %s", drv->name ? drv->name : "");

	bt_monitor_new_index(BT_MONITOR_TYPE_PRIMARY, drv->bus,
			     BT_ADDR_ANY, drv->name ? drv->name : "bt0");

	return 0;
}
4139 #endif /* !DT_HAS_CHOSEN(zephyr_bt_hci) */
4140 
/* Final step of stack initialization: mark the stack ready, resume any
 * pending scanning (observer role), and log device information.
 */
void bt_finalize_init(void)
{
	atomic_set_bit(bt_dev.flags, BT_DEV_READY);

	if (IS_ENABLED(CONFIG_BT_OBSERVER)) {
		bt_le_scan_update(false);
	}

	bt_dev_show_info();
}
4151 
/* Initialize the host stack once the HCI transport is open: HCI-level
 * init, then connection and ISO layers. With settings support enabled
 * and no identity yet, finalization is deferred until the application
 * calls settings_load().
 *
 * @return 0 on success, negative errno otherwise.
 */
static int bt_init(void)
{
	int err;

	err = hci_init();
	if (err) {
		return err;
	}

	if (IS_ENABLED(CONFIG_BT_CONN)) {
		err = bt_conn_init();
		if (err) {
			return err;
		}
	}

	if (IS_ENABLED(CONFIG_BT_ISO)) {
		err = bt_conn_iso_init();
		if (err) {
			return err;
		}
	}

	if (IS_ENABLED(CONFIG_BT_SETTINGS)) {
		if (!bt_dev.id_count) {
			LOG_INF("No ID address. App must call settings_load()");
			return 0;
		}

		/* An identity already exists (preset before enable). */
		atomic_set_bit(bt_dev.flags, BT_DEV_PRESET_ID);
	}

	bt_finalize_init();
	return 0;
}
4187 
/* Work handler for asynchronous bt_enable(): run the init sequence and
 * report the result through the user's ready callback, if given.
 */
static void init_work(struct k_work *work)
{
	int err;

	err = bt_init();
	if (ready_cb) {
		ready_cb(err);
	}
}
4197 
/* Deferred RX processing: handle exactly one queued packet per
 * invocation, then resubmit if more packets are pending (cooperative
 * scheduling with other work items; see comment below).
 */
static void rx_work_handler(struct k_work *work)
{
	int err;

	struct net_buf *buf;

	LOG_DBG("Getting net_buf from queue");
	buf = net_buf_slist_get(&bt_dev.rx_queue);
	if (!buf) {
		return;
	}

	LOG_DBG("buf %p type %u len %u", buf, bt_buf_get_type(buf), buf->len);

	/* Each handler takes ownership of buf. */
	switch (bt_buf_get_type(buf)) {
#if defined(CONFIG_BT_CONN)
	case BT_BUF_ACL_IN:
		hci_acl(buf);
		break;
#endif /* CONFIG_BT_CONN */
#if defined(CONFIG_BT_ISO)
	case BT_BUF_ISO_IN:
		hci_iso(buf);
		break;
#endif /* CONFIG_BT_ISO */
	case BT_BUF_EVT:
		hci_event(buf);
		break;
	default:
		LOG_ERR("Unknown buf type %u", bt_buf_get_type(buf));
		net_buf_unref(buf);
		break;
	}

	/* Schedule the work handler to be executed again if there are
	 * additional items in the queue. This allows for other users of the
	 * work queue to get a chance at running, which wouldn't be possible if
	 * we used a while() loop with a k_yield() statement.
	 */
	if (!sys_slist_is_empty(&bt_dev.rx_queue)) {

#if defined(CONFIG_BT_RECV_WORKQ_SYS)
		err = k_work_submit(&rx_work);
#elif defined(CONFIG_BT_RECV_WORKQ_BT)
		err = k_work_submit_to_queue(&bt_workq, &rx_work);
#endif
		if (err < 0) {
			LOG_ERR("Could not submit rx_work: %d", err);
		}
	}
}
4249 
#if defined(CONFIG_BT_TESTING)
/* Test hook: expose the thread that performs all host TX work. */
k_tid_t bt_testing_tx_tid_get(void)
{
	/* We now TX everything from the syswq */
	return &k_sys_work_q.thread;
}
#endif
4257 
/* Enable the Bluetooth stack. With a callback, initialization completes
 * asynchronously on the system work queue and the result is delivered to
 * @p cb; with cb == NULL the call blocks until initialization finishes.
 *
 * @return 0 on success, -ENODEV if no usable HCI driver/device,
 *         -EALREADY if already enabled, other negative errno on failure.
 */
int bt_enable(bt_ready_cb_t cb)
{
	int err;

#if DT_HAS_CHOSEN(zephyr_bt_hci)
	if (!device_is_ready(bt_dev.hci)) {
		LOG_ERR("HCI driver is not ready");
		return -ENODEV;
	}

	bt_monitor_new_index(BT_MONITOR_TYPE_PRIMARY, BT_HCI_BUS, BT_ADDR_ANY, BT_HCI_NAME);
#else /* !DT_HAS_CHOSEN(zephyr_bt_hci) */
	if (!bt_dev.drv) {
		LOG_ERR("No HCI driver registered");
		return -ENODEV;
	}
#endif

	atomic_clear_bit(bt_dev.flags, BT_DEV_DISABLE);

	if (atomic_test_and_set_bit(bt_dev.flags, BT_DEV_ENABLE)) {
		return -EALREADY;
	}

	if (IS_ENABLED(CONFIG_BT_SETTINGS)) {
		err = bt_settings_init();
		if (err) {
			return err;
		}
	} else if (IS_ENABLED(CONFIG_BT_DEVICE_NAME_DYNAMIC)) {
		/* Best-effort: a name failure is not fatal to enabling. */
		err = bt_set_name(CONFIG_BT_DEVICE_NAME);
		if (err) {
			LOG_WRN("Failed to set device name (%d)", err);
		}
	}

	ready_cb = cb;

	/* Give cmd_sem allowing to send first HCI_Reset cmd, the only
	 * exception is if the controller requests to wait for an
	 * initial Command Complete for NOP.
	 */
	if (!IS_ENABLED(CONFIG_BT_WAIT_NOP)) {
		k_sem_init(&bt_dev.ncmd_sem, 1, 1);
	} else {
		k_sem_init(&bt_dev.ncmd_sem, 0, 1);
	}
	k_fifo_init(&bt_dev.cmd_tx_queue);

#if defined(CONFIG_BT_RECV_WORKQ_BT)
	/* RX thread */
	k_work_queue_init(&bt_workq);
	k_work_queue_start(&bt_workq, rx_thread_stack,
			   CONFIG_BT_RX_STACK_SIZE,
			   K_PRIO_COOP(CONFIG_BT_RX_PRIO), NULL);
	k_thread_name_set(&bt_workq.thread, "BT RX WQ");
#endif

#if DT_HAS_CHOSEN(zephyr_bt_hci)
	err = bt_hci_open(bt_dev.hci, bt_hci_recv);
#else
	err = bt_dev.drv->open();
#endif
	if (err) {
		LOG_ERR("HCI driver open failed (%d)", err);
		return err;
	}

	bt_monitor_send(BT_MONITOR_OPEN_INDEX, NULL, 0);

	/* Synchronous path when no ready callback was provided. */
	if (!cb) {
		return bt_init();
	}

	k_work_submit(&bt_dev.init);
	return 0;
}
4335 
/* Disable the Bluetooth stack: tear down host state, close the HCI
 * transport, and reset identities/keys so a subsequent bt_enable() starts
 * clean. The teardown order matters: READY is cleared and host users are
 * torn down before the transport is closed.
 *
 * @return 0 on success, -ENODEV/-ENOTSUP for missing driver support,
 *         -EALREADY if already disabled, other negative errno on failure.
 */
int bt_disable(void)
{
	int err;

#if !DT_HAS_CHOSEN(zephyr_bt_hci)
	if (!bt_dev.drv) {
		LOG_ERR("No HCI driver registered");
		return -ENODEV;
	}

	if (!bt_dev.drv->close) {
		return -ENOTSUP;
	}
#endif

	if (atomic_test_and_set_bit(bt_dev.flags, BT_DEV_DISABLE)) {
		return -EALREADY;
	}

	/* Clear BT_DEV_READY before disabling HCI link */
	atomic_clear_bit(bt_dev.flags, BT_DEV_READY);

#if defined(CONFIG_BT_BROADCASTER)
	bt_adv_reset_adv_pool();
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_PRIVACY)
	k_work_cancel_delayable(&bt_dev.rpa_update);
#endif /* CONFIG_BT_PRIVACY */

#if defined(CONFIG_BT_PER_ADV_SYNC)
	bt_periodic_sync_disable();
#endif /* CONFIG_BT_PER_ADV_SYNC */

#if defined(CONFIG_BT_CONN)
	if (IS_ENABLED(CONFIG_BT_SMP)) {
		bt_pub_key_hci_disrupted();
	}
	bt_conn_cleanup_all();
	disconnected_handles_reset();
#endif /* CONFIG_BT_CONN */

#if DT_HAS_CHOSEN(zephyr_bt_hci)
	err = bt_hci_close(bt_dev.hci);
	if (err == -ENOSYS) {
		/* Driver cannot close: roll back to an enabled state. */
		atomic_clear_bit(bt_dev.flags, BT_DEV_DISABLE);
		atomic_set_bit(bt_dev.flags, BT_DEV_READY);
		return -ENOTSUP;
	}
#else
	err = bt_dev.drv->close();
#endif
	if (err) {
		LOG_ERR("HCI driver close failed (%d)", err);

		/* Re-enable BT_DEV_READY to avoid inconsistent stack state */
		atomic_set_bit(bt_dev.flags, BT_DEV_READY);

		return err;
	}

#if defined(CONFIG_BT_RECV_WORKQ_BT)
	/* Abort RX thread */
	k_thread_abort(&bt_workq.thread);
#endif

	/* Some functions rely on checking this bitfield */
	memset(bt_dev.supported_commands, 0x00, sizeof(bt_dev.supported_commands));

	/* Reset IDs and corresponding keys. */
	bt_dev.id_count = 0;
#if defined(CONFIG_BT_SMP)
	bt_dev.le.rl_entries = 0;
	bt_keys_reset();
#endif

	/* If random address was set up - clear it */
	bt_addr_le_copy(&bt_dev.random_addr, BT_ADDR_LE_ANY);

	if (IS_ENABLED(CONFIG_BT_ISO)) {
		bt_iso_reset();
	}

	bt_monitor_send(BT_MONITOR_CLOSE_INDEX, NULL, 0);

	/* Clear BT_DEV_ENABLE here to prevent early bt_enable() calls, before disable is
	 * completed.
	 */
	atomic_clear_bit(bt_dev.flags, BT_DEV_ENABLE);

	return 0;
}
4428 
/* @return true once initialization has completed (BT_DEV_READY set). */
bool bt_is_ready(void)
{
	return atomic_test_bit(bt_dev.flags, BT_DEV_READY);
}
4433 
/* Compile-time sanity checks on the configured device name length:
 * must fit the dynamic name buffer, or the 248-byte limit otherwise
 * (248 presumably mirrors the GAP Device Name maximum - see Core Spec).
 */
#define DEVICE_NAME_LEN (sizeof(CONFIG_BT_DEVICE_NAME) - 1)
#if defined(CONFIG_BT_DEVICE_NAME_DYNAMIC)
BUILD_ASSERT(DEVICE_NAME_LEN < CONFIG_BT_DEVICE_NAME_MAX);
#else
BUILD_ASSERT(DEVICE_NAME_LEN < 248);
#endif
4440 
4441 int bt_set_name(const char *name)
4442 {
4443 #if defined(CONFIG_BT_DEVICE_NAME_DYNAMIC)
4444 	size_t len = strlen(name);
4445 	int err;
4446 
4447 	if (len > CONFIG_BT_DEVICE_NAME_MAX) {
4448 		return -ENOMEM;
4449 	}
4450 
4451 	if (!strcmp(bt_dev.name, name)) {
4452 		return 0;
4453 	}
4454 
4455 	memcpy(bt_dev.name, name, len);
4456 	bt_dev.name[len] = '\0';
4457 
4458 	if (IS_ENABLED(CONFIG_BT_SETTINGS)) {
4459 		err = bt_settings_store_name(bt_dev.name, len);
4460 		if (err) {
4461 			LOG_WRN("Unable to store name");
4462 		}
4463 	}
4464 
4465 	return 0;
4466 #else
4467 	return -ENOMEM;
4468 #endif
4469 }
4470 
/* @return The current device name (dynamic buffer or build-time value). */
const char *bt_get_name(void)
{
#if defined(CONFIG_BT_DEVICE_NAME_DYNAMIC)
	return bt_dev.name;
#else
	return CONFIG_BT_DEVICE_NAME;
#endif
}
4479 
/* @return The current GAP appearance value (dynamic or build-time). */
uint16_t bt_get_appearance(void)
{
#if defined(CONFIG_BT_DEVICE_APPEARANCE_DYNAMIC)
	return bt_dev.appearance;
#else
	return CONFIG_BT_DEVICE_APPEARANCE;
#endif
}
4488 
4489 #if defined(CONFIG_BT_DEVICE_APPEARANCE_DYNAMIC)
4490 int bt_set_appearance(uint16_t appearance)
4491 {
4492 	if (bt_dev.appearance != appearance) {
4493 		if (IS_ENABLED(CONFIG_BT_SETTINGS)) {
4494 			int err = bt_settings_store_appearance(&appearance, sizeof(appearance));
4495 			if (err) {
4496 				LOG_ERR("Unable to save setting 'bt/appearance' (err %d).", err);
4497 				return err;
4498 			}
4499 		}
4500 
4501 		bt_dev.appearance = appearance;
4502 	}
4503 
4504 	return 0;
4505 }
4506 #endif
4507 
4508 bool bt_addr_le_is_bonded(uint8_t id, const bt_addr_le_t *addr)
4509 {
4510 	if (IS_ENABLED(CONFIG_BT_SMP)) {
4511 		struct bt_keys *keys = bt_keys_find_addr(id, addr);
4512 
4513 		/* if there are any keys stored then device is bonded */
4514 		return keys && keys->keys;
4515 	} else {
4516 		return false;
4517 	}
4518 }
4519 
4520 #if defined(CONFIG_BT_FILTER_ACCEPT_LIST)
4521 int bt_le_filter_accept_list_add(const bt_addr_le_t *addr)
4522 {
4523 	struct bt_hci_cp_le_add_dev_to_fal *cp;
4524 	struct net_buf *buf;
4525 	int err;
4526 
4527 	if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
4528 		return -EAGAIN;
4529 	}
4530 
4531 	buf = bt_hci_cmd_create(BT_HCI_OP_LE_ADD_DEV_TO_FAL, sizeof(*cp));
4532 	if (!buf) {
4533 		return -ENOBUFS;
4534 	}
4535 
4536 	cp = net_buf_add(buf, sizeof(*cp));
4537 	bt_addr_le_copy(&cp->addr, addr);
4538 
4539 	err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_ADD_DEV_TO_FAL, buf, NULL);
4540 	if (err) {
4541 		LOG_ERR("Failed to add device to filter accept list");
4542 
4543 		return err;
4544 	}
4545 
4546 	return 0;
4547 }
4548 
4549 int bt_le_filter_accept_list_remove(const bt_addr_le_t *addr)
4550 {
4551 	struct bt_hci_cp_le_rem_dev_from_fal *cp;
4552 	struct net_buf *buf;
4553 	int err;
4554 
4555 	if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
4556 		return -EAGAIN;
4557 	}
4558 
4559 	buf = bt_hci_cmd_create(BT_HCI_OP_LE_REM_DEV_FROM_FAL, sizeof(*cp));
4560 	if (!buf) {
4561 		return -ENOBUFS;
4562 	}
4563 
4564 	cp = net_buf_add(buf, sizeof(*cp));
4565 	bt_addr_le_copy(&cp->addr, addr);
4566 
4567 	err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_REM_DEV_FROM_FAL, buf, NULL);
4568 	if (err) {
4569 		LOG_ERR("Failed to remove device from filter accept list");
4570 		return err;
4571 	}
4572 
4573 	return 0;
4574 }
4575 
4576 int bt_le_filter_accept_list_clear(void)
4577 {
4578 	int err;
4579 
4580 	if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
4581 		return -EAGAIN;
4582 	}
4583 
4584 	err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_CLEAR_FAL, NULL, NULL);
4585 	if (err) {
4586 		LOG_ERR("Failed to clear filter accept list");
4587 		return err;
4588 	}
4589 
4590 	return 0;
4591 }
4592 #endif /* defined(CONFIG_BT_FILTER_ACCEPT_LIST) */
4593 
4594 int bt_le_set_chan_map(uint8_t chan_map[5])
4595 {
4596 	struct bt_hci_cp_le_set_host_chan_classif *cp;
4597 	struct net_buf *buf;
4598 
4599 	if (!(IS_ENABLED(CONFIG_BT_CENTRAL) || IS_ENABLED(CONFIG_BT_BROADCASTER))) {
4600 		return -ENOTSUP;
4601 	}
4602 
4603 	if (!BT_CMD_TEST(bt_dev.supported_commands, 27, 3)) {
4604 		LOG_WRN("Set Host Channel Classification command is "
4605 			"not supported");
4606 		return -ENOTSUP;
4607 	}
4608 
4609 	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_HOST_CHAN_CLASSIF,
4610 				sizeof(*cp));
4611 	if (!buf) {
4612 		return -ENOBUFS;
4613 	}
4614 
4615 	cp = net_buf_add(buf, sizeof(*cp));
4616 
4617 	memcpy(&cp->ch_map[0], &chan_map[0], 4);
4618 	cp->ch_map[4] = chan_map[4] & BIT_MASK(5);
4619 
4620 	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_HOST_CHAN_CLASSIF,
4621 				    buf, NULL);
4622 }
4623 
#if defined(CONFIG_BT_RPA_TIMEOUT_DYNAMIC)
/* Update the RPA rotation timeout (seconds) and flag the change so it is
 * applied on the next RPA update cycle.
 *
 * @return 0 on success (including no-op), -EINVAL if out of the valid
 *         1..3600 second range.
 */
int bt_le_set_rpa_timeout(uint16_t new_rpa_timeout)
{
	if (new_rpa_timeout < 1 || new_rpa_timeout > 3600) {
		return -EINVAL;
	}

	if (new_rpa_timeout != bt_dev.rpa_timeout) {
		bt_dev.rpa_timeout = new_rpa_timeout;
		atomic_set_bit(bt_dev.flags, BT_DEV_RPA_TIMEOUT_CHANGED);
	}

	return 0;
}
#endif
4641 
/* Send HCI Configure Data Path (e.g. for ISO audio data paths) with an
 * optional vendor-specific configuration blob.
 *
 * @param dir          Data path direction.
 * @param id           Data path ID.
 * @param vs_config_len Length of @p vs_config in bytes (may be 0).
 * @param vs_config    Vendor-specific configuration, or NULL when len is 0.
 * @return 0 on success, -ENOBUFS if no command buffer, -EIO on a non-zero
 *         command status, other negative errno on failure.
 */
int bt_configure_data_path(uint8_t dir, uint8_t id, uint8_t vs_config_len,
			   const uint8_t *vs_config)
{
	struct bt_hci_rp_configure_data_path *rp;
	struct bt_hci_cp_configure_data_path *cp;
	struct net_buf *rsp;
	struct net_buf *buf;
	int err;

	buf = bt_hci_cmd_create(BT_HCI_OP_CONFIGURE_DATA_PATH, sizeof(*cp) +
				vs_config_len);
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->data_path_dir = dir;
	cp->data_path_id  = id;
	cp->vs_config_len = vs_config_len;
	if (vs_config_len) {
		(void)memcpy(cp->vs_config, vs_config, vs_config_len);
	}

	err = bt_hci_cmd_send_sync(BT_HCI_OP_CONFIGURE_DATA_PATH, buf, &rsp);
	if (err) {
		return err;
	}

	/* Command completed; map a non-zero HCI status to -EIO. */
	rp = (void *)rsp->data;
	if (rp->status) {
		err = -EIO;
	}
	net_buf_unref(rsp);

	return err;
}
4678 
4679 /* Return `true` if a command was processed/sent */
4680 static bool process_pending_cmd(k_timeout_t timeout)
4681 {
4682 	if (!k_fifo_is_empty(&bt_dev.cmd_tx_queue)) {
4683 		if (k_sem_take(&bt_dev.ncmd_sem, timeout) == 0) {
4684 			hci_core_send_cmd();
4685 			return true;
4686 		}
4687 	}
4688 
4689 	return false;
4690 }
4691 
4692 static void tx_processor(struct k_work *item)
4693 {
4694 	LOG_DBG("TX process start");
4695 	if (process_pending_cmd(K_NO_WAIT)) {
4696 		/* If we processed a command, let the scheduler run before
4697 		 * processing another command (or data).
4698 		 */
4699 		bt_tx_irq_raise();
4700 		return;
4701 	}
4702 
4703 	/* Hand over control to conn to process pending data */
4704 	if (IS_ENABLED(CONFIG_BT_CONN_TX)) {
4705 		bt_conn_tx_processor();
4706 	}
4707 }
4708 
static K_WORK_DEFINE(tx_work, tx_processor);

/* Signal that there is (potentially) TX work to do; the actual sending
 * happens in tx_processor on the system work queue.
 */
void bt_tx_irq_raise(void)
{
	LOG_DBG("kick TX");
	k_work_submit(&tx_work);
}
4716