1 /* hci_core.c - HCI core Bluetooth handling */
2
3 /*
4 * Copyright (c) 2017-2021 Nordic Semiconductor ASA
5 * Copyright (c) 2015-2016 Intel Corporation
6 *
7 * SPDX-License-Identifier: Apache-2.0
8 */
9
10 #include <zephyr.h>
11 #include <string.h>
12 #include <stdio.h>
13 #include <errno.h>
14 #include <sys/atomic.h>
15 #include <sys/util.h>
16 #include <sys/slist.h>
17 #include <sys/byteorder.h>
18 #include <debug/stack.h>
19 #include <sys/__assert.h>
20 #include <soc.h>
21
22 #include <settings/settings.h>
23
24 #include <bluetooth/bluetooth.h>
25 #include <bluetooth/conn.h>
26 #include <bluetooth/l2cap.h>
27 #include <bluetooth/hci.h>
28 #include <bluetooth/hci_vs.h>
29 #include <drivers/bluetooth/hci_driver.h>
30
31 #define BT_DBG_ENABLED IS_ENABLED(CONFIG_BT_DEBUG_HCI_CORE)
32 #define LOG_MODULE_NAME bt_hci_core
33 #include "common/log.h"
34
35 #include "common/rpa.h"
36 #include "keys.h"
37 #include "monitor.h"
38 #include "hci_core.h"
39 #include "hci_ecc.h"
40 #include "ecc.h"
41 #include "id.h"
42 #include "adv.h"
43 #include "scan.h"
44
45 #include "conn_internal.h"
46 #include "iso_internal.h"
47 #include "l2cap_internal.h"
48 #include "gatt_internal.h"
49 #include "smp.h"
50 #include "crypto.h"
51 #include "settings.h"
52
53 #if defined(CONFIG_BT_BREDR)
54 #include "br.h"
55 #endif
56
57 #if IS_ENABLED(CONFIG_BT_DF)
58 #include "direction_internal.h"
59 #endif /* CONFIG_BT_DF */
60
61 #define HCI_CMD_TIMEOUT K_SECONDS(10)
62
63 /* Stacks for the threads */
64 #if !defined(CONFIG_BT_RECV_IS_RX_THREAD)
65 static struct k_thread rx_thread_data;
66 static K_KERNEL_STACK_DEFINE(rx_thread_stack, CONFIG_BT_RX_STACK_SIZE);
67 #endif
68 static struct k_thread tx_thread_data;
69 static K_KERNEL_STACK_DEFINE(tx_thread_stack, CONFIG_BT_HCI_TX_STACK_SIZE);
70
71 static void init_work(struct k_work *work);
72
73 struct bt_dev bt_dev = {
74 .init = Z_WORK_INITIALIZER(init_work),
75 /* Give cmd_sem allowing to send first HCI_Reset cmd, the only
76 * exception is if the controller requests to wait for an
77 * initial Command Complete for NOP.
78 */
79 #if !defined(CONFIG_BT_WAIT_NOP)
80 .ncmd_sem = Z_SEM_INITIALIZER(bt_dev.ncmd_sem, 1, 1),
81 #else
82 .ncmd_sem = Z_SEM_INITIALIZER(bt_dev.ncmd_sem, 0, 1),
83 #endif
84 .cmd_tx_queue = Z_FIFO_INITIALIZER(bt_dev.cmd_tx_queue),
85 #if !defined(CONFIG_BT_RECV_IS_RX_THREAD)
86 .rx_queue = Z_FIFO_INITIALIZER(bt_dev.rx_queue),
87 #endif
88 };
89
90 static bt_ready_cb_t ready_cb;
91
92 #if defined(CONFIG_BT_HCI_VS_EVT_USER)
93 static bt_hci_vnd_evt_cb_t *hci_vnd_evt_cb;
94 #endif /* CONFIG_BT_HCI_VS_EVT_USER */
95
96 struct cmd_data {
97 /** HCI status of the command completion */
98 uint8_t status;
99
100 /** The command OpCode that the buffer contains */
101 uint16_t opcode;
102
103 /** The state to update when command completes with success. */
104 struct bt_hci_cmd_state_set *state;
105
106 /** Used by bt_hci_cmd_send_sync. */
107 struct k_sem *sync;
108 };
109
110 static struct cmd_data cmd_data[CONFIG_BT_BUF_CMD_TX_COUNT];
111
112 #define cmd(buf) (&cmd_data[net_buf_id(buf)])
113 #define acl(buf) ((struct acl_data *)net_buf_user_data(buf))
114
/* Attach a state-flag update request to an HCI command buffer.
 *
 * The command processing path uses the recorded @a state to set or
 * clear @a bit in @a target (according to @a val) once the command
 * completes successfully.
 */
void bt_hci_cmd_state_set_init(struct net_buf *buf,
			       struct bt_hci_cmd_state_set *state,
			       atomic_t *target, int bit, bool val)
{
	state->val = val;
	state->bit = bit;
	state->target = target;

	cmd(buf)->state = state;
}
124
125 /* HCI command buffers. Derive the needed size from both Command and Event
126 * buffer length since the buffer is also used for the response event i.e
127 * command complete or command status.
128 */
129 #define CMD_BUF_SIZE MAX(BT_BUF_EVT_RX_SIZE, BT_BUF_CMD_TX_SIZE)
130 NET_BUF_POOL_FIXED_DEFINE(hci_cmd_pool, CONFIG_BT_BUF_CMD_TX_COUNT,
131 CMD_BUF_SIZE, NULL);
132
133 struct event_handler {
134 uint8_t event;
135 uint8_t min_len;
136 void (*handler)(struct net_buf *buf);
137 };
138
139 #define EVENT_HANDLER(_evt, _handler, _min_len) \
140 { \
141 .event = _evt, \
142 .handler = _handler, \
143 .min_len = _min_len, \
144 }
145
/* Dispatch an HCI event to the first matching entry in a handler table.
 *
 * Events whose payload is shorter than the handler's declared minimum
 * length are dropped with an error; events with no registered handler
 * are logged as a warning together with a hex dump of the payload.
 */
static inline void handle_event(uint8_t event, struct net_buf *buf,
				const struct event_handler *handlers,
				size_t num_handlers)
{
	for (size_t idx = 0; idx < num_handlers; idx++) {
		const struct event_handler *entry = &handlers[idx];

		if (entry->event == event) {
			if (buf->len < entry->min_len) {
				BT_ERR("Too small (%u bytes) event 0x%02x",
				       buf->len, event);
			} else {
				entry->handler(buf);
			}

			return;
		}
	}

	BT_WARN("Unhandled event 0x%02x len %u: %s", event,
		buf->len, bt_hex(buf->data, buf->len));
}
172
173 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
bt_hci_host_num_completed_packets(struct net_buf * buf)174 void bt_hci_host_num_completed_packets(struct net_buf *buf)
175 {
176
177 struct bt_hci_cp_host_num_completed_packets *cp;
178 uint16_t handle = acl(buf)->handle;
179 struct bt_hci_handle_count *hc;
180 struct bt_conn *conn;
181
182 net_buf_destroy(buf);
183
184 /* Do nothing if controller to host flow control is not supported */
185 if (!BT_CMD_TEST(bt_dev.supported_commands, 10, 5)) {
186 return;
187 }
188
189 conn = bt_conn_lookup_index(acl(buf)->index);
190 if (!conn) {
191 BT_WARN("Unable to look up conn with index 0x%02x",
192 acl(buf)->index);
193 return;
194 }
195
196 if (conn->state != BT_CONN_CONNECTED &&
197 conn->state != BT_CONN_DISCONNECT) {
198 BT_WARN("Not reporting packet for non-connected conn");
199 bt_conn_unref(conn);
200 return;
201 }
202
203 bt_conn_unref(conn);
204
205 BT_DBG("Reporting completed packet for handle %u", handle);
206
207 buf = bt_hci_cmd_create(BT_HCI_OP_HOST_NUM_COMPLETED_PACKETS,
208 sizeof(*cp) + sizeof(*hc));
209 if (!buf) {
210 BT_ERR("Unable to allocate new HCI command");
211 return;
212 }
213
214 cp = net_buf_add(buf, sizeof(*cp));
215 cp->num_handles = sys_cpu_to_le16(1);
216
217 hc = net_buf_add(buf, sizeof(*hc));
218 hc->handle = sys_cpu_to_le16(handle);
219 hc->count = sys_cpu_to_le16(1);
220
221 bt_hci_cmd_send(BT_HCI_OP_HOST_NUM_COMPLETED_PACKETS, buf);
222 }
223 #endif /* defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL) */
224
/* Allocate and initialize an HCI command buffer.
 *
 * Reserves headroom, marks the buffer as a command, resets the
 * per-buffer command metadata and writes the command header for
 * @a opcode with @a param_len parameter bytes to follow. Blocks
 * (K_FOREVER) until a buffer from hci_cmd_pool is available.
 */
struct net_buf *bt_hci_cmd_create(uint16_t opcode, uint8_t param_len)
{
	struct net_buf *buf;
	struct bt_hci_cmd_hdr *cmd_hdr;

	BT_DBG("opcode 0x%04x param_len %u", opcode, param_len);

	buf = net_buf_alloc(&hci_cmd_pool, K_FOREVER);
	__ASSERT_NO_MSG(buf);

	BT_DBG("buf %p", buf);

	net_buf_reserve(buf, BT_BUF_RESERVE);
	bt_buf_set_type(buf, BT_BUF_CMD);

	/* Reset command metadata carried in the buffer's user data. */
	cmd(buf)->opcode = opcode;
	cmd(buf)->sync = NULL;
	cmd(buf)->state = NULL;

	cmd_hdr = net_buf_add(buf, sizeof(*cmd_hdr));
	cmd_hdr->opcode = sys_cpu_to_le16(opcode);
	cmd_hdr->param_len = param_len;

	return buf;
}
251
/* Queue an HCI command for transmission without waiting for completion.
 *
 * If @a buf is NULL a parameter-less command buffer is allocated for
 * @a opcode. Returns 0 on success, -ENOBUFS on allocation failure, or
 * the driver error for the direct-send special case below.
 */
int bt_hci_cmd_send(uint16_t opcode, struct net_buf *buf)
{
	if (buf == NULL) {
		buf = bt_hci_cmd_create(opcode, 0);
		if (buf == NULL) {
			return -ENOBUFS;
		}
	}

	BT_DBG("opcode 0x%04x len %u", opcode, buf->len);

	if (opcode != BT_HCI_OP_HOST_NUM_COMPLETED_PACKETS) {
		net_buf_put(&bt_dev.cmd_tx_queue, buf);
		return 0;
	}

	/* Host Number of Completed Packets can ignore the ncmd value
	 * and does not generate any cmd complete/status events, so it
	 * bypasses the command TX queue and goes straight to the driver.
	 */
	int err = bt_send(buf);

	if (err) {
		BT_ERR("Unable to send to driver (err %d)", err);
		net_buf_unref(buf);
	}

	return err;
}
282
/* Send an HCI command and block until its completion is signalled.
 *
 * If @a buf is NULL, a parameter-less command buffer is allocated for
 * @a opcode. On success the response buffer is handed back through
 * @a rsp (the caller must unref it); pass rsp == NULL to have it
 * released here.
 *
 * Returns 0 on success, -ENOBUFS if allocation failed, -ECONNREFUSED
 * for HCI status BT_HCI_ERR_CONN_LIMIT_EXCEEDED, or -EIO for any other
 * non-zero HCI status.
 */
int bt_hci_cmd_send_sync(uint16_t opcode, struct net_buf *buf,
			 struct net_buf **rsp)
{
	struct k_sem sync_sem;
	uint8_t status;
	int err;

	if (!buf) {
		buf = bt_hci_cmd_create(opcode, 0);
		if (!buf) {
			return -ENOBUFS;
		}
	}

	BT_DBG("buf %p opcode 0x%04x len %u", buf, opcode, buf->len);

	/* The semaphore is given from the command completion path once the
	 * matching event has been processed and cmd(buf)->status is set.
	 */
	k_sem_init(&sync_sem, 0, 1);
	cmd(buf)->sync = &sync_sem;

	/* Make sure the buffer stays around until the command completes */
	net_buf_put(&bt_dev.cmd_tx_queue, net_buf_ref(buf));

	/* A timeout means the controller never answered; this is treated
	 * as fatal since the HCI link is then in an unknown state.
	 */
	err = k_sem_take(&sync_sem, HCI_CMD_TIMEOUT);
	BT_ASSERT_MSG(err == 0, "k_sem_take failed with err %d", err);

	status = cmd(buf)->status;
	if (status) {
		BT_WARN("opcode 0x%04x status 0x%02x", opcode, status);
		net_buf_unref(buf);

		switch (status) {
		case BT_HCI_ERR_CONN_LIMIT_EXCEEDED:
			return -ECONNREFUSED;
		default:
			return -EIO;
		}
	}

	BT_DBG("rsp %p opcode 0x%04x len %u", buf, opcode, buf->len);

	if (rsp) {
		*rsp = buf;
	} else {
		net_buf_unref(buf);
	}

	return 0;
}
331
/* Query the controller's maximum supported Data Length Extension TX
 * parameters and return them through @a tx_octets and @a tx_time.
 * Output parameters are only written on success (return 0).
 */
static int hci_le_read_max_data_len(uint16_t *tx_octets, uint16_t *tx_time)
{
	struct net_buf *rsp;
	int err;

	err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_MAX_DATA_LEN, NULL, &rsp);
	if (err != 0) {
		BT_ERR("Failed to read DLE max data len");
		return err;
	}

	const struct bt_hci_rp_le_read_max_data_len *rp = (void *)rsp->data;

	*tx_octets = sys_le16_to_cpu(rp->max_tx_octets);
	*tx_time = sys_le16_to_cpu(rp->max_tx_time);
	net_buf_unref(rsp);

	return 0;
}
351
/* Translate an HCI PHY identifier to the corresponding BT_GAP_LE_PHY_*
 * value, or 0 if the identifier is not recognized.
 */
uint8_t bt_get_phy(uint8_t hci_phy)
{
	if (hci_phy == BT_HCI_LE_PHY_1M) {
		return BT_GAP_LE_PHY_1M;
	}

	if (hci_phy == BT_HCI_LE_PHY_2M) {
		return BT_GAP_LE_PHY_2M;
	}

	if (hci_phy == BT_HCI_LE_PHY_CODED) {
		return BT_GAP_LE_PHY_CODED;
	}

	return 0;
}
365
366 #if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_ISO)
/* Handle the HCI Number Of Completed Packets event.
 *
 * For each (handle, count) pair, releases @c count TX contexts on the
 * corresponding connection: completions sent without a callback are
 * accounted via conn->pending_no_cb, others are moved from
 * conn->tx_pending to conn->tx_complete and processed later from
 * conn->tx_complete_work. Every completion returns one TX buffer
 * credit through bt_conn_get_pkts().
 *
 * NOTE(review): evt->num_handles is trusted as-is; there is no explicit
 * check here that the payload actually contains that many entries —
 * confirm the event length is validated by the caller/handler table.
 */
static void hci_num_completed_packets(struct net_buf *buf)
{
	struct bt_hci_evt_num_completed_packets *evt = (void *)buf->data;
	int i;

	BT_DBG("num_handles %u", evt->num_handles);

	for (i = 0; i < evt->num_handles; i++) {
		uint16_t handle, count;
		struct bt_conn *conn;

		handle = sys_le16_to_cpu(evt->h[i].handle);
		count = sys_le16_to_cpu(evt->h[i].count);

		BT_DBG("handle %u count %u", handle, count);

		conn = bt_conn_lookup_handle(handle);
		if (!conn) {
			BT_ERR("No connection for handle %u", handle);
			continue;
		}

		while (count--) {
			struct bt_conn_tx *tx;
			sys_snode_t *node;
			unsigned int key;

			/* IRQ lock protects pending_no_cb and the TX lists
			 * from concurrent access.
			 */
			key = irq_lock();

			if (conn->pending_no_cb) {
				/* Completion for a packet that was sent
				 * without a callback: only the buffer credit
				 * needs to be returned.
				 */
				conn->pending_no_cb--;
				irq_unlock(key);
				k_sem_give(bt_conn_get_pkts(conn));
				continue;
			}

			node = sys_slist_get(&conn->tx_pending);
			irq_unlock(key);

			if (!node) {
				/* Controller reported more completions than
				 * we have pending TX contexts.
				 */
				BT_ERR("packets count mismatch");
				break;
			}

			tx = CONTAINER_OF(node, struct bt_conn_tx, node);

			key = irq_lock();
			/* Absorb this context's no-callback completions and
			 * hand the context to the completion work item.
			 */
			conn->pending_no_cb = tx->pending_no_cb;
			tx->pending_no_cb = 0U;
			sys_slist_append(&conn->tx_complete, &tx->node);
			irq_unlock(key);

			k_work_submit(&conn->tx_complete_work);
			k_sem_give(bt_conn_get_pkts(conn));
		}

		bt_conn_unref(conn);
	}
}
426 #endif /* CONFIG_BT_CONN || CONFIG_BT_ISO */
427
428 #if defined(CONFIG_BT_CONN)
hci_acl(struct net_buf * buf)429 static void hci_acl(struct net_buf *buf)
430 {
431 struct bt_hci_acl_hdr *hdr;
432 uint16_t handle, len;
433 struct bt_conn *conn;
434 uint8_t flags;
435
436 BT_DBG("buf %p", buf);
437
438 BT_ASSERT(buf->len >= sizeof(*hdr));
439
440 hdr = net_buf_pull_mem(buf, sizeof(*hdr));
441 len = sys_le16_to_cpu(hdr->len);
442 handle = sys_le16_to_cpu(hdr->handle);
443 flags = bt_acl_flags(handle);
444
445 acl(buf)->handle = bt_acl_handle(handle);
446 acl(buf)->index = BT_CONN_INDEX_INVALID;
447
448 BT_DBG("handle %u len %u flags %u", acl(buf)->handle, len, flags);
449
450 if (buf->len != len) {
451 BT_ERR("ACL data length mismatch (%u != %u)", buf->len, len);
452 net_buf_unref(buf);
453 return;
454 }
455
456 conn = bt_conn_lookup_handle(acl(buf)->handle);
457 if (!conn) {
458 BT_ERR("Unable to find conn for handle %u", acl(buf)->handle);
459 net_buf_unref(buf);
460 return;
461 }
462
463 acl(buf)->index = bt_conn_index(conn);
464
465 bt_conn_recv(conn, buf, flags);
466 bt_conn_unref(conn);
467 }
468
hci_data_buf_overflow(struct net_buf * buf)469 static void hci_data_buf_overflow(struct net_buf *buf)
470 {
471 struct bt_hci_evt_data_buf_overflow *evt = (void *)buf->data;
472
473 BT_WARN("Data buffer overflow (link type 0x%02x)", evt->link_type);
474 }
475
476 #if defined(CONFIG_BT_CENTRAL)
set_phy_conn_param(const struct bt_conn * conn,struct bt_hci_ext_conn_phy * phy)477 static void set_phy_conn_param(const struct bt_conn *conn,
478 struct bt_hci_ext_conn_phy *phy)
479 {
480 phy->conn_interval_min = sys_cpu_to_le16(conn->le.interval_min);
481 phy->conn_interval_max = sys_cpu_to_le16(conn->le.interval_max);
482 phy->conn_latency = sys_cpu_to_le16(conn->le.latency);
483 phy->supervision_timeout = sys_cpu_to_le16(conn->le.timeout);
484
485 phy->min_ce_len = 0;
486 phy->max_ce_len = 0;
487 }
488
/* Initiate an LE connection using the LE Extended Create Connection
 * command.
 *
 * Builds one PHY parameter entry per enabled initiating PHY (1M unless
 * disabled via BT_CONN_LE_OPT_NO_1M, plus Coded if requested). When the
 * filter accept list is used for auto-connection the peer address is
 * left as BT_ADDR_LE_ANY and the filter policy selects the list.
 * Sets BT_DEV_INITIATING on successful command completion.
 *
 * Returns 0 on success or a negative error code.
 */
int bt_le_create_conn_ext(const struct bt_conn *conn)
{
	struct bt_hci_cp_le_ext_create_conn *cp;
	struct bt_hci_ext_conn_phy *phy;
	struct bt_hci_cmd_state_set state;
	bool use_filter = false;
	struct net_buf *buf;
	uint8_t own_addr_type;
	uint8_t num_phys;
	int err;

	if (IS_ENABLED(CONFIG_BT_FILTER_ACCEPT_LIST)) {
		use_filter = atomic_test_bit(conn->flags, BT_CONN_AUTO_CONNECT);
	}

	err = bt_id_set_create_conn_own_addr(use_filter, &own_addr_type);
	if (err) {
		return err;
	}

	/* One PHY entry for 1M (unless suppressed) and one for Coded. */
	num_phys = (!(bt_dev.create_param.options &
		      BT_CONN_LE_OPT_NO_1M) ? 1 : 0) +
		   ((bt_dev.create_param.options &
		     BT_CONN_LE_OPT_CODED) ? 1 : 0);

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_EXT_CREATE_CONN, sizeof(*cp) +
				num_phys * sizeof(*phy));
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	(void)memset(cp, 0, sizeof(*cp));

	if (use_filter) {
		/* User Initiated procedure use fast scan parameters. */
		bt_addr_le_copy(&cp->peer_addr, BT_ADDR_LE_ANY);
		cp->filter_policy = BT_HCI_LE_CREATE_CONN_FP_FILTER;
	} else {
		const bt_addr_le_t *peer_addr = &conn->le.dst;

#if defined(CONFIG_BT_SMP)
		if (!bt_dev.le.rl_size ||
		    bt_dev.le.rl_entries > bt_dev.le.rl_size) {
			/* Host resolving is used, use the RPA directly. */
			peer_addr = &conn->le.resp_addr;
		}
#endif
		bt_addr_le_copy(&cp->peer_addr, peer_addr);
		cp->filter_policy = BT_HCI_LE_CREATE_CONN_FP_NO_FILTER;
	}

	cp->own_addr_type = own_addr_type;
	cp->phys = 0;

	/* Append PHY entries in the order mandated by the command: 1M
	 * first, then Coded.
	 */
	if (!(bt_dev.create_param.options & BT_CONN_LE_OPT_NO_1M)) {
		cp->phys |= BT_HCI_LE_EXT_SCAN_PHY_1M;
		phy = net_buf_add(buf, sizeof(*phy));
		phy->scan_interval = sys_cpu_to_le16(
			bt_dev.create_param.interval);
		phy->scan_window = sys_cpu_to_le16(
			bt_dev.create_param.window);
		set_phy_conn_param(conn, phy);
	}

	if (bt_dev.create_param.options & BT_CONN_LE_OPT_CODED) {
		cp->phys |= BT_HCI_LE_EXT_SCAN_PHY_CODED;
		phy = net_buf_add(buf, sizeof(*phy));
		phy->scan_interval = sys_cpu_to_le16(
			bt_dev.create_param.interval_coded);
		phy->scan_window = sys_cpu_to_le16(
			bt_dev.create_param.window_coded);
		set_phy_conn_param(conn, phy);
	}

	bt_hci_cmd_state_set_init(buf, &state, bt_dev.flags,
				  BT_DEV_INITIATING, true);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_EXT_CREATE_CONN, buf, NULL);
}
569
bt_le_create_conn_legacy(const struct bt_conn * conn)570 int bt_le_create_conn_legacy(const struct bt_conn *conn)
571 {
572 struct bt_hci_cp_le_create_conn *cp;
573 struct bt_hci_cmd_state_set state;
574 bool use_filter = false;
575 struct net_buf *buf;
576 uint8_t own_addr_type;
577 int err;
578
579 if (IS_ENABLED(CONFIG_BT_FILTER_ACCEPT_LIST)) {
580 use_filter = atomic_test_bit(conn->flags, BT_CONN_AUTO_CONNECT);
581 }
582
583 err = bt_id_set_create_conn_own_addr(use_filter, &own_addr_type);
584 if (err) {
585 return err;
586 }
587
588 buf = bt_hci_cmd_create(BT_HCI_OP_LE_CREATE_CONN, sizeof(*cp));
589 if (!buf) {
590 return -ENOBUFS;
591 }
592
593 cp = net_buf_add(buf, sizeof(*cp));
594 memset(cp, 0, sizeof(*cp));
595 cp->own_addr_type = own_addr_type;
596
597 if (use_filter) {
598 /* User Initiated procedure use fast scan parameters. */
599 bt_addr_le_copy(&cp->peer_addr, BT_ADDR_LE_ANY);
600 cp->filter_policy = BT_HCI_LE_CREATE_CONN_FP_FILTER;
601 } else {
602 const bt_addr_le_t *peer_addr = &conn->le.dst;
603
604 #if defined(CONFIG_BT_SMP)
605 if (!bt_dev.le.rl_size ||
606 bt_dev.le.rl_entries > bt_dev.le.rl_size) {
607 /* Host resolving is used, use the RPA directly. */
608 peer_addr = &conn->le.resp_addr;
609 }
610 #endif
611 bt_addr_le_copy(&cp->peer_addr, peer_addr);
612 cp->filter_policy = BT_HCI_LE_CREATE_CONN_FP_NO_FILTER;
613 }
614
615 cp->scan_interval = sys_cpu_to_le16(bt_dev.create_param.interval);
616 cp->scan_window = sys_cpu_to_le16(bt_dev.create_param.window);
617
618 cp->conn_interval_min = sys_cpu_to_le16(conn->le.interval_min);
619 cp->conn_interval_max = sys_cpu_to_le16(conn->le.interval_max);
620 cp->conn_latency = sys_cpu_to_le16(conn->le.latency);
621 cp->supervision_timeout = sys_cpu_to_le16(conn->le.timeout);
622
623 bt_hci_cmd_state_set_init(buf, &state, bt_dev.flags,
624 BT_DEV_INITIATING, true);
625
626 return bt_hci_cmd_send_sync(BT_HCI_OP_LE_CREATE_CONN, buf, NULL);
627 }
628
bt_le_create_conn(const struct bt_conn * conn)629 int bt_le_create_conn(const struct bt_conn *conn)
630 {
631 if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
632 BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
633 return bt_le_create_conn_ext(conn);
634 }
635
636 return bt_le_create_conn_legacy(conn);
637 }
638
bt_le_create_conn_cancel(void)639 int bt_le_create_conn_cancel(void)
640 {
641 struct net_buf *buf;
642 struct bt_hci_cmd_state_set state;
643
644 buf = bt_hci_cmd_create(BT_HCI_OP_LE_CREATE_CONN_CANCEL, 0);
645
646 bt_hci_cmd_state_set_init(buf, &state, bt_dev.flags,
647 BT_DEV_INITIATING, false);
648
649 return bt_hci_cmd_send_sync(BT_HCI_OP_LE_CREATE_CONN_CANCEL, buf, NULL);
650 }
651 #endif /* CONFIG_BT_CENTRAL */
652
/* Send the HCI Disconnect command for @a handle with the given HCI
 * @a reason code and wait for the command status.
 */
int bt_hci_disconnect(uint16_t handle, uint8_t reason)
{
	struct bt_hci_cp_disconnect *disconn;
	struct net_buf *buf;

	buf = bt_hci_cmd_create(BT_HCI_OP_DISCONNECT, sizeof(*disconn));
	if (buf == NULL) {
		return -ENOBUFS;
	}

	disconn = net_buf_add(buf, sizeof(*disconn));
	disconn->reason = reason;
	disconn->handle = sys_cpu_to_le16(handle);

	return bt_hci_cmd_send_sync(BT_HCI_OP_DISCONNECT, buf, NULL);
}
669
670 static uint16_t disconnected_handles[CONFIG_BT_MAX_CONN];
/* Remember the handle of a connection whose priority disconnect
 * complete event arrived before the corresponding connection complete
 * event, so the later event can be matched up with it.
 */
static void conn_handle_disconnected(uint16_t handle)
{
	for (int i = 0; i < ARRAY_SIZE(disconnected_handles); i++) {
		if (!disconnected_handles[i]) {
			/* Use invalid connection handle bits so that connection
			 * handle 0 can be used as a valid non-zero handle.
			 */
			disconnected_handles[i] = ~BT_ACL_HANDLE_MASK | handle;

			/* Stop after claiming one free slot. Without this
			 * return the handle would be duplicated into every
			 * remaining empty slot, letting a single disconnect
			 * satisfy multiple later lookups.
			 */
			return;
		}
	}
}
682
conn_handle_is_disconnected(uint16_t handle)683 static bool conn_handle_is_disconnected(uint16_t handle)
684 {
685 handle |= ~BT_ACL_HANDLE_MASK;
686
687 for (int i = 0; i < ARRAY_SIZE(disconnected_handles); i++) {
688 if (disconnected_handles[i] == handle) {
689 disconnected_handles[i] = 0;
690 return true;
691 }
692 }
693
694 return false;
695 }
696
hci_disconn_complete_prio(struct net_buf * buf)697 static void hci_disconn_complete_prio(struct net_buf *buf)
698 {
699 struct bt_hci_evt_disconn_complete *evt = (void *)buf->data;
700 uint16_t handle = sys_le16_to_cpu(evt->handle);
701 struct bt_conn *conn;
702
703 BT_DBG("status 0x%02x handle %u reason 0x%02x", evt->status, handle,
704 evt->reason);
705
706 if (evt->status) {
707 return;
708 }
709
710 conn = bt_conn_lookup_handle(handle);
711 if (!conn) {
712 /* Priority disconnect complete event received before normal
713 * connection complete event.
714 */
715 conn_handle_disconnected(handle);
716 return;
717 }
718
719 bt_conn_set_state(conn, BT_CONN_DISCONNECT_COMPLETE);
720 bt_conn_unref(conn);
721 }
722
/* Normal-priority handling of the HCI Disconnection Complete event.
 *
 * Records the disconnect reason, moves the connection to DISCONNECTED
 * state, performs transport-specific cleanup (SCO teardown, one-session
 * BR bond key removal) and, when host-based auto-connect is in use,
 * restarts passive scanning for the peer.
 */
static void hci_disconn_complete(struct net_buf *buf)
{
	struct bt_hci_evt_disconn_complete *evt = (void *)buf->data;
	uint16_t handle = sys_le16_to_cpu(evt->handle);
	struct bt_conn *conn;

	BT_DBG("status 0x%02x handle %u reason 0x%02x", evt->status, handle,
	       evt->reason);

	if (evt->status) {
		return;
	}

	conn = bt_conn_lookup_handle(handle);
	if (!conn) {
		BT_ERR("Unable to look up conn with handle %u", handle);
		return;
	}

	conn->err = evt->reason;

	bt_conn_set_state(conn, BT_CONN_DISCONNECTED);

	if (conn->type != BT_CONN_TYPE_LE) {
#if defined(CONFIG_BT_BREDR)
		if (conn->type == BT_CONN_TYPE_SCO) {
			/* SCO cleanup releases the object; no unref here. */
			bt_sco_cleanup(conn);
			return;
		}
		/*
		 * If only for one connection session bond was set, clear keys
		 * database row for this connection.
		 */
		if (conn->type == BT_CONN_TYPE_BR &&
		    atomic_test_and_clear_bit(conn->flags, BT_CONN_BR_NOBOND)) {
			bt_keys_link_key_clear(conn->br.link_key);
		}
#endif
		bt_conn_unref(conn);
		return;
	}

#if defined(CONFIG_BT_CENTRAL) && !defined(CONFIG_BT_FILTER_ACCEPT_LIST)
	/* Host-based auto-connect: go back to scanning for this peer. */
	if (atomic_test_bit(conn->flags, BT_CONN_AUTO_CONNECT)) {
		bt_conn_set_state(conn, BT_CONN_CONNECT_SCAN);
		bt_le_scan_update(false);
	}
#endif /* defined(CONFIG_BT_CENTRAL) && !defined(CONFIG_BT_FILTER_ACCEPT_LIST) */

	bt_conn_unref(conn);
}
774
hci_le_read_remote_features(struct bt_conn * conn)775 static int hci_le_read_remote_features(struct bt_conn *conn)
776 {
777 struct bt_hci_cp_le_read_remote_features *cp;
778 struct net_buf *buf;
779
780 buf = bt_hci_cmd_create(BT_HCI_OP_LE_READ_REMOTE_FEATURES,
781 sizeof(*cp));
782 if (!buf) {
783 return -ENOBUFS;
784 }
785
786 cp = net_buf_add(buf, sizeof(*cp));
787 cp->handle = sys_cpu_to_le16(conn->handle);
788 bt_hci_cmd_send(BT_HCI_OP_LE_READ_REMOTE_FEATURES, buf);
789
790 return 0;
791 }
792
hci_read_remote_version(struct bt_conn * conn)793 static int hci_read_remote_version(struct bt_conn *conn)
794 {
795 struct bt_hci_cp_read_remote_version_info *cp;
796 struct net_buf *buf;
797
798 if (conn->state != BT_CONN_CONNECTED) {
799 return -ENOTCONN;
800 }
801
802 /* Remote version cannot change. */
803 if (atomic_test_bit(conn->flags, BT_CONN_AUTO_VERSION_INFO)) {
804 return 0;
805 }
806
807 buf = bt_hci_cmd_create(BT_HCI_OP_READ_REMOTE_VERSION_INFO,
808 sizeof(*cp));
809 if (!buf) {
810 return -ENOBUFS;
811 }
812
813 cp = net_buf_add(buf, sizeof(*cp));
814 cp->handle = sys_cpu_to_le16(conn->handle);
815
816 return bt_hci_cmd_send_sync(BT_HCI_OP_READ_REMOTE_VERSION_INFO, buf,
817 NULL);
818 }
819
820 /* LE Data Length Change Event is optional so this function just ignore
821 * error and stack will continue to use default values.
822 */
/* Request new Data Length Extension TX parameters for @a conn.
 * Queued asynchronously; the LE Data Length Change event (optional in
 * controllers) reports the outcome, so callers may ignore failures and
 * keep using default values.
 */
int bt_le_set_data_len(struct bt_conn *conn, uint16_t tx_octets, uint16_t tx_time)
{
	struct net_buf *buf;
	struct bt_hci_cp_le_set_data_len *cp;

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_DATA_LEN, sizeof(*cp));
	if (buf == NULL) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->tx_time = sys_cpu_to_le16(tx_time);
	cp->tx_octets = sys_cpu_to_le16(tx_octets);
	cp->handle = sys_cpu_to_le16(conn->handle);

	return bt_hci_cmd_send(BT_HCI_OP_LE_SET_DATA_LEN, buf);
}
840
841 #if defined(CONFIG_BT_USER_PHY_UPDATE)
hci_le_read_phy(struct bt_conn * conn)842 static int hci_le_read_phy(struct bt_conn *conn)
843 {
844 struct bt_hci_cp_le_read_phy *cp;
845 struct bt_hci_rp_le_read_phy *rp;
846 struct net_buf *buf, *rsp;
847 int err;
848
849 buf = bt_hci_cmd_create(BT_HCI_OP_LE_READ_PHY, sizeof(*cp));
850 if (!buf) {
851 return -ENOBUFS;
852 }
853
854 cp = net_buf_add(buf, sizeof(*cp));
855 cp->handle = sys_cpu_to_le16(conn->handle);
856
857 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_PHY, buf, &rsp);
858 if (err) {
859 return err;
860 }
861
862 rp = (void *)rsp->data;
863 conn->le.phy.tx_phy = bt_get_phy(rp->tx_phy);
864 conn->le.phy.rx_phy = bt_get_phy(rp->rx_phy);
865 net_buf_unref(rsp);
866
867 return 0;
868 }
869 #endif /* defined(CONFIG_BT_USER_PHY_UPDATE) */
870
/* Request a PHY update on @a conn with the given preference masks and
 * coded PHY options. Queued asynchronously via bt_hci_cmd_send().
 */
int bt_le_set_phy(struct bt_conn *conn, uint8_t all_phys,
		  uint8_t pref_tx_phy, uint8_t pref_rx_phy, uint8_t phy_opts)
{
	struct net_buf *buf;
	struct bt_hci_cp_le_set_phy *cp;

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_PHY, sizeof(*cp));
	if (buf == NULL) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->phy_opts = phy_opts;
	cp->rx_phys = pref_rx_phy;
	cp->tx_phys = pref_tx_phy;
	cp->all_phys = all_phys;
	cp->handle = sys_cpu_to_le16(conn->handle);

	return bt_hci_cmd_send(BT_HCI_OP_LE_SET_PHY, buf);
}
891
/* Look up the connection object, if any, that is pending on the given
 * @a role and @a peer_addr: CONNECT/CONNECT_AUTO for the central role,
 * CONNECT_DIR_ADV/CONNECT_ADV for the peripheral role.
 */
static struct bt_conn *find_pending_connect(uint8_t role, bt_addr_le_t *peer_addr)
{
	struct bt_conn *conn = NULL;

	if (IS_ENABLED(CONFIG_BT_CENTRAL) && role == BT_HCI_ROLE_CENTRAL) {
		conn = bt_conn_lookup_state_le(BT_ID_DEFAULT, peer_addr,
					       BT_CONN_CONNECT);
		if (!conn && IS_ENABLED(CONFIG_BT_FILTER_ACCEPT_LIST)) {
			/* Filter-list based connect has no concrete peer. */
			conn = bt_conn_lookup_state_le(BT_ID_DEFAULT,
						       BT_ADDR_LE_NONE,
						       BT_CONN_CONNECT_AUTO);
		}
	} else if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
		   role == BT_HCI_ROLE_PERIPHERAL) {
		conn = bt_conn_lookup_state_le(bt_dev.adv_conn_id, peer_addr,
					       BT_CONN_CONNECT_DIR_ADV);
		if (!conn) {
			/* Undirected advertising has no concrete peer. */
			conn = bt_conn_lookup_state_le(bt_dev.adv_conn_id,
						       BT_ADDR_LE_NONE,
						       BT_CONN_CONNECT_ADV);
		}
	}

	return conn;
}
926
/* Kick off the chain of automatic post-connection procedures.
 *
 * Runs at most one asynchronous procedure per invocation (feature
 * exchange, then version exchange, then PHY update, then data length
 * update); each early return hands control to the corresponding
 * completion handler, which re-enters this chain.
 */
static void conn_auto_initiate(struct bt_conn *conn)
{
	int err;

	if (conn->state != BT_CONN_CONNECTED) {
		/* It is possible that connection was disconnected directly from
		 * connected callback so we must check state before doing
		 * connection parameters update.
		 */
		return;
	}

	/* Feature exchange: centrals always may; peripherals only when the
	 * controller supports peripheral-initiated exchange.
	 */
	if (!atomic_test_bit(conn->flags, BT_CONN_AUTO_FEATURE_EXCH) &&
	    ((conn->role == BT_HCI_ROLE_CENTRAL) ||
	     BT_FEAT_LE_PER_INIT_FEAT_XCHG(bt_dev.le.features))) {
		err = hci_le_read_remote_features(conn);
		if (!err) {
			return;
		}
	}

	if (IS_ENABLED(CONFIG_BT_REMOTE_VERSION) &&
	    !atomic_test_bit(conn->flags, BT_CONN_AUTO_VERSION_INFO)) {
		err = hci_read_remote_version(conn);
		if (!err) {
			return;
		}
	}

	if (IS_ENABLED(CONFIG_BT_AUTO_PHY_UPDATE) &&
	    !atomic_test_bit(conn->flags, BT_CONN_AUTO_PHY_COMPLETE) &&
	    BT_FEAT_LE_PHY_2M(bt_dev.le.features)) {
		err = bt_le_set_phy(conn, 0U, BT_HCI_LE_PHY_PREFER_2M,
				    BT_HCI_LE_PHY_PREFER_2M,
				    BT_HCI_LE_PHY_CODED_ANY);
		if (!err) {
			atomic_set_bit(conn->flags, BT_CONN_AUTO_PHY_UPDATE);
			return;
		}

		BT_ERR("Failed to set LE PHY (%d)", err);
	}

	if (IS_ENABLED(CONFIG_BT_AUTO_DATA_LEN_UPDATE) &&
	    BT_FEAT_LE_DLE(bt_dev.le.features)) {
		/* Some controllers need the host to drive the DLE update. */
		if (IS_BT_QUIRK_NO_AUTO_DLE(&bt_dev)) {
			uint16_t tx_octets, tx_time;

			err = hci_le_read_max_data_len(&tx_octets, &tx_time);
			if (!err) {
				err = bt_le_set_data_len(conn,
						tx_octets, tx_time);
				if (err) {
					BT_ERR("Failed to set data len (%d)", err);
				}
			}
		} else {
			/* No need to auto-initiate DLE procedure.
			 * It is done by the controller.
			 */
		}
	}
}
990
/* Handle a connection complete event with status "unknown connection
 * ID", i.e. the completion of a Create Connection Cancel.
 *
 * Marks the pending central connection as disconnected or, depending on
 * the auto-connect configuration, restarts the connection attempt.
 */
static void le_conn_complete_cancel(void)
{
	struct bt_conn *conn;

	/* Handle create connection cancel.
	 *
	 * There is no need to check ID address as only one
	 * connection in central role can be in pending state.
	 */
	conn = find_pending_connect(BT_HCI_ROLE_CENTRAL, NULL);
	if (!conn) {
		BT_ERR("No pending central connection");
		return;
	}

	conn->err = BT_HCI_ERR_UNKNOWN_CONN_ID;

	/* Handle cancellation of outgoing connection attempt. */
	if (!IS_ENABLED(CONFIG_BT_FILTER_ACCEPT_LIST)) {
		/* We notify before checking autoconnect flag
		 * as application may choose to change it from
		 * callback.
		 */
		bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
		/* Check if device is marked for autoconnect. */
		if (atomic_test_bit(conn->flags, BT_CONN_AUTO_CONNECT)) {
			/* Restart passive scanner for device */
			bt_conn_set_state(conn, BT_CONN_CONNECT_SCAN);
		}
	} else {
		if (atomic_test_bit(conn->flags, BT_CONN_AUTO_CONNECT)) {
			/* Restart FAL initiator after RPA timeout. */
			bt_le_create_conn(conn);
		} else {
			/* Create connection canceled by timeout */
			bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
		}
	}

	bt_conn_unref(conn);
}
1032
/* Handle a connection complete event with status "advertising timeout",
 * i.e. expiry of high duty cycle directed advertising.
 *
 * Only acts for legacy advertising (no controller extended advertising
 * feature); disables the advertiser and marks the pending peripheral
 * connection as disconnected with BT_HCI_ERR_ADV_TIMEOUT.
 */
static void le_conn_complete_adv_timeout(void)
{
	if (!(IS_ENABLED(CONFIG_BT_EXT_ADV) &&
	      BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features))) {
		struct bt_le_ext_adv *adv = bt_le_adv_lookup_legacy();
		struct bt_conn *conn;

		/* Handle advertising timeout after high duty cycle directed
		 * advertising.
		 */

		atomic_clear_bit(adv->flags, BT_ADV_ENABLED);

		if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
		    !BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
			/* No advertising set terminated event, must be a
			 * legacy advertiser set.
			 */
			bt_le_adv_delete_legacy();
		}

		/* There is no need to check ID address as only one
		 * connection in peripheral role can be in pending state.
		 */
		conn = find_pending_connect(BT_HCI_ROLE_PERIPHERAL, NULL);
		if (!conn) {
			BT_ERR("No pending peripheral connection");
			return;
		}

		conn->err = BT_HCI_ERR_ADV_TIMEOUT;
		bt_conn_set_state(conn, BT_CONN_DISCONNECTED);

		bt_conn_unref(conn);
	}
}
1069
/* Entry point for the LE Enhanced Connection Complete event.
 *
 * In multi-identity/multi-advertising-set builds, a successful
 * peripheral connection with controller extended advertising support
 * is cached in bt_dev.cached_conn_complete and processed later (when
 * the matching advertising set is known); everything else is handled
 * immediately by bt_hci_le_enh_conn_complete().
 */
static void enh_conn_complete(struct bt_hci_evt_le_enh_conn_complete *evt)
{
#if (CONFIG_BT_ID_MAX > 1) && (CONFIG_BT_EXT_ADV_MAX_ADV_SET > 1)
	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
	    evt->role == BT_HCI_ROLE_PERIPHERAL &&
	    evt->status == BT_HCI_ERR_SUCCESS &&
	    (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
	     BT_FEAT_LE_EXT_ADV(bt_dev.le.features))) {

		/* Cache the connection complete event. Process it later.
		 * See bt_dev.cached_conn_complete.
		 */
		for (int i = 0; i < ARRAY_SIZE(bt_dev.cached_conn_complete); i++) {
			if (!bt_dev.cached_conn_complete[i].valid) {
				(void)memcpy(&bt_dev.cached_conn_complete[i].evt,
					evt,
					sizeof(struct bt_hci_evt_le_enh_conn_complete));
				bt_dev.cached_conn_complete[i].valid = true;
				return;
			}
		}

		__ASSERT(false, "No more cache entries available."
				"This should not happen by design");

		return;
	}
#endif
	bt_hci_le_enh_conn_complete(evt);
}
1100
bt_hci_le_enh_conn_complete(struct bt_hci_evt_le_enh_conn_complete * evt)1101 void bt_hci_le_enh_conn_complete(struct bt_hci_evt_le_enh_conn_complete *evt)
1102 {
1103 uint16_t handle = sys_le16_to_cpu(evt->handle);
1104 bool is_disconnected = conn_handle_is_disconnected(handle);
1105 bt_addr_le_t peer_addr, id_addr;
1106 struct bt_conn *conn;
1107
1108 BT_DBG("status 0x%02x handle %u role %u peer %s peer RPA %s",
1109 evt->status, handle, evt->role, bt_addr_le_str(&evt->peer_addr),
1110 bt_addr_str(&evt->peer_rpa));
1111 BT_DBG("local RPA %s", bt_addr_str(&evt->local_rpa));
1112
1113 #if defined(CONFIG_BT_SMP)
1114 bt_id_pending_keys_update();
1115 #endif
1116
1117 if (evt->status) {
1118 if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
1119 evt->status == BT_HCI_ERR_ADV_TIMEOUT) {
1120 le_conn_complete_adv_timeout();
1121 return;
1122 }
1123
1124 if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
1125 evt->status == BT_HCI_ERR_UNKNOWN_CONN_ID) {
1126 le_conn_complete_cancel();
1127 bt_le_scan_update(false);
1128 return;
1129 }
1130
1131 BT_WARN("Unexpected status 0x%02x", evt->status);
1132
1133 return;
1134 }
1135
1136 /* Translate "enhanced" identity address type to normal one */
1137 if (evt->peer_addr.type == BT_ADDR_LE_PUBLIC_ID ||
1138 evt->peer_addr.type == BT_ADDR_LE_RANDOM_ID) {
1139 bt_addr_le_copy(&id_addr, &evt->peer_addr);
1140 id_addr.type -= BT_ADDR_LE_PUBLIC_ID;
1141
1142 bt_addr_copy(&peer_addr.a, &evt->peer_rpa);
1143 peer_addr.type = BT_ADDR_LE_RANDOM;
1144 } else {
1145 uint8_t id = evt->role == BT_HCI_ROLE_PERIPHERAL ? bt_dev.adv_conn_id :
1146 BT_ID_DEFAULT;
1147
1148 bt_addr_le_copy(&id_addr,
1149 bt_lookup_id_addr(id, &evt->peer_addr));
1150 bt_addr_le_copy(&peer_addr, &evt->peer_addr);
1151 }
1152
1153 conn = find_pending_connect(evt->role, &id_addr);
1154
1155 if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
1156 evt->role == BT_HCI_ROLE_PERIPHERAL &&
1157 !(IS_ENABLED(CONFIG_BT_EXT_ADV) &&
1158 BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features))) {
1159 struct bt_le_ext_adv *adv = bt_le_adv_lookup_legacy();
1160 /* Clear advertising even if we are not able to add connection
1161 * object to keep host in sync with controller state.
1162 */
1163 atomic_clear_bit(adv->flags, BT_ADV_ENABLED);
1164 (void)bt_le_lim_adv_cancel_timeout(adv);
1165 }
1166
1167 if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
1168 evt->role == BT_HCI_ROLE_CENTRAL) {
1169 /* Clear initiating even if we are not able to add connection
1170 * object to keep the host in sync with controller state.
1171 */
1172 atomic_clear_bit(bt_dev.flags, BT_DEV_INITIATING);
1173 }
1174
1175 if (!conn) {
1176 BT_ERR("No pending conn for peer %s",
1177 bt_addr_le_str(&evt->peer_addr));
1178 bt_hci_disconnect(handle, BT_HCI_ERR_UNSPECIFIED);
1179 return;
1180 }
1181
1182 conn->handle = handle;
1183 bt_addr_le_copy(&conn->le.dst, &id_addr);
1184 conn->le.interval = sys_le16_to_cpu(evt->interval);
1185 conn->le.latency = sys_le16_to_cpu(evt->latency);
1186 conn->le.timeout = sys_le16_to_cpu(evt->supv_timeout);
1187 conn->role = evt->role;
1188 conn->err = 0U;
1189
1190 #if defined(CONFIG_BT_USER_DATA_LEN_UPDATE)
1191 conn->le.data_len.tx_max_len = BT_GAP_DATA_LEN_DEFAULT;
1192 conn->le.data_len.tx_max_time = BT_GAP_DATA_TIME_DEFAULT;
1193 conn->le.data_len.rx_max_len = BT_GAP_DATA_LEN_DEFAULT;
1194 conn->le.data_len.rx_max_time = BT_GAP_DATA_TIME_DEFAULT;
1195 #endif
1196
1197 #if defined(CONFIG_BT_USER_PHY_UPDATE)
1198 conn->le.phy.tx_phy = BT_GAP_LE_PHY_1M;
1199 conn->le.phy.rx_phy = BT_GAP_LE_PHY_1M;
1200 #endif
1201 /*
1202 * Use connection address (instead of identity address) as initiator
1203 * or responder address. Only peripheral needs to be updated. For central all
1204 * was set during outgoing connection creation.
1205 */
1206 if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
1207 conn->role == BT_HCI_ROLE_PERIPHERAL) {
1208 bt_addr_le_copy(&conn->le.init_addr, &peer_addr);
1209
1210 if (!(IS_ENABLED(CONFIG_BT_EXT_ADV) &&
1211 BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features))) {
1212 struct bt_le_ext_adv *adv = bt_le_adv_lookup_legacy();
1213
1214 if (IS_ENABLED(CONFIG_BT_PRIVACY) &&
1215 !atomic_test_bit(adv->flags, BT_ADV_USE_IDENTITY)) {
1216 conn->le.resp_addr.type = BT_ADDR_LE_RANDOM;
1217 if (bt_addr_cmp(&evt->local_rpa,
1218 BT_ADDR_ANY) != 0) {
1219 bt_addr_copy(&conn->le.resp_addr.a,
1220 &evt->local_rpa);
1221 } else {
1222 bt_addr_copy(&conn->le.resp_addr.a,
1223 &bt_dev.random_addr.a);
1224 }
1225 } else {
1226 bt_addr_le_copy(&conn->le.resp_addr,
1227 &bt_dev.id_addr[conn->id]);
1228 }
1229 } else {
1230 /* Copy the local RPA and handle this in advertising set
1231 * terminated event.
1232 */
1233 bt_addr_copy(&conn->le.resp_addr.a, &evt->local_rpa);
1234 }
1235
1236 /* if the controller supports, lets advertise for another
1237 * peripheral connection.
1238 * check for connectable advertising state is sufficient as
1239 * this is how this le connection complete for peripheral occurred.
1240 */
1241 if (BT_LE_STATES_PER_CONN_ADV(bt_dev.le.states)) {
1242 bt_le_adv_resume();
1243 }
1244
1245 if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
1246 !BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
1247 struct bt_le_ext_adv *adv = bt_le_adv_lookup_legacy();
1248 /* No advertising set terminated event, must be a
1249 * legacy advertiser set.
1250 */
1251 if (!atomic_test_bit(adv->flags, BT_ADV_PERSIST)) {
1252 bt_le_adv_delete_legacy();
1253 }
1254 }
1255 }
1256
1257 if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
1258 conn->role == BT_HCI_ROLE_CENTRAL) {
1259 bt_addr_le_copy(&conn->le.resp_addr, &peer_addr);
1260
1261 if (IS_ENABLED(CONFIG_BT_PRIVACY)) {
1262 conn->le.init_addr.type = BT_ADDR_LE_RANDOM;
1263 if (bt_addr_cmp(&evt->local_rpa, BT_ADDR_ANY) != 0) {
1264 bt_addr_copy(&conn->le.init_addr.a,
1265 &evt->local_rpa);
1266 } else {
1267 bt_addr_copy(&conn->le.init_addr.a,
1268 &bt_dev.random_addr.a);
1269 }
1270 } else {
1271 bt_addr_le_copy(&conn->le.init_addr,
1272 &bt_dev.id_addr[conn->id]);
1273 }
1274 }
1275
1276 #if defined(CONFIG_BT_USER_PHY_UPDATE)
1277 if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
1278 BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
1279 int err;
1280
1281 err = hci_le_read_phy(conn);
1282 if (err) {
1283 BT_WARN("Failed to read PHY (%d)", err);
1284 } else {
1285 if (IS_ENABLED(CONFIG_BT_AUTO_PHY_UPDATE) &&
1286 conn->le.phy.tx_phy == BT_HCI_LE_PHY_PREFER_2M &&
1287 conn->le.phy.rx_phy == BT_HCI_LE_PHY_PREFER_2M) {
1288 /* Already on 2M, skip auto-phy update. */
1289 atomic_set_bit(conn->flags,
1290 BT_CONN_AUTO_PHY_COMPLETE);
1291 }
1292 }
1293 }
1294 #endif /* defined(CONFIG_BT_USER_PHY_UPDATE) */
1295
1296 bt_conn_set_state(conn, BT_CONN_CONNECTED);
1297
1298 if (is_disconnected) {
1299 /* Mark the connection as already disconnected before calling
1300 * the connected callback, so that the application cannot
1301 * start sending packets
1302 */
1303 bt_conn_set_state(conn, BT_CONN_DISCONNECT_COMPLETE);
1304 }
1305
1306 bt_conn_connected(conn);
1307
1308 /* Start auto-initiated procedures */
1309 conn_auto_initiate(conn);
1310
1311 bt_conn_unref(conn);
1312
1313 if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
1314 conn->role == BT_HCI_ROLE_CENTRAL) {
1315 bt_le_scan_update(false);
1316 }
1317 }
1318
le_enh_conn_complete(struct net_buf * buf)1319 static void le_enh_conn_complete(struct net_buf *buf)
1320 {
1321 enh_conn_complete((void *)buf->data);
1322 }
1323
le_legacy_conn_complete(struct net_buf * buf)1324 static void le_legacy_conn_complete(struct net_buf *buf)
1325 {
1326 struct bt_hci_evt_le_conn_complete *evt = (void *)buf->data;
1327 struct bt_hci_evt_le_enh_conn_complete enh;
1328
1329 BT_DBG("status 0x%02x role %u %s", evt->status, evt->role,
1330 bt_addr_le_str(&evt->peer_addr));
1331
1332 enh.status = evt->status;
1333 enh.handle = evt->handle;
1334 enh.role = evt->role;
1335 enh.interval = evt->interval;
1336 enh.latency = evt->latency;
1337 enh.supv_timeout = evt->supv_timeout;
1338 enh.clock_accuracy = evt->clock_accuracy;
1339
1340 bt_addr_le_copy(&enh.peer_addr, &evt->peer_addr);
1341
1342 if (IS_ENABLED(CONFIG_BT_PRIVACY)) {
1343 bt_addr_copy(&enh.local_rpa, &bt_dev.random_addr.a);
1344 } else {
1345 bt_addr_copy(&enh.local_rpa, BT_ADDR_ANY);
1346 }
1347
1348 bt_addr_copy(&enh.peer_rpa, BT_ADDR_ANY);
1349
1350 enh_conn_complete(&enh);
1351 }
1352
/* Handle an LE Read Remote Features Complete event: store the remote
 * feature set (on success) and continue any auto-initiated procedures
 * that were waiting for the feature exchange.
 */
static void le_remote_feat_complete(struct net_buf *buf)
{
	struct bt_hci_evt_le_remote_feat_complete *evt = (void *)buf->data;
	uint16_t handle = sys_le16_to_cpu(evt->handle);
	struct bt_conn *conn;

	conn = bt_conn_lookup_handle(handle);
	if (!conn) {
		BT_ERR("Unable to lookup conn for handle %u", handle);
		return;
	}

	if (!evt->status) {
		memcpy(conn->le.features, evt->features,
		       sizeof(conn->le.features));
	}

	/* Mark the exchange as done even on failure so the auto-initiate
	 * state machine can make progress.
	 */
	atomic_set_bit(conn->flags, BT_CONN_AUTO_FEATURE_EXCH);

	if (IS_ENABLED(CONFIG_BT_REMOTE_INFO) &&
	    !IS_ENABLED(CONFIG_BT_REMOTE_VERSION)) {
		/* When remote version is also enabled, the remote-info
		 * callback is instead issued from the version complete
		 * handler.
		 */
		notify_remote_info(conn);
	}

	/* Continue with auto-initiated procedures */
	conn_auto_initiate(conn);

	bt_conn_unref(conn);
}
1382
1383 #if defined(CONFIG_BT_DATA_LEN_UPDATE)
/* Handle an LE Data Length Change event: record the new TX/RX octet and
 * time limits on the connection and notify the application when user
 * data length updates are enabled.
 */
static void le_data_len_change(struct net_buf *buf)
{
	struct bt_hci_evt_le_data_len_change *evt = (void *)buf->data;
	uint16_t handle = sys_le16_to_cpu(evt->handle);
	struct bt_conn *conn;

	conn = bt_conn_lookup_handle(handle);
	if (!conn) {
		BT_ERR("Unable to lookup conn for handle %u", handle);
		return;
	}

#if defined(CONFIG_BT_USER_DATA_LEN_UPDATE)
	uint16_t max_tx_octets = sys_le16_to_cpu(evt->max_tx_octets);
	uint16_t max_rx_octets = sys_le16_to_cpu(evt->max_rx_octets);
	uint16_t max_tx_time = sys_le16_to_cpu(evt->max_tx_time);
	uint16_t max_rx_time = sys_le16_to_cpu(evt->max_rx_time);

	if (IS_ENABLED(CONFIG_BT_AUTO_DATA_LEN_UPDATE)) {
		/* The auto-initiated data length update is now complete */
		atomic_set_bit(conn->flags, BT_CONN_AUTO_DATA_LEN_COMPLETE);
	}

	BT_DBG("max. tx: %u (%uus), max. rx: %u (%uus)",
	       max_tx_octets, max_tx_time, max_rx_octets, max_rx_time);

	conn->le.data_len.tx_max_len = max_tx_octets;
	conn->le.data_len.tx_max_time = max_tx_time;
	conn->le.data_len.rx_max_len = max_rx_octets;
	conn->le.data_len.rx_max_time = max_rx_time;
	notify_le_data_len_updated(conn);
#endif

	bt_conn_unref(conn);
}
1418 #endif /* CONFIG_BT_DATA_LEN_UPDATE */
1419
1420 #if defined(CONFIG_BT_PHY_UPDATE)
/* Handle an LE PHY Update Complete event: finish a pending
 * auto-initiated PHY update (if one was in flight) and, when user PHY
 * updates are enabled, record and notify the new PHYs.
 */
static void le_phy_update_complete(struct net_buf *buf)
{
	struct bt_hci_evt_le_phy_update_complete *evt = (void *)buf->data;
	uint16_t handle = sys_le16_to_cpu(evt->handle);
	struct bt_conn *conn;

	conn = bt_conn_lookup_handle(handle);
	if (!conn) {
		BT_ERR("Unable to lookup conn for handle %u", handle);
		return;
	}

	BT_DBG("PHY updated: status: 0x%02x, tx: %u, rx: %u",
	       evt->status, evt->tx_phy, evt->rx_phy);

	if (IS_ENABLED(CONFIG_BT_AUTO_PHY_UPDATE) &&
	    atomic_test_and_clear_bit(conn->flags, BT_CONN_AUTO_PHY_UPDATE)) {
		/* This completes the auto-initiated PHY update step */
		atomic_set_bit(conn->flags, BT_CONN_AUTO_PHY_COMPLETE);

		/* Continue with auto-initiated procedures */
		conn_auto_initiate(conn);
	}

#if defined(CONFIG_BT_USER_PHY_UPDATE)
	conn->le.phy.tx_phy = bt_get_phy(evt->tx_phy);
	conn->le.phy.rx_phy = bt_get_phy(evt->rx_phy);
	notify_le_phy_updated(conn);
#endif

	bt_conn_unref(conn);
}
1452 #endif /* CONFIG_BT_PHY_UPDATE */
1453
bt_le_conn_params_valid(const struct bt_le_conn_param * param)1454 bool bt_le_conn_params_valid(const struct bt_le_conn_param *param)
1455 {
1456 /* All limits according to BT Core spec 5.0 [Vol 2, Part E, 7.8.12] */
1457
1458 if (param->interval_min > param->interval_max ||
1459 param->interval_min < 6 || param->interval_max > 3200) {
1460 return false;
1461 }
1462
1463 if (param->latency > 499) {
1464 return false;
1465 }
1466
1467 if (param->timeout < 10 || param->timeout > 3200 ||
1468 ((param->timeout * 4U) <=
1469 ((1U + param->latency) * param->interval_max))) {
1470 return false;
1471 }
1472
1473 return true;
1474 }
1475
le_conn_param_neg_reply(uint16_t handle,uint8_t reason)1476 static void le_conn_param_neg_reply(uint16_t handle, uint8_t reason)
1477 {
1478 struct bt_hci_cp_le_conn_param_req_neg_reply *cp;
1479 struct net_buf *buf;
1480
1481 buf = bt_hci_cmd_create(BT_HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY,
1482 sizeof(*cp));
1483 if (!buf) {
1484 BT_ERR("Unable to allocate buffer");
1485 return;
1486 }
1487
1488 cp = net_buf_add(buf, sizeof(*cp));
1489 cp->handle = sys_cpu_to_le16(handle);
1490 cp->reason = sys_cpu_to_le16(reason);
1491
1492 bt_hci_cmd_send(BT_HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, buf);
1493 }
1494
le_conn_param_req_reply(uint16_t handle,const struct bt_le_conn_param * param)1495 static int le_conn_param_req_reply(uint16_t handle,
1496 const struct bt_le_conn_param *param)
1497 {
1498 struct bt_hci_cp_le_conn_param_req_reply *cp;
1499 struct net_buf *buf;
1500
1501 buf = bt_hci_cmd_create(BT_HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(*cp));
1502 if (!buf) {
1503 return -ENOBUFS;
1504 }
1505
1506 cp = net_buf_add(buf, sizeof(*cp));
1507 (void)memset(cp, 0, sizeof(*cp));
1508
1509 cp->handle = sys_cpu_to_le16(handle);
1510 cp->interval_min = sys_cpu_to_le16(param->interval_min);
1511 cp->interval_max = sys_cpu_to_le16(param->interval_max);
1512 cp->latency = sys_cpu_to_le16(param->latency);
1513 cp->timeout = sys_cpu_to_le16(param->timeout);
1514
1515 return bt_hci_cmd_send(BT_HCI_OP_LE_CONN_PARAM_REQ_REPLY, buf);
1516 }
1517
le_conn_param_req(struct net_buf * buf)1518 static void le_conn_param_req(struct net_buf *buf)
1519 {
1520 struct bt_hci_evt_le_conn_param_req *evt = (void *)buf->data;
1521 struct bt_le_conn_param param;
1522 struct bt_conn *conn;
1523 uint16_t handle;
1524
1525 handle = sys_le16_to_cpu(evt->handle);
1526 param.interval_min = sys_le16_to_cpu(evt->interval_min);
1527 param.interval_max = sys_le16_to_cpu(evt->interval_max);
1528 param.latency = sys_le16_to_cpu(evt->latency);
1529 param.timeout = sys_le16_to_cpu(evt->timeout);
1530
1531 conn = bt_conn_lookup_handle(handle);
1532 if (!conn) {
1533 BT_ERR("Unable to lookup conn for handle %u", handle);
1534 le_conn_param_neg_reply(handle, BT_HCI_ERR_UNKNOWN_CONN_ID);
1535 return;
1536 }
1537
1538 if (!le_param_req(conn, ¶m)) {
1539 le_conn_param_neg_reply(handle, BT_HCI_ERR_INVALID_LL_PARAM);
1540 } else {
1541 le_conn_param_req_reply(handle, ¶m);
1542 }
1543
1544 bt_conn_unref(conn);
1545 }
1546
/* Handle an LE Connection Update Complete event.
 *
 * On success the new parameters are stored and the application is
 * notified. If the update failed because the remote does not support
 * the Connection Parameter Request procedure and we are peripheral,
 * fall back (at most once per connection) to the L2CAP connection
 * parameter update request.
 */
static void le_conn_update_complete(struct net_buf *buf)
{
	struct bt_hci_evt_le_conn_update_complete *evt = (void *)buf->data;
	struct bt_conn *conn;
	uint16_t handle;

	handle = sys_le16_to_cpu(evt->handle);

	BT_DBG("status 0x%02x, handle %u", evt->status, handle);

	conn = bt_conn_lookup_handle(handle);
	if (!conn) {
		BT_ERR("Unable to lookup conn for handle %u", handle);
		return;
	}

	if (!evt->status) {
		conn->le.interval = sys_le16_to_cpu(evt->interval);
		conn->le.latency = sys_le16_to_cpu(evt->latency);
		conn->le.timeout = sys_le16_to_cpu(evt->supv_timeout);
		notify_le_param_updated(conn);
	} else if (evt->status == BT_HCI_ERR_UNSUPP_REMOTE_FEATURE &&
		   conn->role == BT_HCI_ROLE_PERIPHERAL &&
		   !atomic_test_and_set_bit(conn->flags,
					    BT_CONN_PERIPHERAL_PARAM_L2CAP)) {
		/* CPR not supported, let's try L2CAP CPUP instead */
		struct bt_le_conn_param param;

		/* Re-issue the parameters the peripheral originally asked
		 * for, which are kept pending on the connection object.
		 */
		param.interval_min = conn->le.interval_min;
		param.interval_max = conn->le.interval_max;
		param.latency = conn->le.pending_latency;
		param.timeout = conn->le.pending_timeout;

		bt_l2cap_update_conn_param(conn, &param);
	}

	bt_conn_unref(conn);
}
1585
1586 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
/* Enable controller-to-host ACL flow control during init: configure the
 * host's buffer sizes/counts with Host_Buffer_Size, then turn the
 * feature on with Set_Controller_To_Host_Flow_Control.
 *
 * Returns 0 on success (including when the controller does not support
 * the feature) or a negative error code.
 */
static int set_flow_control(void)
{
	struct bt_hci_cp_host_buffer_size *hbs;
	struct net_buf *buf;
	int err;

	/* Check if host flow control is actually supported */
	if (!BT_CMD_TEST(bt_dev.supported_commands, 10, 5)) {
		BT_WARN("Controller to host flow control not supported");
		return 0;
	}

	buf = bt_hci_cmd_create(BT_HCI_OP_HOST_BUFFER_SIZE,
				sizeof(*hbs));
	if (!buf) {
		return -ENOBUFS;
	}

	/* SCO fields are left zeroed; only ACL buffers are advertised */
	hbs = net_buf_add(buf, sizeof(*hbs));
	(void)memset(hbs, 0, sizeof(*hbs));
	hbs->acl_mtu = sys_cpu_to_le16(CONFIG_BT_BUF_ACL_RX_SIZE);
	hbs->acl_pkts = sys_cpu_to_le16(CONFIG_BT_BUF_ACL_RX_COUNT);

	err = bt_hci_cmd_send_sync(BT_HCI_OP_HOST_BUFFER_SIZE, buf, NULL);
	if (err) {
		return err;
	}

	buf = bt_hci_cmd_create(BT_HCI_OP_SET_CTL_TO_HOST_FLOW, 1);
	if (!buf) {
		return -ENOBUFS;
	}

	net_buf_add_u8(buf, BT_HCI_CTL_TO_HOST_FLOW_ENABLE);
	return bt_hci_cmd_send_sync(BT_HCI_OP_SET_CTL_TO_HOST_FLOW, buf, NULL);
}
1623 #endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */
1624
/* Remove all pairing/bonding state for the given identity and peer
 * address: disconnect any active connection, clear BR/EDR link keys and
 * SMP keys, wipe persisted GATT data and finally notify the
 * application's bond_deleted callback.
 */
static void unpair(uint8_t id, const bt_addr_le_t *addr)
{
	struct bt_keys *keys = NULL;
	struct bt_conn *conn = bt_conn_lookup_addr_le(id, addr);

	if (conn) {
		/* Clear the conn->le.keys pointer since we'll invalidate it,
		 * and don't want any subsequent code (like disconnected
		 * callbacks) accessing it.
		 */
		if (conn->type == BT_CONN_TYPE_LE) {
			keys = conn->le.keys;
			conn->le.keys = NULL;
		}

		bt_conn_disconnect(conn, BT_HCI_ERR_REMOTE_USER_TERM_CONN);
		bt_conn_unref(conn);
	}

	if (IS_ENABLED(CONFIG_BT_BREDR)) {
		/* LE Public may indicate BR/EDR as well */
		if (addr->type == BT_ADDR_LE_PUBLIC) {
			bt_keys_link_key_clear_addr(&addr->a);
		}
	}

	if (IS_ENABLED(CONFIG_BT_SMP)) {
		/* If no connection held the keys, look them up by address */
		if (!keys) {
			keys = bt_keys_find_addr(id, addr);
		}

		if (keys) {
			bt_keys_clear(keys);
		}
	}

	bt_gatt_clear(id, addr);

#if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_BREDR)
	if (bt_auth && bt_auth->bond_deleted) {
		bt_auth->bond_deleted(id, addr);
	}
#endif /* defined(CONFIG_BT_SMP) || defined(CONFIG_BT_BREDR) */
}
1669
unpair_remote(const struct bt_bond_info * info,void * data)1670 static void unpair_remote(const struct bt_bond_info *info, void *data)
1671 {
1672 uint8_t *id = (uint8_t *) data;
1673
1674 unpair(*id, &info->addr);
1675 }
1676
bt_unpair(uint8_t id,const bt_addr_le_t * addr)1677 int bt_unpair(uint8_t id, const bt_addr_le_t *addr)
1678 {
1679 if (id >= CONFIG_BT_ID_MAX) {
1680 return -EINVAL;
1681 }
1682
1683 if (IS_ENABLED(CONFIG_BT_SMP) &&
1684 (!addr || !bt_addr_le_cmp(addr, BT_ADDR_LE_ANY))) {
1685 bt_foreach_bond(id, unpair_remote, &id);
1686 return 0;
1687 }
1688
1689 unpair(id, addr);
1690 return 0;
1691 }
1692
1693 #endif /* CONFIG_BT_CONN */
1694
1695 #if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_BREDR)
bt_security_err_get(uint8_t hci_err)1696 enum bt_security_err bt_security_err_get(uint8_t hci_err)
1697 {
1698 switch (hci_err) {
1699 case BT_HCI_ERR_SUCCESS:
1700 return BT_SECURITY_ERR_SUCCESS;
1701 case BT_HCI_ERR_AUTH_FAIL:
1702 return BT_SECURITY_ERR_AUTH_FAIL;
1703 case BT_HCI_ERR_PIN_OR_KEY_MISSING:
1704 return BT_SECURITY_ERR_PIN_OR_KEY_MISSING;
1705 case BT_HCI_ERR_PAIRING_NOT_SUPPORTED:
1706 return BT_SECURITY_ERR_PAIR_NOT_SUPPORTED;
1707 case BT_HCI_ERR_PAIRING_NOT_ALLOWED:
1708 return BT_SECURITY_ERR_PAIR_NOT_ALLOWED;
1709 case BT_HCI_ERR_INVALID_PARAM:
1710 return BT_SECURITY_ERR_INVALID_PARAM;
1711 default:
1712 return BT_SECURITY_ERR_UNSPECIFIED;
1713 }
1714 }
1715 #endif /* defined(CONFIG_BT_SMP) || defined(CONFIG_BT_BREDR) */
1716
1717 #if defined(CONFIG_BT_SMP)
update_sec_level(struct bt_conn * conn)1718 static bool update_sec_level(struct bt_conn *conn)
1719 {
1720 if (conn->le.keys && (conn->le.keys->flags & BT_KEYS_AUTHENTICATED)) {
1721 if (conn->le.keys->flags & BT_KEYS_SC &&
1722 conn->le.keys->enc_size == BT_SMP_MAX_ENC_KEY_SIZE) {
1723 conn->sec_level = BT_SECURITY_L4;
1724 } else {
1725 conn->sec_level = BT_SECURITY_L3;
1726 }
1727 } else {
1728 conn->sec_level = BT_SECURITY_L2;
1729 }
1730
1731 return !(conn->required_sec_level > conn->sec_level);
1732 }
1733 #endif /* CONFIG_BT_SMP */
1734
1735 #if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_BREDR)
/* Handle an HCI Encryption Change event: record the new encryption
 * state, refresh key/security-level bookkeeping per transport and
 * notify the application. The connection is disconnected if the
 * required security level could not be reached.
 */
static void hci_encrypt_change(struct net_buf *buf)
{
	struct bt_hci_evt_encrypt_change *evt = (void *)buf->data;
	uint16_t handle = sys_le16_to_cpu(evt->handle);
	uint8_t status = evt->status;
	struct bt_conn *conn;

	BT_DBG("status 0x%02x handle %u encrypt 0x%02x", evt->status, handle,
	       evt->encrypt);

	conn = bt_conn_lookup_handle(handle);
	if (!conn) {
		BT_ERR("Unable to look up conn with handle %u", handle);
		return;
	}

	if (status) {
		/* Controller reported failure; just inform the application */
		bt_conn_security_changed(conn, status,
					 bt_security_err_get(status));
		bt_conn_unref(conn);
		return;
	}

	conn->encrypt = evt->encrypt;

#if defined(CONFIG_BT_SMP)
	if (conn->type == BT_CONN_TYPE_LE) {
		/*
		 * we update keys properties only on successful encryption to
		 * avoid losing valid keys if encryption was not successful.
		 *
		 * Update keys with last pairing info for proper sec level
		 * update. This is done only for LE transport, for BR/EDR keys
		 * are updated on HCI 'Link Key Notification Event'
		 */
		if (conn->encrypt) {
			bt_smp_update_keys(conn);
		}

		if (!update_sec_level(conn)) {
			/* Required level not reached: report auth failure */
			status = BT_HCI_ERR_AUTH_FAIL;
		}
	}
#endif /* CONFIG_BT_SMP */
#if defined(CONFIG_BT_BREDR)
	if (conn->type == BT_CONN_TYPE_BR) {
		if (!bt_br_update_sec_level(conn)) {
			bt_conn_unref(conn);
			return;
		}

		if (IS_ENABLED(CONFIG_BT_SMP)) {
			/*
			 * Start SMP over BR/EDR if we are pairing and are
			 * central on the link
			 */
			if (atomic_test_bit(conn->flags, BT_CONN_BR_PAIRING) &&
			    conn->role == BT_CONN_ROLE_CENTRAL) {
				bt_smp_br_send_pairing_req(conn);
			}
		}
	}
#endif /* CONFIG_BT_BREDR */

	bt_conn_security_changed(conn, status, bt_security_err_get(status));

	if (status) {
		BT_ERR("Failed to set required security level");
		bt_conn_disconnect(conn, status);
	}

	bt_conn_unref(conn);
}
1809
/* Handle an HCI Encryption Key Refresh Complete event: re-derive the
 * security level from the (possibly re-paired) keys and notify the
 * application, disconnecting if the required level is no longer met.
 */
static void hci_encrypt_key_refresh_complete(struct net_buf *buf)
{
	struct bt_hci_evt_encrypt_key_refresh_complete *evt = (void *)buf->data;
	uint8_t status = evt->status;
	struct bt_conn *conn;
	uint16_t handle;

	handle = sys_le16_to_cpu(evt->handle);

	BT_DBG("status 0x%02x handle %u", evt->status, handle);

	conn = bt_conn_lookup_handle(handle);
	if (!conn) {
		BT_ERR("Unable to look up conn with handle %u", handle);
		return;
	}

	if (status) {
		/* Controller reported failure; just inform the application */
		bt_conn_security_changed(conn, status,
					 bt_security_err_get(status));
		bt_conn_unref(conn);
		return;
	}

	/*
	 * Update keys with last pairing info for proper sec level update.
	 * This is done only for LE transport. For BR/EDR transport keys are
	 * updated on HCI 'Link Key Notification Event', therefore update here
	 * only security level based on available keys and encryption state.
	 */
#if defined(CONFIG_BT_SMP)
	if (conn->type == BT_CONN_TYPE_LE) {
		bt_smp_update_keys(conn);

		if (!update_sec_level(conn)) {
			/* Required level not reached: report auth failure */
			status = BT_HCI_ERR_AUTH_FAIL;
		}
	}
#endif /* CONFIG_BT_SMP */
#if defined(CONFIG_BT_BREDR)
	if (conn->type == BT_CONN_TYPE_BR) {
		if (!bt_br_update_sec_level(conn)) {
			bt_conn_unref(conn);
			return;
		}
	}
#endif /* CONFIG_BT_BREDR */

	bt_conn_security_changed(conn, status, bt_security_err_get(status));
	if (status) {
		BT_ERR("Failed to set required security level");
		bt_conn_disconnect(conn, status);
	}

	bt_conn_unref(conn);
}
1866 #endif /* CONFIG_BT_SMP || CONFIG_BT_BREDR */
1867
1868 #if defined(CONFIG_BT_REMOTE_VERSION)
bt_hci_evt_read_remote_version_complete(struct net_buf * buf)1869 static void bt_hci_evt_read_remote_version_complete(struct net_buf *buf)
1870 {
1871 struct bt_hci_evt_remote_version_info *evt;
1872 struct bt_conn *conn;
1873 uint16_t handle;
1874
1875 evt = net_buf_pull_mem(buf, sizeof(*evt));
1876 handle = sys_le16_to_cpu(evt->handle);
1877 conn = bt_conn_lookup_handle(handle);
1878 if (!conn) {
1879 BT_ERR("No connection for handle %u", handle);
1880 return;
1881 }
1882
1883 if (!evt->status) {
1884 conn->rv.version = evt->version;
1885 conn->rv.manufacturer = sys_le16_to_cpu(evt->manufacturer);
1886 conn->rv.subversion = sys_le16_to_cpu(evt->subversion);
1887 }
1888
1889 atomic_set_bit(conn->flags, BT_CONN_AUTO_VERSION_INFO);
1890
1891 if (IS_ENABLED(CONFIG_BT_REMOTE_INFO)) {
1892 /* Remote features is already present */
1893 notify_remote_info(conn);
1894 }
1895
1896 /* Continue with auto-initiated procedures */
1897 conn_auto_initiate(conn);
1898
1899 bt_conn_unref(conn);
1900 }
1901 #endif /* CONFIG_BT_REMOTE_VERSION */
1902
hci_hardware_error(struct net_buf * buf)1903 static void hci_hardware_error(struct net_buf *buf)
1904 {
1905 struct bt_hci_evt_hardware_error *evt;
1906
1907 evt = net_buf_pull_mem(buf, sizeof(*evt));
1908
1909 BT_ERR("Hardware error, hardware code: %d", evt->hardware_code);
1910 }
1911
1912 #if defined(CONFIG_BT_SMP)
le_ltk_neg_reply(uint16_t handle)1913 static void le_ltk_neg_reply(uint16_t handle)
1914 {
1915 struct bt_hci_cp_le_ltk_req_neg_reply *cp;
1916 struct net_buf *buf;
1917
1918 buf = bt_hci_cmd_create(BT_HCI_OP_LE_LTK_REQ_NEG_REPLY, sizeof(*cp));
1919 if (!buf) {
1920 BT_ERR("Out of command buffers");
1921
1922 return;
1923 }
1924
1925 cp = net_buf_add(buf, sizeof(*cp));
1926 cp->handle = sys_cpu_to_le16(handle);
1927
1928 bt_hci_cmd_send(BT_HCI_OP_LE_LTK_REQ_NEG_REPLY, buf);
1929 }
1930
le_ltk_reply(uint16_t handle,uint8_t * ltk)1931 static void le_ltk_reply(uint16_t handle, uint8_t *ltk)
1932 {
1933 struct bt_hci_cp_le_ltk_req_reply *cp;
1934 struct net_buf *buf;
1935
1936 buf = bt_hci_cmd_create(BT_HCI_OP_LE_LTK_REQ_REPLY,
1937 sizeof(*cp));
1938 if (!buf) {
1939 BT_ERR("Out of command buffers");
1940 return;
1941 }
1942
1943 cp = net_buf_add(buf, sizeof(*cp));
1944 cp->handle = sys_cpu_to_le16(handle);
1945 memcpy(cp->ltk, ltk, sizeof(cp->ltk));
1946
1947 bt_hci_cmd_send(BT_HCI_OP_LE_LTK_REQ_REPLY, buf);
1948 }
1949
le_ltk_request(struct net_buf * buf)1950 static void le_ltk_request(struct net_buf *buf)
1951 {
1952 struct bt_hci_evt_le_ltk_request *evt = (void *)buf->data;
1953 struct bt_conn *conn;
1954 uint16_t handle;
1955 uint8_t ltk[16];
1956
1957 handle = sys_le16_to_cpu(evt->handle);
1958
1959 BT_DBG("handle %u", handle);
1960
1961 conn = bt_conn_lookup_handle(handle);
1962 if (!conn) {
1963 BT_ERR("Unable to lookup conn for handle %u", handle);
1964 return;
1965 }
1966
1967 if (bt_smp_request_ltk(conn, evt->rand, evt->ediv, ltk)) {
1968 le_ltk_reply(handle, ltk);
1969 } else {
1970 le_ltk_neg_reply(handle);
1971 }
1972
1973 bt_conn_unref(conn);
1974 }
1975 #endif /* CONFIG_BT_SMP */
1976
/* Handle completion of the HCI_Reset command: clear transient host
 * state (scanning, BR/EDR discovery) and drop every device flag that
 * is not marked persistent.
 */
static void hci_reset_complete(struct net_buf *buf)
{
	/* First byte of the return parameters is the status */
	uint8_t status = buf->data[0];
	atomic_t flags;

	BT_DBG("status 0x%02x", status);

	if (status) {
		return;
	}

	if (IS_ENABLED(CONFIG_BT_OBSERVER)) {
		bt_scan_reset();
	}

#if defined(CONFIG_BT_BREDR)
	bt_br_discovery_reset();
#endif /* CONFIG_BT_BREDR */

	/* Keep only the persistent flags; the controller reset has
	 * invalidated all other runtime state they describe.
	 */
	flags = (atomic_get(bt_dev.flags) & BT_DEV_PERSISTENT_FLAGS);
	atomic_set(bt_dev.flags, flags);
}
1999
/* Common completion handling for Command Complete / Command Status
 * events: validate the event against the outstanding command, release
 * the sent-command reference, apply any deferred flag update and wake a
 * synchronous sender.
 */
static void hci_cmd_done(uint16_t opcode, uint8_t status, struct net_buf *buf)
{
	BT_DBG("opcode 0x%04x status 0x%02x buf %p", opcode, status, buf);

	/* Completion events must refer to a buffer from the HCI command
	 * pool; anything else indicates a controller/driver mismatch.
	 */
	if (net_buf_pool_get(buf->pool_id) != &hci_cmd_pool) {
		BT_WARN("opcode 0x%04x pool id %u pool %p != &hci_cmd_pool %p",
			opcode, buf->pool_id, net_buf_pool_get(buf->pool_id),
			&hci_cmd_pool);
		return;
	}

	if (cmd(buf)->opcode != opcode) {
		BT_WARN("OpCode 0x%04x completed instead of expected 0x%04x",
			opcode, cmd(buf)->opcode);
		return;
	}

	/* Drop the reference held while the command was outstanding */
	if (bt_dev.sent_cmd) {
		net_buf_unref(bt_dev.sent_cmd);
		bt_dev.sent_cmd = NULL;
	}

	/* Apply the deferred atomic flag update requested by the sender,
	 * but only on successful completion.
	 */
	if (cmd(buf)->state && !status) {
		struct bt_hci_cmd_state_set *update = cmd(buf)->state;

		atomic_set_bit_to(update->target, update->bit, update->val);
	}

	/* If the command was synchronous wake up bt_hci_cmd_send_sync() */
	if (cmd(buf)->sync) {
		cmd(buf)->status = status;
		k_sem_give(cmd(buf)->sync);
	}
}
2034
hci_cmd_complete(struct net_buf * buf)2035 static void hci_cmd_complete(struct net_buf *buf)
2036 {
2037 struct bt_hci_evt_cmd_complete *evt;
2038 uint8_t status, ncmd;
2039 uint16_t opcode;
2040
2041 evt = net_buf_pull_mem(buf, sizeof(*evt));
2042 ncmd = evt->ncmd;
2043 opcode = sys_le16_to_cpu(evt->opcode);
2044
2045 BT_DBG("opcode 0x%04x", opcode);
2046
2047 /* All command return parameters have a 1-byte status in the
2048 * beginning, so we can safely make this generalization.
2049 */
2050 status = buf->data[0];
2051
2052 hci_cmd_done(opcode, status, buf);
2053
2054 /* Allow next command to be sent */
2055 if (ncmd) {
2056 k_sem_give(&bt_dev.ncmd_sem);
2057 }
2058 }
2059
hci_cmd_status(struct net_buf * buf)2060 static void hci_cmd_status(struct net_buf *buf)
2061 {
2062 struct bt_hci_evt_cmd_status *evt;
2063 uint16_t opcode;
2064 uint8_t ncmd;
2065
2066 evt = net_buf_pull_mem(buf, sizeof(*evt));
2067 opcode = sys_le16_to_cpu(evt->opcode);
2068 ncmd = evt->ncmd;
2069
2070 BT_DBG("opcode 0x%04x", opcode);
2071
2072 hci_cmd_done(opcode, evt->status, buf);
2073
2074 /* Allow next command to be sent */
2075 if (ncmd) {
2076 k_sem_give(&bt_dev.ncmd_sem);
2077 }
2078 }
2079
bt_hci_get_conn_handle(const struct bt_conn * conn,uint16_t * conn_handle)2080 int bt_hci_get_conn_handle(const struct bt_conn *conn, uint16_t *conn_handle)
2081 {
2082 if (conn->state != BT_CONN_CONNECTED) {
2083 return -ENOTCONN;
2084 }
2085
2086 *conn_handle = conn->handle;
2087 return 0;
2088 }
2089
2090 #if defined(CONFIG_BT_EXT_ADV)
bt_hci_get_adv_handle(const struct bt_le_ext_adv * adv,uint8_t * adv_handle)2091 int bt_hci_get_adv_handle(const struct bt_le_ext_adv *adv, uint8_t *adv_handle)
2092 {
2093 if (!atomic_test_bit(adv->flags, BT_ADV_CREATED)) {
2094 return -EINVAL;
2095 }
2096
2097 *adv_handle = adv->handle;
2098 return 0;
2099 }
2100 #endif /* CONFIG_BT_EXT_ADV */
2101
2102 #if defined(CONFIG_BT_HCI_VS_EVT_USER)
/* Register (or replace) the user callback invoked for vendor-specific
 * HCI events; passing NULL effectively removes the callback. Always
 * returns 0.
 */
int bt_hci_register_vnd_evt_cb(bt_hci_vnd_evt_cb_t cb)
{
	hci_vnd_evt_cb = cb;
	return 0;
}
2108 #endif /* CONFIG_BT_HCI_VS_EVT_USER */
2109
/* Dispatch a vendor-specific HCI event to the user-registered callback
 * (when enabled) and warn about events nothing consumed.
 */
static void hci_vendor_event(struct net_buf *buf)
{
	bool handled = false;

#if defined(CONFIG_BT_HCI_VS_EVT_USER)
	if (hci_vnd_evt_cb) {
		struct net_buf_simple_state state;

		/* Save/restore so the callback cannot disturb the buffer
		 * state for any further processing of this event.
		 */
		net_buf_simple_save(&buf->b, &state);

		handled = hci_vnd_evt_cb(&buf->b);

		net_buf_simple_restore(&buf->b, &state);
	}
#endif /* CONFIG_BT_HCI_VS_EVT_USER */

	if (IS_ENABLED(CONFIG_BT_HCI_VS_EXT) && !handled) {
		/* do nothing at present time */
		BT_WARN("Unhandled vendor-specific event: %s",
			bt_hex(buf->data, buf->len));
	}
}
2132
/* Dispatch table for LE meta-event subevents: each entry maps a subevent
 * code to its handler and the minimum payload length accepted by
 * handle_event().
 */
static const struct event_handler meta_events[] = {
#if defined(CONFIG_BT_OBSERVER)
	EVENT_HANDLER(BT_HCI_EVT_LE_ADVERTISING_REPORT, bt_hci_le_adv_report,
		      sizeof(struct bt_hci_evt_le_advertising_report)),
#endif /* CONFIG_BT_OBSERVER */
#if defined(CONFIG_BT_CONN)
	EVENT_HANDLER(BT_HCI_EVT_LE_CONN_COMPLETE, le_legacy_conn_complete,
		      sizeof(struct bt_hci_evt_le_conn_complete)),
	EVENT_HANDLER(BT_HCI_EVT_LE_ENH_CONN_COMPLETE, le_enh_conn_complete,
		      sizeof(struct bt_hci_evt_le_enh_conn_complete)),
	EVENT_HANDLER(BT_HCI_EVT_LE_CONN_UPDATE_COMPLETE,
		      le_conn_update_complete,
		      sizeof(struct bt_hci_evt_le_conn_update_complete)),
	EVENT_HANDLER(BT_HCI_EVT_LE_REMOTE_FEAT_COMPLETE,
		      le_remote_feat_complete,
		      sizeof(struct bt_hci_evt_le_remote_feat_complete)),
	EVENT_HANDLER(BT_HCI_EVT_LE_CONN_PARAM_REQ, le_conn_param_req,
		      sizeof(struct bt_hci_evt_le_conn_param_req)),
#if defined(CONFIG_BT_DATA_LEN_UPDATE)
	EVENT_HANDLER(BT_HCI_EVT_LE_DATA_LEN_CHANGE, le_data_len_change,
		      sizeof(struct bt_hci_evt_le_data_len_change)),
#endif /* CONFIG_BT_DATA_LEN_UPDATE */
#if defined(CONFIG_BT_PHY_UPDATE)
	EVENT_HANDLER(BT_HCI_EVT_LE_PHY_UPDATE_COMPLETE,
		      le_phy_update_complete,
		      sizeof(struct bt_hci_evt_le_phy_update_complete)),
#endif /* CONFIG_BT_PHY_UPDATE */
#endif /* CONFIG_BT_CONN */
#if defined(CONFIG_BT_SMP)
	EVENT_HANDLER(BT_HCI_EVT_LE_LTK_REQUEST, le_ltk_request,
		      sizeof(struct bt_hci_evt_le_ltk_request)),
#endif /* CONFIG_BT_SMP */
#if defined(CONFIG_BT_ECC)
	EVENT_HANDLER(BT_HCI_EVT_LE_P256_PUBLIC_KEY_COMPLETE,
		      bt_hci_evt_le_pkey_complete,
		      sizeof(struct bt_hci_evt_le_p256_public_key_complete)),
	EVENT_HANDLER(BT_HCI_EVT_LE_GENERATE_DHKEY_COMPLETE,
		      bt_hci_evt_le_dhkey_complete,
		      sizeof(struct bt_hci_evt_le_generate_dhkey_complete)),
#endif /* CONFIG_BT_ECC */
#if defined(CONFIG_BT_EXT_ADV)
#if defined(CONFIG_BT_BROADCASTER)
	EVENT_HANDLER(BT_HCI_EVT_LE_ADV_SET_TERMINATED, bt_hci_le_adv_set_terminated,
		      sizeof(struct bt_hci_evt_le_adv_set_terminated)),
	EVENT_HANDLER(BT_HCI_EVT_LE_SCAN_REQ_RECEIVED, bt_hci_le_scan_req_received,
		      sizeof(struct bt_hci_evt_le_scan_req_received)),
#endif
#if defined(CONFIG_BT_OBSERVER)
	EVENT_HANDLER(BT_HCI_EVT_LE_SCAN_TIMEOUT, bt_hci_le_scan_timeout,
		      0),
	EVENT_HANDLER(BT_HCI_EVT_LE_EXT_ADVERTISING_REPORT, bt_hci_le_adv_ext_report,
		      sizeof(struct bt_hci_evt_le_ext_advertising_report)),
#endif /* defined(CONFIG_BT_OBSERVER) */
#if defined(CONFIG_BT_PER_ADV_SYNC)
	EVENT_HANDLER(BT_HCI_EVT_LE_PER_ADV_SYNC_ESTABLISHED,
		      bt_hci_le_per_adv_sync_established,
		      sizeof(struct bt_hci_evt_le_per_adv_sync_established)),
	EVENT_HANDLER(BT_HCI_EVT_LE_PER_ADVERTISING_REPORT, bt_hci_le_per_adv_report,
		      sizeof(struct bt_hci_evt_le_per_advertising_report)),
	EVENT_HANDLER(BT_HCI_EVT_LE_PER_ADV_SYNC_LOST, bt_hci_le_per_adv_sync_lost,
		      sizeof(struct bt_hci_evt_le_per_adv_sync_lost)),
#if defined(CONFIG_BT_CONN)
	EVENT_HANDLER(BT_HCI_EVT_LE_PAST_RECEIVED, bt_hci_le_past_received,
		      sizeof(struct bt_hci_evt_le_past_received)),
#endif /* CONFIG_BT_CONN */
#endif /* defined(CONFIG_BT_PER_ADV_SYNC) */
#endif /* defined(CONFIG_BT_EXT_ADV) */
#if defined(CONFIG_BT_ISO_UNICAST)
	EVENT_HANDLER(BT_HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished,
		      sizeof(struct bt_hci_evt_le_cis_established)),
	EVENT_HANDLER(BT_HCI_EVT_LE_CIS_REQ, hci_le_cis_req,
		      sizeof(struct bt_hci_evt_le_cis_req)),
#endif /* (CONFIG_BT_ISO_UNICAST) */
#if defined(CONFIG_BT_ISO_BROADCAST)
	EVENT_HANDLER(BT_HCI_EVT_LE_BIG_COMPLETE,
		      hci_le_big_complete,
		      sizeof(struct bt_hci_evt_le_big_complete)),
	EVENT_HANDLER(BT_HCI_EVT_LE_BIG_TERMINATE,
		      hci_le_big_terminate,
		      sizeof(struct bt_hci_evt_le_big_terminate)),
	EVENT_HANDLER(BT_HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
		      hci_le_big_sync_established,
		      sizeof(struct bt_hci_evt_le_big_sync_established)),
	EVENT_HANDLER(BT_HCI_EVT_LE_BIG_SYNC_LOST,
		      hci_le_big_sync_lost,
		      sizeof(struct bt_hci_evt_le_big_sync_lost)),
	EVENT_HANDLER(BT_HCI_EVT_LE_BIGINFO_ADV_REPORT,
		      bt_hci_le_biginfo_adv_report,
		      sizeof(struct bt_hci_evt_le_biginfo_adv_report)),
#endif /* (CONFIG_BT_ISO_BROADCAST) */
#if defined(CONFIG_BT_DF_CONNECTIONLESS_CTE_RX)
	EVENT_HANDLER(BT_HCI_EVT_LE_CONNECTIONLESS_IQ_REPORT, bt_hci_le_df_connectionless_iq_report,
		      sizeof(struct bt_hci_evt_le_connectionless_iq_report)),
#endif /* CONFIG_BT_DF_CONNECTIONLESS_CTE_RX */
};
2228
hci_le_meta_event(struct net_buf * buf)2229 static void hci_le_meta_event(struct net_buf *buf)
2230 {
2231 struct bt_hci_evt_le_meta_event *evt;
2232
2233 evt = net_buf_pull_mem(buf, sizeof(*evt));
2234
2235 BT_DBG("subevent 0x%02x", evt->subevent);
2236
2237 handle_event(evt->subevent, buf, meta_events, ARRAY_SIZE(meta_events));
2238 }
2239
/* Dispatch table for top-level HCI events: maps each event code to its
 * handler and the minimum payload length accepted by handle_event().
 */
static const struct event_handler normal_events[] = {
	EVENT_HANDLER(BT_HCI_EVT_VENDOR, hci_vendor_event,
		      sizeof(struct bt_hci_evt_vs)),
	EVENT_HANDLER(BT_HCI_EVT_LE_META_EVENT, hci_le_meta_event,
		      sizeof(struct bt_hci_evt_le_meta_event)),
#if defined(CONFIG_BT_BREDR)
	EVENT_HANDLER(BT_HCI_EVT_CONN_REQUEST, bt_hci_conn_req,
		      sizeof(struct bt_hci_evt_conn_request)),
	EVENT_HANDLER(BT_HCI_EVT_CONN_COMPLETE, bt_hci_conn_complete,
		      sizeof(struct bt_hci_evt_conn_complete)),
	EVENT_HANDLER(BT_HCI_EVT_PIN_CODE_REQ, bt_hci_pin_code_req,
		      sizeof(struct bt_hci_evt_pin_code_req)),
	EVENT_HANDLER(BT_HCI_EVT_LINK_KEY_NOTIFY, bt_hci_link_key_notify,
		      sizeof(struct bt_hci_evt_link_key_notify)),
	EVENT_HANDLER(BT_HCI_EVT_LINK_KEY_REQ, bt_hci_link_key_req,
		      sizeof(struct bt_hci_evt_link_key_req)),
	EVENT_HANDLER(BT_HCI_EVT_IO_CAPA_RESP, bt_hci_io_capa_resp,
		      sizeof(struct bt_hci_evt_io_capa_resp)),
	EVENT_HANDLER(BT_HCI_EVT_IO_CAPA_REQ, bt_hci_io_capa_req,
		      sizeof(struct bt_hci_evt_io_capa_req)),
	EVENT_HANDLER(BT_HCI_EVT_SSP_COMPLETE, bt_hci_ssp_complete,
		      sizeof(struct bt_hci_evt_ssp_complete)),
	EVENT_HANDLER(BT_HCI_EVT_USER_CONFIRM_REQ, bt_hci_user_confirm_req,
		      sizeof(struct bt_hci_evt_user_confirm_req)),
	EVENT_HANDLER(BT_HCI_EVT_USER_PASSKEY_NOTIFY,
		      bt_hci_user_passkey_notify,
		      sizeof(struct bt_hci_evt_user_passkey_notify)),
	EVENT_HANDLER(BT_HCI_EVT_USER_PASSKEY_REQ, bt_hci_user_passkey_req,
		      sizeof(struct bt_hci_evt_user_passkey_req)),
	EVENT_HANDLER(BT_HCI_EVT_INQUIRY_COMPLETE, bt_hci_inquiry_complete,
		      sizeof(struct bt_hci_evt_inquiry_complete)),
	EVENT_HANDLER(BT_HCI_EVT_INQUIRY_RESULT_WITH_RSSI,
		      bt_hci_inquiry_result_with_rssi,
		      sizeof(struct bt_hci_evt_inquiry_result_with_rssi)),
	EVENT_HANDLER(BT_HCI_EVT_EXTENDED_INQUIRY_RESULT,
		      bt_hci_extended_inquiry_result,
		      sizeof(struct bt_hci_evt_extended_inquiry_result)),
	EVENT_HANDLER(BT_HCI_EVT_REMOTE_NAME_REQ_COMPLETE,
		      bt_hci_remote_name_request_complete,
		      sizeof(struct bt_hci_evt_remote_name_req_complete)),
	EVENT_HANDLER(BT_HCI_EVT_AUTH_COMPLETE, bt_hci_auth_complete,
		      sizeof(struct bt_hci_evt_auth_complete)),
	EVENT_HANDLER(BT_HCI_EVT_REMOTE_FEATURES,
		      bt_hci_read_remote_features_complete,
		      sizeof(struct bt_hci_evt_remote_features)),
	EVENT_HANDLER(BT_HCI_EVT_REMOTE_EXT_FEATURES,
		      bt_hci_read_remote_ext_features_complete,
		      sizeof(struct bt_hci_evt_remote_ext_features)),
	EVENT_HANDLER(BT_HCI_EVT_ROLE_CHANGE, bt_hci_role_change,
		      sizeof(struct bt_hci_evt_role_change)),
	EVENT_HANDLER(BT_HCI_EVT_SYNC_CONN_COMPLETE, bt_hci_synchronous_conn_complete,
		      sizeof(struct bt_hci_evt_sync_conn_complete)),
#endif /* CONFIG_BT_BREDR */
#if defined(CONFIG_BT_CONN)
	EVENT_HANDLER(BT_HCI_EVT_DISCONN_COMPLETE, hci_disconn_complete,
		      sizeof(struct bt_hci_evt_disconn_complete)),
#endif /* CONFIG_BT_CONN */
#if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_BREDR)
	EVENT_HANDLER(BT_HCI_EVT_ENCRYPT_CHANGE, hci_encrypt_change,
		      sizeof(struct bt_hci_evt_encrypt_change)),
	EVENT_HANDLER(BT_HCI_EVT_ENCRYPT_KEY_REFRESH_COMPLETE,
		      hci_encrypt_key_refresh_complete,
		      sizeof(struct bt_hci_evt_encrypt_key_refresh_complete)),
#endif /* CONFIG_BT_SMP || CONFIG_BT_BREDR */
#if defined(CONFIG_BT_REMOTE_VERSION)
	EVENT_HANDLER(BT_HCI_EVT_REMOTE_VERSION_INFO,
		      bt_hci_evt_read_remote_version_complete,
		      sizeof(struct bt_hci_evt_remote_version_info)),
#endif /* CONFIG_BT_REMOTE_VERSION */
	EVENT_HANDLER(BT_HCI_EVT_HARDWARE_ERROR, hci_hardware_error,
		      sizeof(struct bt_hci_evt_hardware_error)),
};
2312
hci_event(struct net_buf * buf)2313 static void hci_event(struct net_buf *buf)
2314 {
2315 struct bt_hci_evt_hdr *hdr;
2316
2317 BT_ASSERT(buf->len >= sizeof(*hdr));
2318
2319 hdr = net_buf_pull_mem(buf, sizeof(*hdr));
2320 BT_DBG("event 0x%02x", hdr->evt);
2321 BT_ASSERT(bt_hci_evt_get_flags(hdr->evt) & BT_HCI_EVT_FLAG_RECV);
2322
2323 handle_event(hdr->evt, buf, normal_events, ARRAY_SIZE(normal_events));
2324
2325 net_buf_unref(buf);
2326 }
2327
/* Dequeue the next HCI command and hand it to the driver.
 *
 * Called from the TX thread only when the command FIFO is known to be
 * non-empty (net_buf_get() with K_NO_WAIT is asserted to succeed).
 * Blocks on ncmd_sem until the controller has room for another command.
 * An extra reference to the in-flight command is kept in
 * bt_dev.sent_cmd; on driver failure the command is completed locally
 * with BT_HCI_ERR_UNSPECIFIED and all references are released.
 */
static void send_cmd(void)
{
	struct net_buf *buf;
	int err;

	/* Get next command */
	BT_DBG("calling net_buf_get");
	buf = net_buf_get(&bt_dev.cmd_tx_queue, K_NO_WAIT);
	BT_ASSERT(buf);

	/* Wait until ncmd > 0 */
	BT_DBG("calling sem_take_wait");
	k_sem_take(&bt_dev.ncmd_sem, K_FOREVER);

	/* Clear out any existing sent command */
	if (bt_dev.sent_cmd) {
		BT_ERR("Uncleared pending sent_cmd");
		net_buf_unref(bt_dev.sent_cmd);
		bt_dev.sent_cmd = NULL;
	}

	bt_dev.sent_cmd = net_buf_ref(buf);

	BT_DBG("Sending command 0x%04x (buf %p) to driver",
	       cmd(buf)->opcode, buf);

	err = bt_send(buf);
	if (err) {
		BT_ERR("Unable to send to driver (err %d)", err);
		/* Return the command credit taken above */
		k_sem_give(&bt_dev.ncmd_sem);
		hci_cmd_done(cmd(buf)->opcode, BT_HCI_ERR_UNSPECIFIED, buf);
		net_buf_unref(bt_dev.sent_cmd);
		bt_dev.sent_cmd = NULL;
		net_buf_unref(buf);
	}
}
2364
/* Act on the k_poll events the TX thread woke up for: send queued HCI
 * commands and process per-connection TX queues.
 *
 * @param ev    Array of polled events (first entry is the command FIFO).
 * @param count Number of entries in @p ev.
 */
static void process_events(struct k_poll_event *ev, int count)
{
	BT_DBG("count %d", count);

	for (; count; ev++, count--) {
		BT_DBG("ev->state %u", ev->state);

		switch (ev->state) {
		case K_POLL_STATE_SIGNALED:
			break;
		case K_POLL_STATE_SEM_AVAILABLE:
			/* After this fn is exec'd, `bt_conn_prepare_events()`
			 * will be called once again, and this time buffers will
			 * be available, so the FIFO will be added to the poll
			 * list instead of the ctlr buffers semaphore.
			 */
			break;
		case K_POLL_STATE_FIFO_DATA_AVAILABLE:
			if (ev->tag == BT_EVENT_CMD_TX) {
				send_cmd();
			} else if (IS_ENABLED(CONFIG_BT_CONN) ||
				   IS_ENABLED(CONFIG_BT_ISO)) {
				struct bt_conn *conn;

				if (ev->tag == BT_EVENT_CONN_TX_QUEUE) {
					/* Recover the connection that owns
					 * the ready tx_queue FIFO.
					 */
					conn = CONTAINER_OF(ev->fifo,
							    struct bt_conn,
							    tx_queue);
					bt_conn_process_tx(conn);
				}
			}
			break;
		case K_POLL_STATE_NOT_READY:
			break;
		default:
			BT_WARN("Unexpected k_poll event state %u", ev->state);
			break;
		}
	}
}
2405
/* Upper bound on the number of k_poll events the TX thread may wait on:
 * the command FIFO, plus (when connections/ISO are enabled) the
 * conn_change signal and one event per possible connection/channel.
 */
#if defined(CONFIG_BT_CONN)
#if defined(CONFIG_BT_ISO)
/* command FIFO + conn_change signal + MAX_CONN + ISO_MAX_CHAN */
#define EV_COUNT (2 + CONFIG_BT_MAX_CONN + CONFIG_BT_ISO_MAX_CHAN)
#else
/* command FIFO + conn_change signal + MAX_CONN */
#define EV_COUNT (2 + CONFIG_BT_MAX_CONN)
#endif /* CONFIG_BT_ISO */
#else
#if defined(CONFIG_BT_ISO)
/* command FIFO + conn_change signal + ISO_MAX_CHAN */
#define EV_COUNT (2 + CONFIG_BT_ISO_MAX_CHAN)
#else
/* command FIFO */
#define EV_COUNT 1
#endif /* CONFIG_BT_ISO */
#endif /* CONFIG_BT_CONN */

/* HCI TX thread entry point: polls the command FIFO (events[0]) and any
 * per-connection TX queues, then processes whatever became ready.
 * Never returns.
 */
static void hci_tx_thread(void *p1, void *p2, void *p3)
{
	static struct k_poll_event events[EV_COUNT] = {
		K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE,
						K_POLL_MODE_NOTIFY_ONLY,
						&bt_dev.cmd_tx_queue,
						BT_EVENT_CMD_TX),
	};

	BT_DBG("Started");

	while (1) {
		int ev_count, err;

		/* Re-arm the command FIFO event each iteration */
		events[0].state = K_POLL_STATE_NOT_READY;
		ev_count = 1;

		/* This adds the FIFO per-connection */
		if (IS_ENABLED(CONFIG_BT_CONN) || IS_ENABLED(CONFIG_BT_ISO)) {
			ev_count += bt_conn_prepare_events(&events[1]);
		}

		BT_DBG("Calling k_poll with %d events", ev_count);

		err = k_poll(events, ev_count, K_FOREVER);
		BT_ASSERT(err == 0);

		process_events(events, ev_count);

		/* Make sure we don't hog the CPU if there's all the time
		 * some ready events.
		 */
		k_yield();
	}
}
2459
2460
read_local_ver_complete(struct net_buf * buf)2461 static void read_local_ver_complete(struct net_buf *buf)
2462 {
2463 struct bt_hci_rp_read_local_version_info *rp = (void *)buf->data;
2464
2465 BT_DBG("status 0x%02x", rp->status);
2466
2467 bt_dev.hci_version = rp->hci_version;
2468 bt_dev.hci_revision = sys_le16_to_cpu(rp->hci_revision);
2469 bt_dev.lmp_version = rp->lmp_version;
2470 bt_dev.lmp_subversion = sys_le16_to_cpu(rp->lmp_subversion);
2471 bt_dev.manufacturer = sys_le16_to_cpu(rp->manufacturer);
2472 }
2473
read_le_features_complete(struct net_buf * buf)2474 static void read_le_features_complete(struct net_buf *buf)
2475 {
2476 struct bt_hci_rp_le_read_local_features *rp = (void *)buf->data;
2477
2478 BT_DBG("status 0x%02x", rp->status);
2479
2480 memcpy(bt_dev.le.features, rp->features, sizeof(bt_dev.le.features));
2481 }
2482
#if defined(CONFIG_BT_CONN)
#if !defined(CONFIG_BT_BREDR)
/* Store BR/EDR ACL buffer parameters from the Read Buffer Size response,
 * but only when no LE-specific buffer info has been recorded yet.
 */
static void read_buffer_size_complete(struct net_buf *buf)
{
	struct bt_hci_rp_read_buffer_size *rp = (void *)buf->data;

	BT_DBG("status 0x%02x", rp->status);

	/* If LE-side has buffers we can ignore the BR/EDR values */
	if (!bt_dev.le.acl_mtu) {
		uint16_t pkts = sys_le16_to_cpu(rp->acl_max_num);

		bt_dev.le.acl_mtu = sys_le16_to_cpu(rp->acl_max_len);

		BT_DBG("ACL BR/EDR buffers: pkts %u mtu %u", pkts, bt_dev.le.acl_mtu);

		k_sem_init(&bt_dev.le.acl_pkts, pkts, pkts);
	}
}
#endif /* !defined(CONFIG_BT_BREDR) */
#endif /* CONFIG_BT_CONN */
2506
le_read_buffer_size_complete(struct net_buf * buf)2507 static void le_read_buffer_size_complete(struct net_buf *buf)
2508 {
2509 struct bt_hci_rp_le_read_buffer_size *rp = (void *)buf->data;
2510
2511 BT_DBG("status 0x%02x", rp->status);
2512
2513 #if defined(CONFIG_BT_CONN)
2514 uint16_t acl_mtu = sys_le16_to_cpu(rp->le_max_len);
2515
2516 if (!acl_mtu || !rp->le_max_num) {
2517 return;
2518 }
2519
2520 bt_dev.le.acl_mtu = acl_mtu;
2521
2522 BT_DBG("ACL LE buffers: pkts %u mtu %u", rp->le_max_num, bt_dev.le.acl_mtu);
2523
2524 k_sem_init(&bt_dev.le.acl_pkts, rp->le_max_num, rp->le_max_num);
2525 #endif /* CONFIG_BT_CONN */
2526 }
2527
/* Store ACL and ISO buffer parameters from the LE Read Buffer Size v2
 * response. ACL values are only recorded when both length and count are
 * non-zero; missing ISO buffer info is an error for ISO builds.
 *
 * Fix: use BT_DBG and the "status 0x%02x" format for consistency with
 * every other completion handler in this file (one line used LOG_DBG
 * and a decimal status format).
 */
static void read_buffer_size_v2_complete(struct net_buf *buf)
{
#if defined(CONFIG_BT_ISO)
	struct bt_hci_rp_le_read_buffer_size_v2 *rp = (void *)buf->data;

	BT_DBG("status 0x%02x", rp->status);

#if defined(CONFIG_BT_CONN)
	uint16_t acl_mtu = sys_le16_to_cpu(rp->acl_max_len);

	if (acl_mtu && rp->acl_max_num) {
		bt_dev.le.acl_mtu = acl_mtu;
		BT_DBG("ACL LE buffers: pkts %u mtu %u", rp->acl_max_num, bt_dev.le.acl_mtu);

		k_sem_init(&bt_dev.le.acl_pkts, rp->acl_max_num, rp->acl_max_num);
	}
#endif /* CONFIG_BT_CONN */

	uint16_t iso_mtu = sys_le16_to_cpu(rp->iso_max_len);

	if (!iso_mtu || !rp->iso_max_num) {
		BT_ERR("ISO buffer size not set");
		return;
	}

	bt_dev.le.iso_mtu = iso_mtu;

	BT_DBG("ISO buffers: pkts %u mtu %u", rp->iso_max_num, bt_dev.le.iso_mtu);

	k_sem_init(&bt_dev.le.iso_pkts, rp->iso_max_num, rp->iso_max_num);
#endif /* CONFIG_BT_ISO */
}
2560
le_set_host_feature(uint8_t bit_number,uint8_t bit_value)2561 static int le_set_host_feature(uint8_t bit_number, uint8_t bit_value)
2562 {
2563 struct bt_hci_cp_le_set_host_feature *cp;
2564 struct net_buf *buf;
2565
2566 buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_HOST_FEATURE, sizeof(*cp));
2567 if (!buf) {
2568 return -ENOBUFS;
2569 }
2570
2571 cp = net_buf_add(buf, sizeof(*cp));
2572 cp->bit_number = bit_number;
2573 cp->bit_value = bit_value;
2574
2575 return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_HOST_FEATURE, buf, NULL);
2576 }
2577
read_supported_commands_complete(struct net_buf * buf)2578 static void read_supported_commands_complete(struct net_buf *buf)
2579 {
2580 struct bt_hci_rp_read_supported_commands *rp = (void *)buf->data;
2581
2582 BT_DBG("status 0x%02x", rp->status);
2583
2584 memcpy(bt_dev.supported_commands, rp->commands,
2585 sizeof(bt_dev.supported_commands));
2586
2587 /* Report additional HCI commands used for ECDH as
2588 * supported if TinyCrypt ECC is used for emulation.
2589 */
2590 if (IS_ENABLED(CONFIG_BT_TINYCRYPT_ECC)) {
2591 bt_hci_ecc_supported_commands(bt_dev.supported_commands);
2592 }
2593 }
2594
read_local_features_complete(struct net_buf * buf)2595 static void read_local_features_complete(struct net_buf *buf)
2596 {
2597 struct bt_hci_rp_read_local_features *rp = (void *)buf->data;
2598
2599 BT_DBG("status 0x%02x", rp->status);
2600
2601 memcpy(bt_dev.features[0], rp->features, sizeof(bt_dev.features[0]));
2602 }
2603
le_read_supp_states_complete(struct net_buf * buf)2604 static void le_read_supp_states_complete(struct net_buf *buf)
2605 {
2606 struct bt_hci_rp_le_read_supp_states *rp = (void *)buf->data;
2607
2608 BT_DBG("status 0x%02x", rp->status);
2609
2610 bt_dev.le.states = sys_get_le64(rp->le_states);
2611 }
2612
#if defined(CONFIG_BT_SMP)
/* Cache the controller's resolving-list capacity from the LE Read
 * Resolving List Size response.
 */
static void le_read_resolving_list_size_complete(struct net_buf *buf)
{
	struct bt_hci_rp_le_read_rl_size *rsp = (void *)buf->data;

	BT_DBG("Resolving List size %u", rsp->rl_size);

	bt_dev.le.rl_size = rsp->rl_size;
}
#endif /* defined(CONFIG_BT_SMP) */
2623
/* Run the transport-agnostic part of controller initialization:
 * optional HCI Reset, then read local features, version and supported
 * commands, initialize the PRNG (when host crypto is used) and set up
 * ACL flow control when enabled. Returns 0 on success or a negative
 * error code from the first failing step.
 */
static int common_init(void)
{
	struct net_buf *rsp;
	int err;

	/* Some controllers (BT_QUIRK_NO_RESET) must not be reset here */
	if (!(bt_dev.drv->quirks & BT_QUIRK_NO_RESET)) {
		/* Send HCI_RESET */
		err = bt_hci_cmd_send_sync(BT_HCI_OP_RESET, NULL, &rsp);
		if (err) {
			return err;
		}
		hci_reset_complete(rsp);
		net_buf_unref(rsp);
	}

	/* Read Local Supported Features */
	err = bt_hci_cmd_send_sync(BT_HCI_OP_READ_LOCAL_FEATURES, NULL, &rsp);
	if (err) {
		return err;
	}
	read_local_features_complete(rsp);
	net_buf_unref(rsp);

	/* Read Local Version Information */
	err = bt_hci_cmd_send_sync(BT_HCI_OP_READ_LOCAL_VERSION_INFO, NULL,
				   &rsp);
	if (err) {
		return err;
	}
	read_local_ver_complete(rsp);
	net_buf_unref(rsp);

	/* Read Local Supported Commands */
	err = bt_hci_cmd_send_sync(BT_HCI_OP_READ_SUPPORTED_COMMANDS, NULL,
				   &rsp);
	if (err) {
		return err;
	}
	read_supported_commands_complete(rsp);
	net_buf_unref(rsp);

	if (IS_ENABLED(CONFIG_BT_HOST_CRYPTO)) {
		/* Initialize the PRNG so that it is safe to use it later
		 * on in the initialization process.
		 */
		err = prng_init();
		if (err) {
			return err;
		}
	}

#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
	err = set_flow_control();
	if (err) {
		return err;
	}
#endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */

	return 0;
}
2684
/* Build and send the LE Set Event Mask command, enabling only the LE
 * subevents that the enabled Kconfig features and the controller's
 * reported feature/command bits call for. Returns 0 on success or a
 * negative error code.
 */
static int le_set_event_mask(void)
{
	struct bt_hci_cp_le_set_event_mask *cp_mask;
	struct net_buf *buf;
	uint64_t mask = 0U;

	/* Set LE event mask */
	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_EVENT_MASK, sizeof(*cp_mask));
	if (!buf) {
		return -ENOBUFS;
	}

	cp_mask = net_buf_add(buf, sizeof(*cp_mask));

	mask |= BT_EVT_MASK_LE_ADVERTISING_REPORT;

	/* Extended advertising events, plus periodic sync events when
	 * periodic advertising sync is enabled.
	 */
	if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
	    BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
		mask |= BT_EVT_MASK_LE_ADV_SET_TERMINATED;
		mask |= BT_EVT_MASK_LE_SCAN_REQ_RECEIVED;
		mask |= BT_EVT_MASK_LE_EXT_ADVERTISING_REPORT;
		mask |= BT_EVT_MASK_LE_SCAN_TIMEOUT;
		if (IS_ENABLED(CONFIG_BT_PER_ADV_SYNC)) {
			mask |= BT_EVT_MASK_LE_PER_ADV_SYNC_ESTABLISHED;
			mask |= BT_EVT_MASK_LE_PER_ADVERTISING_REPORT;
			mask |= BT_EVT_MASK_LE_PER_ADV_SYNC_LOST;
			mask |= BT_EVT_MASK_LE_PAST_RECEIVED;
		}
	}

	if (IS_ENABLED(CONFIG_BT_CONN)) {
		if ((IS_ENABLED(CONFIG_BT_SMP) &&
		     BT_FEAT_LE_PRIVACY(bt_dev.le.features)) ||
		    (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
		     BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features))) {
			/* C24:
			 * Mandatory if the LE Controller supports Connection
			 * State and either LE Feature (LL Privacy) or
			 * LE Feature (Extended Advertising) is supported, ...
			 */
			mask |= BT_EVT_MASK_LE_ENH_CONN_COMPLETE;
		} else {
			mask |= BT_EVT_MASK_LE_CONN_COMPLETE;
		}

		mask |= BT_EVT_MASK_LE_CONN_UPDATE_COMPLETE;
		mask |= BT_EVT_MASK_LE_REMOTE_FEAT_COMPLETE;

		if (BT_FEAT_LE_CONN_PARAM_REQ_PROC(bt_dev.le.features)) {
			mask |= BT_EVT_MASK_LE_CONN_PARAM_REQ;
		}

		if (IS_ENABLED(CONFIG_BT_DATA_LEN_UPDATE) &&
		    BT_FEAT_LE_DLE(bt_dev.le.features)) {
			mask |= BT_EVT_MASK_LE_DATA_LEN_CHANGE;
		}

		if (IS_ENABLED(CONFIG_BT_PHY_UPDATE) &&
		    (BT_FEAT_LE_PHY_2M(bt_dev.le.features) ||
		     BT_FEAT_LE_PHY_CODED(bt_dev.le.features))) {
			mask |= BT_EVT_MASK_LE_PHY_UPDATE_COMPLETE;
		}
	}

	if (IS_ENABLED(CONFIG_BT_SMP) &&
	    BT_FEAT_LE_ENCR(bt_dev.le.features)) {
		mask |= BT_EVT_MASK_LE_LTK_REQUEST;
	}

	/*
	 * If "LE Read Local P-256 Public Key" and "LE Generate DH Key" are
	 * supported we need to enable events generated by those commands.
	 */
	if (IS_ENABLED(CONFIG_BT_ECC) &&
	    (BT_CMD_TEST(bt_dev.supported_commands, 34, 1)) &&
	    (BT_CMD_TEST(bt_dev.supported_commands, 34, 2))) {
		mask |= BT_EVT_MASK_LE_P256_PUBLIC_KEY_COMPLETE;
		mask |= BT_EVT_MASK_LE_GENERATE_DHKEY_COMPLETE;
	}

	/*
	 * Enable CIS events only if ISO connections are enabled and controller
	 * support them.
	 */
	if (IS_ENABLED(CONFIG_BT_ISO) &&
	    BT_FEAT_LE_CIS(bt_dev.le.features)) {
		mask |= BT_EVT_MASK_LE_CIS_ESTABLISHED;
		if (BT_FEAT_LE_CIS_PERIPHERAL(bt_dev.le.features)) {
			mask |= BT_EVT_MASK_LE_CIS_REQ;
		}
	}

	/* Enable BIS events for broadcaster and/or receiver */
	if (IS_ENABLED(CONFIG_BT_ISO_BROADCAST) &&
	    BT_FEAT_LE_BIS(bt_dev.le.features)) {
		if (BT_FEAT_LE_ISO_BROADCASTER(bt_dev.le.features)) {
			mask |= BT_EVT_MASK_LE_BIG_COMPLETE;
			mask |= BT_EVT_MASK_LE_BIG_TERMINATED;
		}
		if (BT_FEAT_LE_SYNC_RECEIVER(bt_dev.le.features)) {
			mask |= BT_EVT_MASK_LE_BIG_SYNC_ESTABLISHED;
			mask |= BT_EVT_MASK_LE_BIG_SYNC_LOST;
			mask |= BT_EVT_MASK_LE_BIGINFO_ADV_REPORT;
		}
	}

	/* Enable IQ samples report events receiver */
	if (IS_ENABLED(CONFIG_BT_DF_CONNECTIONLESS_CTE_RX)) {
		mask |= BT_EVT_MASK_LE_CONNECTIONLESS_IQ_REPORT;
	}

	if (IS_ENABLED(CONFIG_BT_DF_CONNECTION_CTE_RX)) {
		mask |= BT_EVT_MASK_LE_CONNECTION_IQ_REPORT;
		mask |= BT_EVT_MASK_LE_CTE_REQUEST_FAILED;
	}

	sys_put_le64(mask, cp_mask->events);
	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_EVENT_MASK, buf, NULL);
}
2804
le_init_iso(void)2805 static int le_init_iso(void)
2806 {
2807 int err;
2808 struct net_buf *rsp;
2809
2810 /* Set Isochronus Channels - Host support */
2811 err = le_set_host_feature(BT_LE_FEAT_BIT_ISO_CHANNELS, 1);
2812 if (err) {
2813 return err;
2814 }
2815
2816 /* Octet 41, bit 5 is read buffer size V2 */
2817 if (BT_CMD_TEST(bt_dev.supported_commands, 41, 5)) {
2818 /* Read ISO Buffer Size V2 */
2819 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_BUFFER_SIZE_V2,
2820 NULL, &rsp);
2821 if (err) {
2822 return err;
2823 }
2824
2825 read_buffer_size_v2_complete(rsp);
2826
2827 net_buf_unref(rsp);
2828 } else if (IS_ENABLED(CONFIG_BT_CONN)) {
2829 BT_WARN("Read Buffer Size V2 command is not supported."
2830 "No ISO buffers will be available");
2831
2832 /* Read LE Buffer Size */
2833 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_BUFFER_SIZE,
2834 NULL, &rsp);
2835 if (err) {
2836 return err;
2837 }
2838
2839 le_read_buffer_size_complete(rsp);
2840
2841 net_buf_unref(rsp);
2842 }
2843
2844 return 0;
2845 }
2846
/* LE-specific controller initialization: verify LE support, read the LE
 * feature set, configure buffers (ISO-aware when enabled), enable LE on
 * dual-mode controllers, read supported states, set default data
 * length, configure privacy (RPA timeout, resolving list) and direction
 * finding, and finally program the LE event mask. Returns 0 on success
 * or a negative error code from the first failing step.
 */
static int le_init(void)
{
	struct bt_hci_cp_write_le_host_supp *cp_le;
	struct net_buf *buf, *rsp;
	int err;

	/* For now we only support LE capable controllers */
	if (!BT_FEAT_LE(bt_dev.features)) {
		BT_ERR("Non-LE capable controller detected!");
		return -ENODEV;
	}

	/* Read Low Energy Supported Features */
	err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_LOCAL_FEATURES, NULL,
				   &rsp);
	if (err) {
		return err;
	}

	read_le_features_complete(rsp);
	net_buf_unref(rsp);

	/* Buffer setup: ISO path when supported, plain LE otherwise */
	if (IS_ENABLED(CONFIG_BT_ISO) &&
	    BT_FEAT_LE_ISO(bt_dev.le.features)) {
		err = le_init_iso();
		if (err) {
			return err;
		}
	} else if (IS_ENABLED(CONFIG_BT_CONN)) {
		/* Read LE Buffer Size */
		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_BUFFER_SIZE,
					   NULL, &rsp);
		if (err) {
			return err;
		}

		le_read_buffer_size_complete(rsp);

		net_buf_unref(rsp);
	}

	if (BT_FEAT_BREDR(bt_dev.features)) {
		buf = bt_hci_cmd_create(BT_HCI_OP_LE_WRITE_LE_HOST_SUPP,
					sizeof(*cp_le));
		if (!buf) {
			return -ENOBUFS;
		}

		cp_le = net_buf_add(buf, sizeof(*cp_le));

		/* Explicitly enable LE for dual-mode controllers */
		cp_le->le = 0x01;
		cp_le->simul = 0x00;
		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_WRITE_LE_HOST_SUPP, buf,
					   NULL);
		if (err) {
			return err;
		}
	}

	/* Read LE Supported States */
	if (BT_CMD_LE_STATES(bt_dev.supported_commands)) {
		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_SUPP_STATES, NULL,
					   &rsp);
		if (err) {
			return err;
		}

		le_read_supp_states_complete(rsp);
		net_buf_unref(rsp);
	}

	/* Program the controller's maximum supported data length as the
	 * default for new connections.
	 */
	if (IS_ENABLED(CONFIG_BT_CONN) &&
	    IS_ENABLED(CONFIG_BT_DATA_LEN_UPDATE) &&
	    IS_ENABLED(CONFIG_BT_AUTO_DATA_LEN_UPDATE) &&
	    BT_FEAT_LE_DLE(bt_dev.le.features)) {
		struct bt_hci_cp_le_write_default_data_len *cp;
		uint16_t tx_octets, tx_time;

		err = hci_le_read_max_data_len(&tx_octets, &tx_time);
		if (err) {
			return err;
		}

		buf = bt_hci_cmd_create(BT_HCI_OP_LE_WRITE_DEFAULT_DATA_LEN,
					sizeof(*cp));
		if (!buf) {
			return -ENOBUFS;
		}

		cp = net_buf_add(buf, sizeof(*cp));
		cp->max_tx_octets = sys_cpu_to_le16(tx_octets);
		cp->max_tx_time = sys_cpu_to_le16(tx_time);

		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_WRITE_DEFAULT_DATA_LEN,
					   buf, NULL);
		if (err) {
			return err;
		}
	}

#if defined(CONFIG_BT_SMP)
	if (BT_FEAT_LE_PRIVACY(bt_dev.le.features)) {
#if defined(CONFIG_BT_PRIVACY)
		struct bt_hci_cp_le_set_rpa_timeout *cp;

		buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_RPA_TIMEOUT,
					sizeof(*cp));
		if (!buf) {
			return -ENOBUFS;
		}

		cp = net_buf_add(buf, sizeof(*cp));
		cp->rpa_timeout = sys_cpu_to_le16(CONFIG_BT_RPA_TIMEOUT);
		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_RPA_TIMEOUT, buf,
					   NULL);
		if (err) {
			return err;
		}
#endif /* defined(CONFIG_BT_PRIVACY) */

		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_RL_SIZE, NULL,
					   &rsp);
		if (err) {
			return err;
		}
		le_read_resolving_list_size_complete(rsp);
		net_buf_unref(rsp);
	}
#endif

#if IS_ENABLED(CONFIG_BT_DF)
	if (BT_FEAT_LE_CONNECTIONLESS_CTE_TX(bt_dev.le.features) ||
	    BT_FEAT_LE_CONNECTIONLESS_CTE_RX(bt_dev.le.features)) {
		err = le_df_init();
		if (err) {
			return err;
		}
	}
#endif /* CONFIG_BT_DF */

	return le_set_event_mask();
}
2990
#if !defined(CONFIG_BT_BREDR)
/* BR/EDR init variant for LE-only builds. Even without BR/EDR support,
 * the classic Read Buffer Size command serves as a fallback when the
 * controller reported no LE-specific ACL buffers. Returns 0 on success
 * or a negative error code.
 */
static int bt_br_init(void)
{
#if defined(CONFIG_BT_CONN)
	struct net_buf *rsp;
	int err;

	/* Nothing to do if LE already provided ACL buffer info */
	if (bt_dev.le.acl_mtu) {
		return 0;
	}

	/* Use BR/EDR buffer size if LE reports zero buffers */
	err = bt_hci_cmd_send_sync(BT_HCI_OP_READ_BUFFER_SIZE, NULL, &rsp);
	if (err) {
		return err;
	}

	read_buffer_size_complete(rsp);
	net_buf_unref(rsp);
#endif /* CONFIG_BT_CONN */

	return 0;
}
#endif /* !defined(CONFIG_BT_BREDR) */
3015
/* Build and send the (non-LE) HCI Set Event Mask command based on the
 * enabled Kconfig features and the controller's LE feature bits.
 * Returns 0 on success or a negative error code.
 */
static int set_event_mask(void)
{
	struct bt_hci_cp_set_event_mask *ev;
	struct net_buf *buf;
	uint64_t mask = 0U;

	buf = bt_hci_cmd_create(BT_HCI_OP_SET_EVENT_MASK, sizeof(*ev));
	if (!buf) {
		return -ENOBUFS;
	}

	ev = net_buf_add(buf, sizeof(*ev));

	if (IS_ENABLED(CONFIG_BT_BREDR)) {
		/* Since we require LE support, we can count on a
		 * Bluetooth 4.0 feature set
		 */
		mask |= BT_EVT_MASK_INQUIRY_COMPLETE;
		mask |= BT_EVT_MASK_CONN_COMPLETE;
		mask |= BT_EVT_MASK_CONN_REQUEST;
		mask |= BT_EVT_MASK_AUTH_COMPLETE;
		mask |= BT_EVT_MASK_REMOTE_NAME_REQ_COMPLETE;
		mask |= BT_EVT_MASK_REMOTE_FEATURES;
		mask |= BT_EVT_MASK_ROLE_CHANGE;
		mask |= BT_EVT_MASK_PIN_CODE_REQ;
		mask |= BT_EVT_MASK_LINK_KEY_REQ;
		mask |= BT_EVT_MASK_LINK_KEY_NOTIFY;
		mask |= BT_EVT_MASK_INQUIRY_RESULT_WITH_RSSI;
		mask |= BT_EVT_MASK_REMOTE_EXT_FEATURES;
		mask |= BT_EVT_MASK_SYNC_CONN_COMPLETE;
		mask |= BT_EVT_MASK_EXTENDED_INQUIRY_RESULT;
		mask |= BT_EVT_MASK_IO_CAPA_REQ;
		mask |= BT_EVT_MASK_IO_CAPA_RESP;
		mask |= BT_EVT_MASK_USER_CONFIRM_REQ;
		mask |= BT_EVT_MASK_USER_PASSKEY_REQ;
		mask |= BT_EVT_MASK_SSP_COMPLETE;
		mask |= BT_EVT_MASK_USER_PASSKEY_NOTIFY;
	}

	/* Always-on events */
	mask |= BT_EVT_MASK_HARDWARE_ERROR;
	mask |= BT_EVT_MASK_DATA_BUFFER_OVERFLOW;
	mask |= BT_EVT_MASK_LE_META_EVENT;

	if (IS_ENABLED(CONFIG_BT_CONN)) {
		mask |= BT_EVT_MASK_DISCONN_COMPLETE;
		mask |= BT_EVT_MASK_REMOTE_VERSION_INFO;
	}

	if (IS_ENABLED(CONFIG_BT_SMP) &&
	    BT_FEAT_LE_ENCR(bt_dev.le.features)) {
		mask |= BT_EVT_MASK_ENCRYPT_CHANGE;
		mask |= BT_EVT_MASK_ENCRYPT_KEY_REFRESH_COMPLETE;
	}

	sys_put_le64(mask, ev->events);
	return bt_hci_cmd_send_sync(BT_HCI_OP_SET_EVENT_MASK, buf, NULL);
}
3073
3074 #if defined(CONFIG_BT_DEBUG)
/* Map an HCI/LMP version number to its Bluetooth version string,
 * returning "unknown" for values beyond the table.
 */
static const char *ver_str(uint8_t ver)
{
	static const char * const str[] = {
		"1.0b", "1.1", "1.2", "2.0", "2.1", "3.0", "4.0", "4.1", "4.2",
		"5.0", "5.1", "5.2", "5.3"
	};

	return (ver < ARRAY_SIZE(str)) ? str[ver] : "unknown";
}
3088
/* Log the configured identity addresses (and, when privacy is enabled
 * and sniffer logging is requested, their IRKs) followed by the
 * controller's HCI and LMP version information. Only built when
 * CONFIG_BT_DEBUG is enabled; see the stub below otherwise.
 */
static void bt_dev_show_info(void)
{
	int i;

	/* First identity; only tagged "[0]" when more than one exists. */
	BT_INFO("Identity%s: %s", bt_dev.id_count > 1 ? "[0]" : "",
		bt_addr_le_str(&bt_dev.id_addr[0]));

	if (IS_ENABLED(CONFIG_BT_LOG_SNIFFER_INFO)) {
#if defined(CONFIG_BT_PRIVACY)
		uint8_t irk[16];

		/* Log a byte-swapped copy of the stored IRK. */
		sys_memcpy_swap(irk, bt_dev.irk[0], 16);
		BT_INFO("IRK%s: 0x%s", bt_dev.id_count > 1 ? "[0]" : "",
			bt_hex(irk, 16));
#endif
	}

	/* Remaining identities beyond the first. */
	for (i = 1; i < bt_dev.id_count; i++) {
		BT_INFO("Identity[%d]: %s",
			i, bt_addr_le_str(&bt_dev.id_addr[i]));

		if (IS_ENABLED(CONFIG_BT_LOG_SNIFFER_INFO)) {
#if defined(CONFIG_BT_PRIVACY)
			uint8_t irk[16];

			sys_memcpy_swap(irk, bt_dev.irk[i], 16);
			BT_INFO("IRK[%d]: 0x%s", i, bt_hex(irk, 16));
#endif
		}
	}

	if (IS_ENABLED(CONFIG_BT_SMP) &&
	    IS_ENABLED(CONFIG_BT_LOG_SNIFFER_INFO)) {
		/* Dump all stored keys via the sniffer-info printer. */
		bt_keys_foreach(BT_KEYS_ALL, bt_keys_show_sniffer_info, NULL);
	}

	BT_INFO("HCI: version %s (0x%02x) revision 0x%04x, manufacturer 0x%04x",
		ver_str(bt_dev.hci_version), bt_dev.hci_version,
		bt_dev.hci_revision, bt_dev.manufacturer);
	BT_INFO("LMP: version %s (0x%02x) subver 0x%04x",
		ver_str(bt_dev.lmp_version), bt_dev.lmp_version,
		bt_dev.lmp_subversion);
}
3132 #else
/* No-op stub when CONFIG_BT_DEBUG is disabled: device info logging
 * compiles away entirely.
 */
static inline void bt_dev_show_info(void)
{
}
3136 #endif /* CONFIG_BT_DEBUG */
3137
3138 #if defined(CONFIG_BT_HCI_VS_EXT)
3139 #if defined(CONFIG_BT_DEBUG)
/* Translate a Zephyr vendor HCI hardware platform code into a human
 * readable vendor name; codes beyond the table report "unknown".
 */
static const char *vs_hw_platform(uint16_t platform)
{
	static const char * const plat_str[] = {
		"reserved",
		"Intel Corporation",
		"Nordic Semiconductor",
		"NXP Semiconductors",
	};

	if (platform >= (sizeof(plat_str) / sizeof(plat_str[0]))) {
		return "unknown";
	}

	return plat_str[platform];
}
3152
vs_hw_variant(uint16_t platform,uint16_t variant)3153 static const char *vs_hw_variant(uint16_t platform, uint16_t variant)
3154 {
3155 static const char * const nordic_str[] = {
3156 "reserved", "nRF51x", "nRF52x", "nRF53x"
3157 };
3158
3159 if (platform != BT_HCI_VS_HW_PLAT_NORDIC) {
3160 return "unknown";
3161 }
3162
3163 if (variant < ARRAY_SIZE(nordic_str)) {
3164 return nordic_str[variant];
3165 }
3166
3167 return "unknown";
3168 }
3169
/* Translate a Zephyr vendor HCI firmware variant code into a human
 * readable description; unknown codes report "unknown".
 */
static const char *vs_fw_variant(uint8_t variant)
{
	static const char * const var_str[] = {
		"Standard Bluetooth controller",
		"Vendor specific controller",
		"Firmware loader",
		"Rescue image",
	};

	return (variant < (sizeof(var_str) / sizeof(var_str[0]))) ?
	       var_str[variant] : "unknown";
}
3185 #endif /* CONFIG_BT_DEBUG */
3186
/* Probe and initialize Zephyr vendor-specific (VS) HCI extensions:
 * read the controller's VS version info, cache its supported VS
 * command bitmask in bt_dev.vs_commands, and - when advertised - its
 * supported VS feature bits in bt_dev.vs_features. Gives up quietly
 * (with a warning) if the controller doesn't answer the VS commands.
 */
static void hci_vs_init(void)
{
	/* All three response layouts share one pointer; only one member
	 * is valid at a time, depending on the last command sent.
	 */
	union {
		struct bt_hci_rp_vs_read_version_info *info;
		struct bt_hci_rp_vs_read_supported_commands *cmds;
		struct bt_hci_rp_vs_read_supported_features *feat;
	} rp;
	struct net_buf *rsp;
	int err;

	/* If heuristics is enabled, try to guess HCI VS support by looking
	 * at the HCI version and identity address. We haven't set any addresses
	 * at this point. So we need to read the public address.
	 */
	if (IS_ENABLED(CONFIG_BT_HCI_VS_EXT_DETECT)) {
		bt_addr_le_t addr;

		if ((bt_dev.hci_version < BT_HCI_VERSION_5_0) ||
		    bt_id_read_public_addr(&addr)) {
			BT_WARN("Controller doesn't seem to support "
				"Zephyr vendor HCI");
			return;
		}
	}

	err = bt_hci_cmd_send_sync(BT_HCI_OP_VS_READ_VERSION_INFO, NULL, &rsp);
	if (err) {
		BT_WARN("Vendor HCI extensions not available");
		return;
	}

	/* With detection heuristics enabled, a wrong-sized response is
	 * treated as "no VS support" rather than trusted.
	 */
	if (IS_ENABLED(CONFIG_BT_HCI_VS_EXT_DETECT) &&
	    rsp->len != sizeof(struct bt_hci_rp_vs_read_version_info)) {
		BT_WARN("Invalid Vendor HCI extensions");
		net_buf_unref(rsp);
		return;
	}

#if defined(CONFIG_BT_DEBUG)
	rp.info = (void *)rsp->data;
	BT_INFO("HW Platform: %s (0x%04x)",
		vs_hw_platform(sys_le16_to_cpu(rp.info->hw_platform)),
		sys_le16_to_cpu(rp.info->hw_platform));
	BT_INFO("HW Variant: %s (0x%04x)",
		vs_hw_variant(sys_le16_to_cpu(rp.info->hw_platform),
			      sys_le16_to_cpu(rp.info->hw_variant)),
		sys_le16_to_cpu(rp.info->hw_variant));
	BT_INFO("Firmware: %s (0x%02x) Version %u.%u Build %u",
		vs_fw_variant(rp.info->fw_variant), rp.info->fw_variant,
		rp.info->fw_version, sys_le16_to_cpu(rp.info->fw_revision),
		sys_le32_to_cpu(rp.info->fw_build));
#endif /* CONFIG_BT_DEBUG */

	net_buf_unref(rsp);

	err = bt_hci_cmd_send_sync(BT_HCI_OP_VS_READ_SUPPORTED_COMMANDS,
				   NULL, &rsp);
	if (err) {
		BT_WARN("Failed to read supported vendor commands");
		return;
	}

	if (IS_ENABLED(CONFIG_BT_HCI_VS_EXT_DETECT) &&
	    rsp->len != sizeof(struct bt_hci_rp_vs_read_supported_commands)) {
		BT_WARN("Invalid Vendor HCI extensions");
		net_buf_unref(rsp);
		return;
	}

	/* Cache the VS command bitmask for later capability checks. */
	rp.cmds = (void *)rsp->data;
	memcpy(bt_dev.vs_commands, rp.cmds->commands, BT_DEV_VS_CMDS_MAX);
	net_buf_unref(rsp);

	/* Only query VS features if the controller claims the command. */
	if (BT_VS_CMD_SUP_FEAT(bt_dev.vs_commands)) {
		err = bt_hci_cmd_send_sync(BT_HCI_OP_VS_READ_SUPPORTED_FEATURES,
					   NULL, &rsp);
		if (err) {
			BT_WARN("Failed to read supported vendor features");
			return;
		}

		if (IS_ENABLED(CONFIG_BT_HCI_VS_EXT_DETECT) &&
		    rsp->len !=
		    sizeof(struct bt_hci_rp_vs_read_supported_features)) {
			BT_WARN("Invalid Vendor HCI extensions");
			net_buf_unref(rsp);
			return;
		}

		rp.feat = (void *)rsp->data;
		memcpy(bt_dev.vs_features, rp.feat->features,
		       BT_DEV_VS_FEAT_MAX);
		net_buf_unref(rsp);
	}
}
3282 #endif /* CONFIG_BT_HCI_VS_EXT */
3283
/* Run the HCI-level initialization sequence in order: common
 * controller setup, LE setup, optional BR/EDR setup, event mask
 * configuration, vendor-extension probing, and finally identity
 * initialization.
 *
 * Returns 0 on success or the negative errno of the first failing
 * step.
 */
static int hci_init(void)
{
	int err;

	err = common_init();
	if (err) {
		return err;
	}

	err = le_init();
	if (err) {
		return err;
	}

	if (BT_FEAT_BREDR(bt_dev.features)) {
		err = bt_br_init();
		if (err) {
			return err;
		}
	} else if (IS_ENABLED(CONFIG_BT_BREDR)) {
		/* BR/EDR support was requested at build time but the
		 * controller does not advertise it.
		 */
		BT_ERR("Non-BR/EDR controller detected");
		return -EIO;
	}

	err = set_event_mask();
	if (err) {
		return err;
	}

#if defined(CONFIG_BT_HCI_VS_EXT)
	/* Best effort: a failed VS probe only logs a warning. */
	hci_vs_init();
#endif
	err = bt_id_init();
	if (err) {
		return err;
	}

	return 0;
}
3323
bt_send(struct net_buf * buf)3324 int bt_send(struct net_buf *buf)
3325 {
3326 BT_DBG("buf %p len %u type %u", buf, buf->len, bt_buf_get_type(buf));
3327
3328 bt_monitor_send(bt_monitor_opcode(buf), buf->data, buf->len);
3329
3330 if (IS_ENABLED(CONFIG_BT_TINYCRYPT_ECC)) {
3331 return bt_hci_ecc_send(buf);
3332 }
3333
3334 return bt_dev.drv->send(buf);
3335 }
3336
/* Events that must be handled in priority (driver/caller) context
 * rather than deferred to the RX thread: command flow control and
 * buffer-credit events that the TX thread may be blocked waiting on.
 */
static const struct event_handler prio_events[] = {
	EVENT_HANDLER(BT_HCI_EVT_CMD_COMPLETE, hci_cmd_complete,
		      sizeof(struct bt_hci_evt_cmd_complete)),
	EVENT_HANDLER(BT_HCI_EVT_CMD_STATUS, hci_cmd_status,
		      sizeof(struct bt_hci_evt_cmd_status)),
#if defined(CONFIG_BT_CONN)
	EVENT_HANDLER(BT_HCI_EVT_DATA_BUF_OVERFLOW,
		      hci_data_buf_overflow,
		      sizeof(struct bt_hci_evt_data_buf_overflow)),
	EVENT_HANDLER(BT_HCI_EVT_DISCONN_COMPLETE, hci_disconn_complete_prio,
		      sizeof(struct bt_hci_evt_disconn_complete)),
#endif /* CONFIG_BT_CONN */
#if defined(CONFIG_BT_CONN) || defined(CONFIG_BT_ISO)
	EVENT_HANDLER(BT_HCI_EVT_NUM_COMPLETED_PACKETS,
		      hci_num_completed_packets,
		      sizeof(struct bt_hci_evt_num_completed_packets)),
#endif /* CONFIG_BT_CONN || CONFIG_BT_ISO */
};
3355
hci_event_prio(struct net_buf * buf)3356 void hci_event_prio(struct net_buf *buf)
3357 {
3358 struct net_buf_simple_state state;
3359 struct bt_hci_evt_hdr *hdr;
3360 uint8_t evt_flags;
3361
3362 net_buf_simple_save(&buf->b, &state);
3363
3364 BT_ASSERT(buf->len >= sizeof(*hdr));
3365
3366 hdr = net_buf_pull_mem(buf, sizeof(*hdr));
3367 evt_flags = bt_hci_evt_get_flags(hdr->evt);
3368 BT_ASSERT(evt_flags & BT_HCI_EVT_FLAG_RECV_PRIO);
3369
3370 handle_event(hdr->evt, buf, prio_events, ARRAY_SIZE(prio_events));
3371
3372 if (evt_flags & BT_HCI_EVT_FLAG_RECV) {
3373 net_buf_simple_restore(&buf->b, &state);
3374 } else {
3375 net_buf_unref(buf);
3376 }
3377 }
3378
/* Entry point for the HCI driver to hand received packets to the
 * host. Depending on CONFIG_BT_RECV_IS_RX_THREAD the packet is either
 * processed directly in the caller's context or queued for the host
 * RX thread; priority-flagged events are always dispatched
 * immediately.
 *
 * Returns 0 on success, or -EINVAL (dropping the buffer) for an
 * unknown buffer type.
 */
int bt_recv(struct net_buf *buf)
{
	bt_monitor_send(bt_monitor_opcode(buf), buf->data, buf->len);

	BT_DBG("buf %p len %u", buf, buf->len);

	switch (bt_buf_get_type(buf)) {
#if defined(CONFIG_BT_CONN)
	case BT_BUF_ACL_IN:
#if defined(CONFIG_BT_RECV_IS_RX_THREAD)
		hci_acl(buf);
#else
		net_buf_put(&bt_dev.rx_queue, buf);
#endif
		return 0;
#endif /* BT_CONN */
	case BT_BUF_EVT:
	{
#if defined(CONFIG_BT_RECV_IS_RX_THREAD)
		hci_event(buf);
#else
		struct bt_hci_evt_hdr *hdr = (void *)buf->data;
		uint8_t evt_flags = bt_hci_evt_get_flags(hdr->evt);

		/* An event can be flagged both RECV_PRIO and RECV, in
		 * which case hci_event_prio() rewinds the buffer and it
		 * is additionally queued for the RX thread.
		 */
		if (evt_flags & BT_HCI_EVT_FLAG_RECV_PRIO) {
			hci_event_prio(buf);
		}

		if (evt_flags & BT_HCI_EVT_FLAG_RECV) {
			net_buf_put(&bt_dev.rx_queue, buf);
		}
#endif
		return 0;

	}
#if defined(CONFIG_BT_ISO)
	case BT_BUF_ISO_IN:
#if defined(CONFIG_BT_RECV_IS_RX_THREAD)
		hci_iso(buf);
#else
		net_buf_put(&bt_dev.rx_queue, buf);
#endif
		return 0;
#endif /* CONFIG_BT_ISO */
	default:
		BT_ERR("Invalid buf type %u", bt_buf_get_type(buf));
		net_buf_unref(buf);
		return -EINVAL;
	}
}
3429
3430 #if defined(CONFIG_BT_RECV_IS_RX_THREAD)
/* Priority receive path used when the driver's receive context also
 * serves as the RX thread: only HCI events may be delivered here,
 * and they are dispatched through the priority handlers immediately.
 * Always returns 0.
 */
int bt_recv_prio(struct net_buf *buf)
{
	bt_monitor_send(bt_monitor_opcode(buf), buf->data, buf->len);

	BT_ASSERT(bt_buf_get_type(buf) == BT_BUF_EVT);

	hci_event_prio(buf);

	return 0;
}
3441 #endif /* defined(CONFIG_BT_RECV_IS_RX_THREAD) */
3442
bt_hci_driver_register(const struct bt_hci_driver * drv)3443 int bt_hci_driver_register(const struct bt_hci_driver *drv)
3444 {
3445 if (bt_dev.drv) {
3446 return -EALREADY;
3447 }
3448
3449 if (!drv->open || !drv->send) {
3450 return -EINVAL;
3451 }
3452
3453 bt_dev.drv = drv;
3454
3455 BT_DBG("Registered %s", drv->name ? drv->name : "");
3456
3457 bt_monitor_new_index(BT_MONITOR_TYPE_PRIMARY, drv->bus,
3458 BT_ADDR_ANY, drv->name ? drv->name : "bt0");
3459
3460 return 0;
3461 }
3462
/* Mark the stack fully initialized (BT_DEV_READY), re-evaluate the
 * scanner state for the observer role, and log device information.
 */
void bt_finalize_init(void)
{
	atomic_set_bit(bt_dev.flags, BT_DEV_READY);

	if (IS_ENABLED(CONFIG_BT_OBSERVER)) {
		bt_le_scan_update(false);
	}

	bt_dev_show_info();
}
3473
/* Host-level initialization run once the HCI transport is open:
 * HCI init, connection and ISO subsystem init, then either defer
 * completion until the application loads settings (when settings are
 * enabled and no identity exists yet) or finalize immediately.
 *
 * Returns 0 on success or a negative errno from a failing step.
 */
static int bt_init(void)
{
	int err;

	err = hci_init();
	if (err) {
		return err;
	}

	if (IS_ENABLED(CONFIG_BT_CONN)) {
		err = bt_conn_init();
		if (err) {
			return err;
		}
	}

	if (IS_ENABLED(CONFIG_BT_ISO)) {
		err = bt_conn_iso_init();
		if (err) {
			return err;
		}
	}

	if (IS_ENABLED(CONFIG_BT_SETTINGS)) {
		if (!bt_dev.id_count) {
			/* NOTE(review): presumably finalization then
			 * happens from the settings load path - confirm.
			 */
			BT_INFO("No ID address. App must call settings_load()");
			return 0;
		}

		atomic_set_bit(bt_dev.flags, BT_DEV_PRESET_ID);
	}

	bt_finalize_init();
	return 0;
}
3509
init_work(struct k_work * work)3510 static void init_work(struct k_work *work)
3511 {
3512 int err;
3513
3514 err = bt_init();
3515 if (ready_cb) {
3516 ready_cb(err);
3517 }
3518 }
3519
3520 #if !defined(CONFIG_BT_RECV_IS_RX_THREAD)
/* Host RX thread: drains buffers queued by bt_recv() and dispatches
 * each one by type (ACL, ISO or event). Only built when the driver's
 * receive context is not itself used as the RX thread.
 */
static void hci_rx_thread(void)
{
	struct net_buf *buf;

	BT_DBG("started");

	while (1) {
		BT_DBG("calling fifo_get_wait");
		buf = net_buf_get(&bt_dev.rx_queue, K_FOREVER);

		BT_DBG("buf %p type %u len %u", buf, bt_buf_get_type(buf),
		       buf->len);

		switch (bt_buf_get_type(buf)) {
#if defined(CONFIG_BT_CONN)
		case BT_BUF_ACL_IN:
			hci_acl(buf);
			break;
#endif /* CONFIG_BT_CONN */
#if defined(CONFIG_BT_ISO)
		case BT_BUF_ISO_IN:
			hci_iso(buf);
			break;
#endif /* CONFIG_BT_ISO */
		case BT_BUF_EVT:
			hci_event(buf);
			break;
		default:
			/* Unknown types are dropped, not fatal. */
			BT_ERR("Unknown buf type %u", bt_buf_get_type(buf));
			net_buf_unref(buf);
			break;
		}

		/* Make sure we don't hog the CPU if the rx_queue never
		 * gets empty.
		 */
		k_yield();
	}
}
3560 #endif /* !CONFIG_BT_RECV_IS_RX_THREAD */
3561
/* Enable the Bluetooth stack: initialize settings or the static
 * device name, start the TX (and optionally RX) thread, open the HCI
 * driver, then either run initialization synchronously (cb == NULL)
 * or defer it to the system workqueue and report via cb.
 *
 * Returns 0 on success, -ENODEV if no driver is registered,
 * -EALREADY if already enabled, or a negative errno on failure.
 */
int bt_enable(bt_ready_cb_t cb)
{
	int err;

	if (!bt_dev.drv) {
		BT_ERR("No HCI driver registered");
		return -ENODEV;
	}

	if (atomic_test_and_set_bit(bt_dev.flags, BT_DEV_ENABLE)) {
		return -EALREADY;
	}

	if (IS_ENABLED(CONFIG_BT_SETTINGS)) {
		err = bt_settings_init();
		if (err) {
			return err;
		}
	} else {
		/* Without settings the name is fixed to the Kconfig
		 * default; failure to set it is non-fatal.
		 */
		err = bt_set_name(CONFIG_BT_DEVICE_NAME);
		if (err) {
			BT_WARN("Failed to set device name (%d)", err);
		}
	}

	ready_cb = cb;

	/* TX thread */
	k_thread_create(&tx_thread_data, tx_thread_stack,
			K_KERNEL_STACK_SIZEOF(tx_thread_stack),
			hci_tx_thread, NULL, NULL, NULL,
			K_PRIO_COOP(CONFIG_BT_HCI_TX_PRIO),
			0, K_NO_WAIT);
	k_thread_name_set(&tx_thread_data, "BT TX");

#if !defined(CONFIG_BT_RECV_IS_RX_THREAD)
	/* RX thread */
	k_thread_create(&rx_thread_data, rx_thread_stack,
			K_KERNEL_STACK_SIZEOF(rx_thread_stack),
			(k_thread_entry_t)hci_rx_thread, NULL, NULL, NULL,
			K_PRIO_COOP(CONFIG_BT_RX_PRIO),
			0, K_NO_WAIT);
	k_thread_name_set(&rx_thread_data, "BT RX");
#endif

	if (IS_ENABLED(CONFIG_BT_TINYCRYPT_ECC)) {
		bt_hci_ecc_init();
	}

	err = bt_dev.drv->open();
	if (err) {
		BT_ERR("HCI driver open failed (%d)", err);
		return err;
	}

	bt_monitor_send(BT_MONITOR_OPEN_INDEX, NULL, 0);

	/* No callback: initialize synchronously in this context. */
	if (!cb) {
		return bt_init();
	}

	k_work_submit(&bt_dev.init);
	return 0;
}
3626
/* Length of the compile-time default device name, excluding the
 * terminating NUL.
 */
#define DEVICE_NAME_LEN (sizeof(CONFIG_BT_DEVICE_NAME) - 1)
#if defined(CONFIG_BT_DEVICE_NAME_DYNAMIC)
/* The default name must fit within the runtime name buffer. */
BUILD_ASSERT(DEVICE_NAME_LEN < CONFIG_BT_DEVICE_NAME_MAX);
#else
/* 248 is the GAP limit for the Bluetooth device name length. */
BUILD_ASSERT(DEVICE_NAME_LEN < 248);
#endif
3633
bt_set_name(const char * name)3634 int bt_set_name(const char *name)
3635 {
3636 #if defined(CONFIG_BT_DEVICE_NAME_DYNAMIC)
3637 size_t len = strlen(name);
3638 int err;
3639
3640 if (len > CONFIG_BT_DEVICE_NAME_MAX) {
3641 return -ENOMEM;
3642 }
3643
3644 if (!strcmp(bt_dev.name, name)) {
3645 return 0;
3646 }
3647
3648 strncpy(bt_dev.name, name, len);
3649 bt_dev.name[len] = '\0';
3650
3651 if (IS_ENABLED(CONFIG_BT_SETTINGS)) {
3652 err = settings_save_one("bt/name", bt_dev.name, len);
3653 if (err) {
3654 BT_WARN("Unable to store name");
3655 }
3656 }
3657
3658 return 0;
3659 #else
3660 return -ENOMEM;
3661 #endif
3662 }
3663
/* Return the current device name: the runtime buffer when dynamic
 * names are enabled, otherwise the compile-time default.
 */
const char *bt_get_name(void)
{
#if defined(CONFIG_BT_DEVICE_NAME_DYNAMIC)
	return bt_dev.name;
#else
	return CONFIG_BT_DEVICE_NAME;
#endif
}
3672
bt_addr_le_is_bonded(uint8_t id,const bt_addr_le_t * addr)3673 bool bt_addr_le_is_bonded(uint8_t id, const bt_addr_le_t *addr)
3674 {
3675 if (IS_ENABLED(CONFIG_BT_SMP)) {
3676 struct bt_keys *keys = bt_keys_find_addr(id, addr);
3677
3678 /* if there are any keys stored then device is bonded */
3679 return keys && keys->keys;
3680 } else {
3681 return false;
3682 }
3683 }
3684
3685 #if defined(CONFIG_BT_FILTER_ACCEPT_LIST)
bt_le_filter_accept_list_add(const bt_addr_le_t * addr)3686 int bt_le_filter_accept_list_add(const bt_addr_le_t *addr)
3687 {
3688 struct bt_hci_cp_le_add_dev_to_fal *cp;
3689 struct net_buf *buf;
3690 int err;
3691
3692 if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
3693 return -EAGAIN;
3694 }
3695
3696 buf = bt_hci_cmd_create(BT_HCI_OP_LE_ADD_DEV_TO_FAL, sizeof(*cp));
3697 if (!buf) {
3698 return -ENOBUFS;
3699 }
3700
3701 cp = net_buf_add(buf, sizeof(*cp));
3702 bt_addr_le_copy(&cp->addr, addr);
3703
3704 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_ADD_DEV_TO_FAL, buf, NULL);
3705 if (err) {
3706 BT_ERR("Failed to add device to filter accept list");
3707
3708 return err;
3709 }
3710
3711 return 0;
3712 }
3713
bt_le_filter_accept_list_remove(const bt_addr_le_t * addr)3714 int bt_le_filter_accept_list_remove(const bt_addr_le_t *addr)
3715 {
3716 struct bt_hci_cp_le_rem_dev_from_fal *cp;
3717 struct net_buf *buf;
3718 int err;
3719
3720 if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
3721 return -EAGAIN;
3722 }
3723
3724 buf = bt_hci_cmd_create(BT_HCI_OP_LE_REM_DEV_FROM_FAL, sizeof(*cp));
3725 if (!buf) {
3726 return -ENOBUFS;
3727 }
3728
3729 cp = net_buf_add(buf, sizeof(*cp));
3730 bt_addr_le_copy(&cp->addr, addr);
3731
3732 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_REM_DEV_FROM_FAL, buf, NULL);
3733 if (err) {
3734 BT_ERR("Failed to remove device from filter accept list");
3735 return err;
3736 }
3737
3738 return 0;
3739 }
3740
bt_le_filter_accept_list_clear(void)3741 int bt_le_filter_accept_list_clear(void)
3742 {
3743 int err;
3744
3745 if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
3746 return -EAGAIN;
3747 }
3748
3749 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_CLEAR_FAL, NULL, NULL);
3750 if (err) {
3751 BT_ERR("Failed to clear filter accept list");
3752 return err;
3753 }
3754
3755 return 0;
3756 }
3757 #endif /* defined(CONFIG_BT_FILTER_ACCEPT_LIST) */
3758
bt_le_set_chan_map(uint8_t chan_map[5])3759 int bt_le_set_chan_map(uint8_t chan_map[5])
3760 {
3761 struct bt_hci_cp_le_set_host_chan_classif *cp;
3762 struct net_buf *buf;
3763
3764 if (!IS_ENABLED(CONFIG_BT_CENTRAL)) {
3765 return -ENOTSUP;
3766 }
3767
3768 if (!BT_CMD_TEST(bt_dev.supported_commands, 27, 3)) {
3769 BT_WARN("Set Host Channel Classification command is "
3770 "not supported");
3771 return -ENOTSUP;
3772 }
3773
3774 buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_HOST_CHAN_CLASSIF,
3775 sizeof(*cp));
3776 if (!buf) {
3777 return -ENOBUFS;
3778 }
3779
3780 cp = net_buf_add(buf, sizeof(*cp));
3781
3782 memcpy(&cp->ch_map[0], &chan_map[0], 4);
3783 cp->ch_map[4] = chan_map[4] & BIT_MASK(5);
3784
3785 return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_HOST_CHAN_CLASSIF,
3786 buf, NULL);
3787 }
3788
bt_data_parse(struct net_buf_simple * ad,bool (* func)(struct bt_data * data,void * user_data),void * user_data)3789 void bt_data_parse(struct net_buf_simple *ad,
3790 bool (*func)(struct bt_data *data, void *user_data),
3791 void *user_data)
3792 {
3793 while (ad->len > 1) {
3794 struct bt_data data;
3795 uint8_t len;
3796
3797 len = net_buf_simple_pull_u8(ad);
3798 if (len == 0U) {
3799 /* Early termination */
3800 return;
3801 }
3802
3803 if (len > ad->len) {
3804 BT_WARN("Malformed data");
3805 return;
3806 }
3807
3808 data.type = net_buf_simple_pull_u8(ad);
3809 data.data_len = len - 1;
3810 data.data = ad->data;
3811
3812 if (!func(&data, user_data)) {
3813 return;
3814 }
3815
3816 net_buf_simple_pull(ad, len - 1);
3817 }
3818 }
3819